hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f731514ce63880879d8950cd12e196a3a011a776 | 11,680 | py | Python | chives/util/merkle_set.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | null | null | null | chives/util/merkle_set.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | null | null | null | chives/util/merkle_set.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | 1 | 2022-03-20T16:19:04.000Z | 2022-03-20T16:19:04.000Z | from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple
from chives.types.blockchain_format.sized_bytes import bytes32
"""
A simple, confidence-inspiring Merkle Set standard
Advantages of this standard:
Low CPU requirements
Small proofs of inclusion/exclusion
Reasonably simple implementation
The main tricks in this standard are:
Skips repeated hashing of exactly two things even when they share prefix bits
Proofs support proving inclusion/exclusion for a large number of values in
a single string. They're a serialization of a subset of the tree.
Proof format:
multiproof: subtree
subtree: middle or terminal or truncated or empty
middle: MIDDLE 1 subtree subtree
terminal: TERMINAL 1 hash 32
# If the sibling is empty truncated implies more than two children.
truncated: TRUNCATED 1 hash 32
empty: EMPTY 1
EMPTY: \x00
TERMINAL: \x01
MIDDLE: \x02
TRUNCATED: \x03
"""
# One-byte node-type tags used in the serialized proof format (see the
# module docstring: EMPTY/TERMINAL/MIDDLE/TRUNCATED).
EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])
# Hash value representing an empty (sub)tree.
BLANK = bytes32([0] * 32)
# Cache of sha256 contexts pre-fed with a 30-byte zero pad plus the two
# child type-tag bytes; populated once by init_prehashed() at import time.
prehashed: Dict[bytes, Any] = {}
def init_prehashed():
    """Populate ``prehashed`` with sha256 contexts for each valid tag pair."""
    tags = (EMPTY, TERMINAL, MIDDLE)
    pad = bytes(30)
    for left in tags:
        for right in tags:
            prehashed[left + right] = sha256(pad + left + right)


init_prehashed()
def hashdown(mystr: bytes) -> bytes:
    """Hash two concatenated 33-byte child encodings down to 32 bytes."""
    assert len(mystr) == 66
    # The two type-tag bytes select a pre-seeded sha256 context.
    left_tag = mystr[0:1]
    right_tag = mystr[33:34]
    hasher = prehashed[bytes(left_tag + right_tag)].copy()
    hasher.update(mystr[1:33])
    hasher.update(mystr[34:])
    return hasher.digest()[:32]
def compress_root(mystr: bytes) -> bytes32:
    """Collapse a 33-byte tagged root encoding to a bare 32-byte root hash."""
    assert len(mystr) == 33
    tag = mystr[0:1]
    payload = mystr[1:]
    if tag == MIDDLE:
        return bytes32(payload)
    if tag == EMPTY:
        assert payload == BLANK
        return BLANK
    # A terminal (single-element) root gets hashed once more.
    return bytes32(sha256(mystr).digest()[:32])
def get_bit(mybytes: bytes, pos: int) -> int:
    """Return bit ``pos`` of a 32-byte string, counting from the MSB of byte 0."""
    assert len(mybytes) == 32
    byte_index, bit_index = divmod(pos, 8)
    return (mybytes[byte_index] >> (7 - bit_index)) & 1
class Node(metaclass=ABCMeta):
    """Abstract interface shared by all merkle-set tree node types."""

    # 32-byte subtree hash; exact semantics depend on the concrete node type.
    hash: bytes

    @abstractmethod
    def get_hash(self) -> bytes:
        """Return the 33-byte encoding: a one-byte type tag plus the 32-byte hash."""
        pass

    @abstractmethod
    def is_empty(self) -> bool:
        """Return True if this node represents an empty subtree."""
        pass

    @abstractmethod
    def is_terminal(self) -> bool:
        """Return True if this node is a leaf holding a single value."""
        pass

    @abstractmethod
    def is_double(self) -> bool:
        """Return True if this subtree contains exactly two leaf values."""
        pass

    @abstractmethod
    def add(self, toadd: bytes, depth: int) -> "Node":
        """Return a node with ``toadd`` inserted (may be ``self`` if already present)."""
        pass

    @abstractmethod
    def remove(self, toremove: bytes, depth: int):
        """Return a node with ``toremove`` deleted (may be ``self`` if absent)."""
        pass

    @abstractmethod
    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        """Append proof fragments to ``p`` and return whether ``tocheck`` is present."""
        pass

    @abstractmethod
    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        """Append this off-path sibling's (possibly truncated) proof fragment to ``p``."""
        pass

    @abstractmethod
    def _audit(self, hashes: List[bytes], bits: List[int]):
        """Collect leaf hashes and verify they lie on the path given by ``bits``."""
        pass
class MerkleSet:
    """A merkle set over pre-hashed 32-byte values with inclusion proofs."""

    root: Node

    def __init__(self, root: Node = None):
        # An absent root denotes the empty set.
        self.root = _empty if root is None else root

    def get_root(self) -> bytes32:
        """Return the compressed 32-byte root hash of the set."""
        return compress_root(self.root.get_hash())

    def add_already_hashed(self, toadd: bytes):
        """Insert a pre-hashed 32-byte value."""
        self.root = self.root.add(toadd, 0)

    def remove_already_hashed(self, toremove: bytes):
        """Delete a pre-hashed 32-byte value (no-op when absent)."""
        self.root = self.root.remove(toremove, 0)

    def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
        """Return (membership flag, serialized inclusion/exclusion proof)."""
        proof: List = []
        included = self.root.is_included(tocheck, 0, proof)
        return included, b"".join(proof)

    def _audit(self, hashes: List[bytes]):
        """Sanity check: leaf hashes must come out in sorted order."""
        collected: List = []
        self.root._audit(collected, [])
        assert collected == sorted(collected)
class EmptyNode(Node):
    """Node representing an empty subtree; shared via the ``_empty`` singleton."""

    def __init__(self):
        self.hash = BLANK

    def get_hash(self) -> bytes:
        return EMPTY + BLANK

    def is_empty(self) -> bool:
        return True

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # An empty subtree should never legitimately be asked this.
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        # Inserting into the empty subtree creates a single leaf.
        return TerminalNode(toadd)

    def remove(self, toremove: bytes, depth: int) -> Node:
        # Nothing to remove; the empty subtree is unchanged.
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(EMPTY)
        return False

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(EMPTY)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
# Shared singleton used for every empty subtree.
_empty = EmptyNode()
class TerminalNode(Node):
    """Leaf node holding exactly one 32-byte value."""

    def __init__(self, hash: bytes, bits: List[int] = None):
        assert len(hash) == 32
        self.hash = hash
        # When deserializing a proof, verify the leaf lies on the given path.
        if bits is not None:
            self._audit([], bits)

    def get_hash(self) -> bytes:
        return TERMINAL + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return True

    def is_double(self) -> bool:
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        if toadd == self.hash:
            return self
        # Children of a middle node are kept in ascending hash order.
        if toadd > self.hash:
            pair = [self, TerminalNode(toadd)]
        else:
            pair = [TerminalNode(toadd), self]
        return self._make_middle(pair, depth)

    def _make_middle(self, children: Any, depth: int) -> Node:
        # Descend until the two hashes first disagree at bit ``depth``.
        bit0 = get_bit(children[0].hash, depth)
        bit1 = get_bit(children[1].hash, depth)
        if bit0 != bit1:
            return MiddleNode(children)
        slots: List[Node] = [_empty, _empty]
        slots[bit0] = self._make_middle(children, depth + 1)
        return MiddleNode(slots)

    def remove(self, toremove: bytes, depth: int) -> Node:
        return _empty if toremove == self.hash else self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(TERMINAL + self.hash)
        return tocheck == self.hash

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TERMINAL + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        hashes.append(self.hash)
        for pos, expected in enumerate(bits):
            assert get_bit(self.hash, pos) == expected
class MiddleNode(Node):
    """Internal node with two children selected by the bit at this node's depth."""

    def __init__(self, children: List[Node]):
        self.children = children
        # A "double" subtree (exactly two leaves) propagates its hash upward
        # unchanged, so pairs separated by shared prefix bits hash only once.
        if children[0].is_empty() and children[1].is_double():
            self.hash = children[1].hash
        elif children[1].is_empty() and children[0].is_double():
            self.hash = children[0].hash
        else:
            # Structural invariants: empty/empty and empty/terminal pairs are
            # forbidden; two terminals must be in strictly ascending order.
            if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()):
                raise SetError()
            if children[1].is_empty() and children[0].is_terminal():
                raise SetError
            if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash:
                raise SetError
            self.hash = hashdown(children[0].get_hash() + children[1].get_hash())

    def get_hash(self) -> bytes:
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # Look through an empty sibling; otherwise "double" means two leaves.
        if self.children[0].is_empty():
            return self.children[1].is_double()
        if self.children[1].is_empty():
            return self.children[0].is_double()
        return self.children[0].is_terminal() and self.children[1].is_terminal()

    def add(self, toadd: bytes, depth: int) -> Node:
        bit = get_bit(toadd, depth)
        child = self.children[bit]
        newchild = child.add(toadd, depth + 1)
        if newchild is child:
            # Value was already present; reuse this node unchanged.
            return self
        newvals = [x for x in self.children]
        newvals[bit] = newchild
        return MiddleNode(newvals)

    def remove(self, toremove: bytes, depth: int) -> Node:
        bit = get_bit(toremove, depth)
        child = self.children[bit]
        newchild = child.remove(toremove, depth + 1)
        if newchild is child:
            return self
        otherchild = self.children[bit ^ 1]
        # Collapse single-leaf subtrees back up toward the root.
        if newchild.is_empty() and otherchild.is_terminal():
            return otherchild
        if newchild.is_terminal() and otherchild.is_empty():
            return newchild
        newvals = [x for x in self.children]
        newvals[bit] = newchild
        return MiddleNode(newvals)

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(MIDDLE)
        if get_bit(tocheck, depth) == 0:
            r = self.children[0].is_included(tocheck, depth + 1, p)
            self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty())
            return r
        else:
            self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty())
            return self.children[1].is_included(tocheck, depth + 1, p)

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        # Off-path siblings are truncated to just their hash, except when a
        # double subtree must stay expanded for the verifier to rebuild it.
        if collapse or not self.is_double():
            p.append(TRUNCATED + self.hash)
        else:
            self.is_included(tocheck, depth, p)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        self.children[0]._audit(hashes, bits + [0])
        self.children[1]._audit(hashes, bits + [1])
class TruncatedNode(Node):
    """Subtree known only by its hash, as deserialized from a truncated proof."""

    def __init__(self, hash: bytes):
        self.hash = hash

    def get_hash(self) -> bytes:
        # A truncated node stands in for a middle node, so it carries that tag.
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        return False

    def add(self, toadd: bytes, depth: int) -> Node:
        # Contents are unknown; mutation is a no-op.
        return self

    def remove(self, toremove: bytes, depth: int) -> Node:
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        # Membership cannot be decided inside a truncated subtree.
        raise SetError()

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TRUNCATED + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
class SetError(Exception):
    """Raised when a merkle-set invariant is violated or a proof is malformed."""

    pass
def confirm_included(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify that ``proof`` shows ``val`` IS a member of the set with root ``root``.

    ``val`` is hashed with sha256 before the check, matching how values are
    stored in the set.

    Bug fix: this previously delegated to confirm_not_included_already_hashed,
    which inverted the expected membership result for every call.
    """
    return confirm_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify ``proof`` shows the pre-hashed ``val`` IS in the set with root ``root``."""
    return _confirm(root, val, proof, True)
def confirm_not_included(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify ``proof`` shows ``val`` (sha256-hashed first) is NOT in the set with root ``root``."""
    return confirm_not_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_not_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify ``proof`` shows the pre-hashed ``val`` is NOT in the set with root ``root``."""
    return _confirm(root, val, proof, False)
def _confirm(root: bytes32, val: bytes, proof: bytes, expected: bool) -> bool:
    """Check that ``proof`` is well-formed, matches ``root``, and proves
    membership of ``val`` equal to ``expected``."""
    try:
        p = deserialize_proof(proof)
        # The partial tree must reconstruct the exact same root hash.
        if p.get_root() != root:
            return False
        r, junk = p.is_included_already_hashed(val)
        return r == expected
    except SetError:
        # Malformed proof (or the query path hits a truncated subtree).
        return False
def deserialize_proof(proof: bytes) -> MerkleSet:
    """Parse a serialized proof into a partial MerkleSet; raise SetError if malformed."""
    try:
        root, consumed = _deserialize(proof, 0, [])
    except IndexError:
        # A short read past the end of the buffer means a truncated proof.
        raise SetError()
    # Trailing bytes mean the proof is not a single well-formed subtree.
    if consumed != len(proof):
        raise SetError()
    return MerkleSet(root)
def _deserialize(proof: bytes, pos: int, bits: List[int]) -> Tuple[Node, int]:
    """Recursively decode one subtree starting at ``pos``.

    Returns the decoded node and the position just past it.  ``bits`` records
    the path taken so far so terminal nodes can audit that their hash really
    belongs at this position in the tree.
    """
    t = proof[pos : pos + 1]  # flake8: noqa
    if t == EMPTY:
        return _empty, pos + 1
    if t == TERMINAL:
        return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33  # flake8: noqa
    if t == TRUNCATED:
        return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33  # flake8: noqa
    if t != MIDDLE:
        raise SetError()
    # MIDDLE: decode the two children in sequence.
    v0, pos = _deserialize(proof, pos + 1, bits + [0])
    v1, pos = _deserialize(proof, pos, bits + [1])
    return MiddleNode([v0, v1]), pos
| 29.054726 | 112 | 0.617894 | from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple
from chives.types.blockchain_format.sized_bytes import bytes32
EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])
BLANK = bytes32([0] * 32)
prehashed: Dict[bytes, Any] = {}
def init_prehashed():
for x in [EMPTY, TERMINAL, MIDDLE]:
for y in [EMPTY, TERMINAL, MIDDLE]:
prehashed[x + y] = sha256(bytes([0] * 30) + x + y)
init_prehashed()
def hashdown(mystr: bytes) -> bytes:
assert len(mystr) == 66
h = prehashed[bytes(mystr[0:1] + mystr[33:34])].copy()
h.update(mystr[1:33] + mystr[34:])
return h.digest()[:32]
def compress_root(mystr: bytes) -> bytes32:
assert len(mystr) == 33
if mystr[0:1] == MIDDLE:
return bytes32(mystr[1:])
if mystr[0:1] == EMPTY:
assert mystr[1:] == BLANK
return BLANK
return bytes32(sha256(mystr).digest()[:32])
def get_bit(mybytes: bytes, pos: int) -> int:
assert len(mybytes) == 32
return (mybytes[pos // 8] >> (7 - (pos % 8))) & 1
class Node(metaclass=ABCMeta):
hash: bytes
@abstractmethod
def get_hash(self) -> bytes:
pass
@abstractmethod
def is_empty(self) -> bool:
pass
@abstractmethod
def is_terminal(self) -> bool:
pass
@abstractmethod
def is_double(self) -> bool:
pass
@abstractmethod
def add(self, toadd: bytes, depth: int) -> "Node":
pass
@abstractmethod
def remove(self, toremove: bytes, depth: int):
pass
@abstractmethod
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
pass
@abstractmethod
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
pass
@abstractmethod
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
class MerkleSet:
root: Node
def __init__(self, root: Node = None):
if root is None:
self.root = _empty
else:
self.root = root
def get_root(self) -> bytes32:
return compress_root(self.root.get_hash())
def add_already_hashed(self, toadd: bytes):
self.root = self.root.add(toadd, 0)
def remove_already_hashed(self, toremove: bytes):
self.root = self.root.remove(toremove, 0)
def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
proof: List = []
r = self.root.is_included(tocheck, 0, proof)
return r, b"".join(proof)
def _audit(self, hashes: List[bytes]):
newhashes: List = []
self.root._audit(newhashes, [])
assert newhashes == sorted(newhashes)
class EmptyNode(Node):
def __init__(self):
self.hash = BLANK
def get_hash(self) -> bytes:
return EMPTY + BLANK
def is_empty(self) -> bool:
return True
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
raise SetError()
def add(self, toadd: bytes, depth: int) -> Node:
return TerminalNode(toadd)
def remove(self, toremove: bytes, depth: int) -> Node:
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(EMPTY)
return False
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(EMPTY)
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
_empty = EmptyNode()
class TerminalNode(Node):
def __init__(self, hash: bytes, bits: List[int] = None):
assert len(hash) == 32
self.hash = hash
if bits is not None:
self._audit([], bits)
def get_hash(self) -> bytes:
return TERMINAL + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return True
def is_double(self) -> bool:
raise SetError()
def add(self, toadd: bytes, depth: int) -> Node:
if toadd == self.hash:
return self
if toadd > self.hash:
return self._make_middle([self, TerminalNode(toadd)], depth)
else:
return self._make_middle([TerminalNode(toadd), self], depth)
def _make_middle(self, children: Any, depth: int) -> Node:
cbits = [get_bit(child.hash, depth) for child in children]
if cbits[0] != cbits[1]:
return MiddleNode(children)
nextvals: List[Node] = [_empty, _empty]
nextvals[cbits[0] ^ 1] = _empty
nextvals[cbits[0]] = self._make_middle(children, depth + 1)
return MiddleNode(nextvals)
def remove(self, toremove: bytes, depth: int) -> Node:
if toremove == self.hash:
return _empty
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(TERMINAL + self.hash)
return tocheck == self.hash
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(TERMINAL + self.hash)
def _audit(self, hashes: List[bytes], bits: List[int]):
hashes.append(self.hash)
for pos, v in enumerate(bits):
assert get_bit(self.hash, pos) == v
class MiddleNode(Node):
def __init__(self, children: List[Node]):
self.children = children
if children[0].is_empty() and children[1].is_double():
self.hash = children[1].hash
elif children[1].is_empty() and children[0].is_double():
self.hash = children[0].hash
else:
if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()):
raise SetError()
if children[1].is_empty() and children[0].is_terminal():
raise SetError
if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash:
raise SetError
self.hash = hashdown(children[0].get_hash() + children[1].get_hash())
def get_hash(self) -> bytes:
return MIDDLE + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
if self.children[0].is_empty():
return self.children[1].is_double()
if self.children[1].is_empty():
return self.children[0].is_double()
return self.children[0].is_terminal() and self.children[1].is_terminal()
def add(self, toadd: bytes, depth: int) -> Node:
bit = get_bit(toadd, depth)
child = self.children[bit]
newchild = child.add(toadd, depth + 1)
if newchild is child:
return self
newvals = [x for x in self.children]
newvals[bit] = newchild
return MiddleNode(newvals)
def remove(self, toremove: bytes, depth: int) -> Node:
bit = get_bit(toremove, depth)
child = self.children[bit]
newchild = child.remove(toremove, depth + 1)
if newchild is child:
return self
otherchild = self.children[bit ^ 1]
if newchild.is_empty() and otherchild.is_terminal():
return otherchild
if newchild.is_terminal() and otherchild.is_empty():
return newchild
newvals = [x for x in self.children]
newvals[bit] = newchild
return MiddleNode(newvals)
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(MIDDLE)
if get_bit(tocheck, depth) == 0:
r = self.children[0].is_included(tocheck, depth + 1, p)
self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty())
return r
else:
self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty())
return self.children[1].is_included(tocheck, depth + 1, p)
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
if collapse or not self.is_double():
p.append(TRUNCATED + self.hash)
else:
self.is_included(tocheck, depth, p)
def _audit(self, hashes: List[bytes], bits: List[int]):
self.children[0]._audit(hashes, bits + [0])
self.children[1]._audit(hashes, bits + [1])
class TruncatedNode(Node):
def __init__(self, hash: bytes):
self.hash = hash
def get_hash(self) -> bytes:
return MIDDLE + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
return False
def add(self, toadd: bytes, depth: int) -> Node:
return self
def remove(self, toremove: bytes, depth: int) -> Node:
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
raise SetError()
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(TRUNCATED + self.hash)
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
class SetError(Exception):
pass
def confirm_included(root: Node, val: bytes, proof: bytes32) -> bool:
return confirm_not_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_included_already_hashed(root: Node, val: bytes, proof: bytes) -> bool:
return _confirm(root, val, proof, True)
def confirm_not_included(root: Node, val: bytes, proof: bytes32) -> bool:
return confirm_not_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_not_included_already_hashed(root: Node, val: bytes, proof: bytes) -> bool:
return _confirm(root, val, proof, False)
def _confirm(root: Node, val: bytes, proof: bytes, expected: bool) -> bool:
try:
p = deserialize_proof(proof)
if p.get_root() != root:
return False
r, junk = p.is_included_already_hashed(val)
return r == expected
except SetError:
return False
def deserialize_proof(proof: bytes) -> MerkleSet:
try:
r, pos = _deserialize(proof, 0, [])
if pos != len(proof):
raise SetError()
return MerkleSet(r)
except IndexError:
raise SetError()
def _deserialize(proof: bytes, pos: int, bits: List[int]) -> Tuple[Node, int]:
t = proof[pos : pos + 1]
if t == EMPTY:
return _empty, pos + 1
if t == TERMINAL:
return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33
if t == TRUNCATED:
return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33
if t != MIDDLE:
raise SetError()
v0, pos = _deserialize(proof, pos + 1, bits + [0])
v1, pos = _deserialize(proof, pos, bits + [1])
return MiddleNode([v0, v1]), pos
| true | true |
f73151573f84138e26ebce007711c74837f84410 | 16,958 | py | Python | colour/characterisation/datasets/cameras/dslr/sensitivities.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | colour/characterisation/datasets/cameras/dslr/sensitivities.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | colour/characterisation/datasets/cameras/dslr/sensitivities.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | """
Sensitivities of *DSLR* Cameras
===============================
Defines the sensitivities of *DSLR* cameras.
The data for each *DSLR* camera takes the form of a *dict* of
:class:`colour.characterisation.RGB_CameraSensitivities` classes as follows::
{
'name': RGB_CameraSensitivities,
...,
'name': RGB_CameraSensitivities
}
The following *DSLR* cameras are available:
- Nikon 5100 (NPL)
- Sigma SDMerill (NPL)
References
----------
- :cite:`Darrodi2015a` : Darrodi, M. M., Finlayson, G., Goodman, T., &
Mackiewicz, M. (2015). Reference data set for camera spectral sensitivity
estimation. Journal of the Optical Society of America A, 32(3), 381.
doi:10.1364/JOSAA.32.000381
"""
from __future__ import annotations
from functools import partial
from colour.characterisation import RGB_CameraSensitivities
from colour.hints import Dict
from colour.utilities import LazyCaseInsensitiveMapping
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"DATA_CAMERA_SENSITIVITIES_DSLR",
"MSDS_CAMERA_SENSITIVITIES_DSLR",
]
DATA_CAMERA_SENSITIVITIES_DSLR: Dict = {
"Nikon 5100 (NPL)": {
380.0: (
0.00156384299336578000,
0.00011500000000000000,
0.00180956039402335990,
),
385.0: (
0.00189691771384825000,
0.00152114360178015000,
0.00048982814544150399,
),
390.0: (
0.00000000000000000000,
0.00057430499183558695,
0.00087943069176996504,
),
395.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
400.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00153246068848051000,
),
405.0: (
0.00071776703300973298,
0.00119722386224553000,
0.00569805602282062030,
),
410.0: (
0.00292397466563330000,
0.00133571498448177000,
0.01660828769874150200,
),
415.0: (
0.01293626801713740000,
0.01319431696052810100,
0.07879120559214590500,
),
420.0: (
0.04959786481566520000,
0.06497102451249539600,
0.36171350364994898000,
),
425.0: (
0.07607250435970400200,
0.11510308718828900000,
0.65970462106512295000,
),
430.0: (
0.07658892708274399300,
0.13706582547087201000,
0.75534360010359503000,
),
435.0: (
0.06833381956036009600,
0.15242852584030600000,
0.81045312707380701000,
),
440.0: (
0.06131816189646559900,
0.16864005450745301000,
0.87494523362472998000,
),
445.0: (
0.05473314457789760200,
0.18329934605049600000,
0.92671273991178704000,
),
450.0: (
0.04886204743702320100,
0.19603263456229600000,
0.96314088025989897000,
),
455.0: (
0.04284591974257399800,
0.21733653278361301000,
0.98065048133510302000,
),
460.0: (
0.04022845332691499900,
0.25424357380995000000,
1.00000000000000000000,
),
465.0: (
0.04340795992263239700,
0.30864811930649899000,
0.99640467488711104000,
),
470.0: (
0.04762021431177430200,
0.37346871184252001000,
0.98896988650084305000,
),
475.0: (
0.05077188480559390000,
0.42915806139893697000,
0.95660139953157997000,
),
480.0: (
0.05280329597225499900,
0.45965432432137399000,
0.90495886986980800000,
),
485.0: (
0.05257122025495090300,
0.47106435446394301000,
0.83940927710351598000,
),
490.0: (
0.04789463902845950100,
0.48885616444524799000,
0.75146259578963404000,
),
495.0: (
0.04823994170483859900,
0.53715178104087602000,
0.66010202032260801000,
),
500.0: (
0.05022924089718029700,
0.61649118695883898000,
0.56706879193613802000,
),
505.0: (
0.05507649735001429700,
0.70700638759968903000,
0.47935094782603899000,
),
510.0: (
0.06370211901178619900,
0.80096424601366301000,
0.39406273870351299000,
),
515.0: (
0.08038951305895999900,
0.88137256686267296000,
0.31427061879449603000,
),
520.0: (
0.10038750399831201000,
0.93887792119838498000,
0.24981663439426000000,
),
525.0: (
0.11861314902313400000,
0.98446559576523596000,
0.20182351924718100000,
),
530.0: (
0.12360875120338000000,
1.00000000000000000000,
0.16163395085177601000,
),
535.0: (
0.10306249932787701000,
0.99084026557129701000,
0.13516143147333401000,
),
540.0: (
0.07634108360672720000,
0.96154626462922099000,
0.10998875716043301000,
),
545.0: (
0.05278086364640900000,
0.92814388346877297000,
0.08639435407789379500,
),
550.0: (
0.04118873831058649700,
0.88910231592076505000,
0.06525313059219839400,
),
555.0: (
0.03904385351931050100,
0.83494222924161199000,
0.04785595345227559900,
),
560.0: (
0.04254429440089119900,
0.77631807500187500000,
0.03413932303860940000,
),
565.0: (
0.06021313241068020100,
0.70731424532056497000,
0.02401990976851929900,
),
570.0: (
0.11179621705066800000,
0.63579620249170998000,
0.01976793598476750100,
),
575.0: (
0.26967059703276203000,
0.56551528450380395000,
0.01634844781073010000,
),
580.0: (
0.56450337990639099000,
0.49275517253522499000,
0.01381733937020259900,
),
585.0: (
0.85360126947261405000,
0.42475654159075799000,
0.01195294647966710000,
),
590.0: (
0.98103242181506201000,
0.35178931226078303000,
0.01000909395820090100,
),
595.0: (
1.00000000000000000000,
0.27817849879541801000,
0.00758776308929657970,
),
600.0: (
0.96307105371259005000,
0.21167353249961901000,
0.00645584463521649970,
),
605.0: (
0.90552061898043101000,
0.15671644549433000000,
0.00522978285684488030,
),
610.0: (
0.83427841652645296000,
0.11803962073050200000,
0.00365998459503786990,
),
615.0: (
0.76798733762510296000,
0.08885249534231440300,
0.00395538505488667040,
),
620.0: (
0.70366798041157996000,
0.07010184404853669900,
0.00396835221654468030,
),
625.0: (
0.63916484476123703000,
0.05690899470893220200,
0.00349138004486036990,
),
630.0: (
0.57081292173776299000,
0.04729879101895839700,
0.00404302103181797010,
),
635.0: (
0.49581796193158800000,
0.04119589002556579800,
0.00418929985295813000,
),
640.0: (
0.43833913452368101000,
0.03525207084991220000,
0.00554676856500057980,
),
645.0: (
0.38896992260406899000,
0.03069313144532450100,
0.00546423323547744030,
),
650.0: (
0.34295621205484700000,
0.02680396295683950100,
0.00597382847392098970,
),
655.0: (
0.29278541836293998000,
0.02352430119871520100,
0.00630906774763779000,
),
660.0: (
0.23770718073119301000,
0.02034633252474659900,
0.00610412697742267980,
),
665.0: (
0.16491386803178501000,
0.01545848325340879900,
0.00483655792375416000,
),
670.0: (
0.09128771706377150600,
0.00944075104617158980,
0.00302664794586984980,
),
675.0: (
0.04205615047283590300,
0.00508102204063505970,
0.00172169700987674990,
),
680.0: (
0.02058267877678380100,
0.00291019166901752010,
0.00078065128657817595,
),
685.0: (
0.01028680596369610000,
0.00162657557793382010,
0.00056963070848184102,
),
690.0: (
0.00540759846247261970,
0.00092251569139627796,
0.00027523296133938200,
),
695.0: (
0.00272409261591003000,
0.00049743349969026901,
0.00029672137857068598,
),
700.0: (
0.00127834798711079000,
0.00041215940263165701,
0.00024951192304202899,
),
705.0: (
0.00078123118374132301,
0.00031692634104666300,
8.5000000000000006e-05,
),
710.0: (
0.00047981421940270001,
0.00025621496960251102,
0.00041916895092770603,
),
715.0: (
0.00049133356428571098,
0.00000000000000000000,
0.00015331743444139899,
),
720.0: (
0.00017414897796340199,
0.00024353518865341200,
1.8300000000000001e-05,
),
725.0: (
0.00012017462571764001,
6.0200000000000000e-05,
0.00000000000000000000,
),
730.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00033869381945204901,
),
735.0: (
6.1199999999999997e-05,
0.00000000000000000000,
0.00000000000000000000,
),
740.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
745.0: (
0.00000000000000000000,
1.7099999999999999e-05,
0.00016527828734010200,
),
750.0: (
0.00031099754946016501,
5.2099999999999999e-05,
0.00017755262214537101,
),
755.0: (
0.00000000000000000000,
8.8499999999999996e-05,
0.00000000000000000000,
),
760.0: (
0.00000000000000000000,
0.00000000000000000000,
2.4300000000000001e-05,
),
765.0: (
0.00000000000000000000,
0.00000000000000000000,
6.1799999999999998e-05,
),
770.0: (
8.5599999999999994e-05,
0.00013799999999999999,
0.00026260703183506501,
),
775.0: (
0.00013831372865247499,
0.0001786501727059410,
0.00028050537004191899,
),
780.0: (
3.6199999999999999e-05,
4.2500000000000003e-05,
0.00000000000000000000,
),
},
"Sigma SDMerill (NPL)": {
400.0: (
0.00562107440608700020,
0.00632809751263116970,
0.16215942413307899000,
),
410.0: (
0.00650335624511722000,
0.00976180459591275040,
0.28549837804628603000,
),
420.0: (
0.07407911289140040000,
0.02527177008261050100,
0.39690431060902098000,
),
430.0: (
0.04302295946292879900,
0.08375118585311219800,
0.50831024317175599000,
),
440.0: (
0.03450952562247010200,
0.14370381974360999000,
0.62211847246948804000,
),
450.0: (
0.01889156723434350100,
0.18361168930882199000,
0.73742136245769496000,
),
460.0: (
0.00731107699680200000,
0.40909478009952999000,
0.94538036670138004000,
),
470.0: (
0.04549915123096019700,
0.51595564086176404000,
0.96441494770280400000,
),
480.0: (
0.05676752921111680200,
0.60120664662705503000,
1.00000000000000000000,
),
490.0: (
0.13419592065917799000,
0.67031679980136305000,
0.98598021188452500000,
),
500.0: (
0.16475268997837600000,
0.75258747153475802000,
0.98340266357529005000,
),
510.0: (
0.21712641978639199000,
0.84381384368944201000,
0.96969219567072595000,
),
520.0: (
0.30648343835824399000,
0.90151724558812696000,
0.94280817402079797000,
),
530.0: (
0.34984579614888500000,
0.91975030668767699000,
0.89664279918070899000,
),
540.0: (
0.44374258133259298000,
0.96799429052157804000,
0.88444590220041897000,
),
550.0: (
0.44488860528126301000,
0.95725231064041105000,
0.86791899071597101000,
),
560.0: (
0.47897575674702603000,
0.95204791860047400000,
0.83375679584908402000,
),
570.0: (
0.50950291481073895000,
0.97628014458399803000,
0.83204140240572999000,
),
580.0: (
0.59262909378530504000,
0.97258624388955806000,
0.80054956384778198000,
),
590.0: (
0.67383327560697603000,
1.00000000000000000000,
0.78289512474646505000,
),
600.0: (
0.71403771488106504000,
0.96948452757777404000,
0.73946953007191796000,
),
610.0: (
0.86000761311495100000,
0.95441319124850699000,
0.66718640174985699000,
),
620.0: (
0.89810302849565204000,
0.93335435890921303000,
0.62043627806816704000,
),
630.0: (
1.00000000000000000000,
0.92571406833636205000,
0.61116087876956704000,
),
640.0: (
0.99494213311245205000,
0.88486439541503403000,
0.55173556195710605000,
),
650.0: (
0.92085127736137995000,
0.76165184741615699000,
0.46538831744516401000,
),
660.0: (
0.18143311631425299000,
0.14052437057150499000,
0.07961907836720690000,
),
670.0: (
0.00630978795372749960,
0.00414367215817645990,
0.00059244446107236802,
),
680.0: (
0.00528874383171553000,
0.00183198958165669010,
0.00468563680483140980,
),
},
}
MSDS_CAMERA_SENSITIVITIES_DSLR = LazyCaseInsensitiveMapping(
{
"Nikon 5100 (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Nikon 5100 (NPL)"],
name="Nikon 5100 (NPL)",
),
"Sigma SDMerill (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Sigma SDMerill (NPL)"],
name="Sigma SDMerill (NPL)",
),
}
)
"""
Multi-spectral distributions of *DSLR* camera sensitivities.
References
----------
:cite:`Darrodi2015a`
"""
| 27.046252 | 78 | 0.513445 |
from __future__ import annotations
from functools import partial
from colour.characterisation import RGB_CameraSensitivities
from colour.hints import Dict
from colour.utilities import LazyCaseInsensitiveMapping
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"DATA_CAMERA_SENSITIVITIES_DSLR",
"MSDS_CAMERA_SENSITIVITIES_DSLR",
]
DATA_CAMERA_SENSITIVITIES_DSLR: Dict = {
"Nikon 5100 (NPL)": {
380.0: (
0.00156384299336578000,
0.00011500000000000000,
0.00180956039402335990,
),
385.0: (
0.00189691771384825000,
0.00152114360178015000,
0.00048982814544150399,
),
390.0: (
0.00000000000000000000,
0.00057430499183558695,
0.00087943069176996504,
),
395.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
400.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00153246068848051000,
),
405.0: (
0.00071776703300973298,
0.00119722386224553000,
0.00569805602282062030,
),
410.0: (
0.00292397466563330000,
0.00133571498448177000,
0.01660828769874150200,
),
415.0: (
0.01293626801713740000,
0.01319431696052810100,
0.07879120559214590500,
),
420.0: (
0.04959786481566520000,
0.06497102451249539600,
0.36171350364994898000,
),
425.0: (
0.07607250435970400200,
0.11510308718828900000,
0.65970462106512295000,
),
430.0: (
0.07658892708274399300,
0.13706582547087201000,
0.75534360010359503000,
),
435.0: (
0.06833381956036009600,
0.15242852584030600000,
0.81045312707380701000,
),
440.0: (
0.06131816189646559900,
0.16864005450745301000,
0.87494523362472998000,
),
445.0: (
0.05473314457789760200,
0.18329934605049600000,
0.92671273991178704000,
),
450.0: (
0.04886204743702320100,
0.19603263456229600000,
0.96314088025989897000,
),
455.0: (
0.04284591974257399800,
0.21733653278361301000,
0.98065048133510302000,
),
460.0: (
0.04022845332691499900,
0.25424357380995000000,
1.00000000000000000000,
),
465.0: (
0.04340795992263239700,
0.30864811930649899000,
0.99640467488711104000,
),
470.0: (
0.04762021431177430200,
0.37346871184252001000,
0.98896988650084305000,
),
475.0: (
0.05077188480559390000,
0.42915806139893697000,
0.95660139953157997000,
),
480.0: (
0.05280329597225499900,
0.45965432432137399000,
0.90495886986980800000,
),
485.0: (
0.05257122025495090300,
0.47106435446394301000,
0.83940927710351598000,
),
490.0: (
0.04789463902845950100,
0.48885616444524799000,
0.75146259578963404000,
),
495.0: (
0.04823994170483859900,
0.53715178104087602000,
0.66010202032260801000,
),
500.0: (
0.05022924089718029700,
0.61649118695883898000,
0.56706879193613802000,
),
505.0: (
0.05507649735001429700,
0.70700638759968903000,
0.47935094782603899000,
),
510.0: (
0.06370211901178619900,
0.80096424601366301000,
0.39406273870351299000,
),
515.0: (
0.08038951305895999900,
0.88137256686267296000,
0.31427061879449603000,
),
520.0: (
0.10038750399831201000,
0.93887792119838498000,
0.24981663439426000000,
),
525.0: (
0.11861314902313400000,
0.98446559576523596000,
0.20182351924718100000,
),
530.0: (
0.12360875120338000000,
1.00000000000000000000,
0.16163395085177601000,
),
535.0: (
0.10306249932787701000,
0.99084026557129701000,
0.13516143147333401000,
),
540.0: (
0.07634108360672720000,
0.96154626462922099000,
0.10998875716043301000,
),
545.0: (
0.05278086364640900000,
0.92814388346877297000,
0.08639435407789379500,
),
550.0: (
0.04118873831058649700,
0.88910231592076505000,
0.06525313059219839400,
),
555.0: (
0.03904385351931050100,
0.83494222924161199000,
0.04785595345227559900,
),
560.0: (
0.04254429440089119900,
0.77631807500187500000,
0.03413932303860940000,
),
565.0: (
0.06021313241068020100,
0.70731424532056497000,
0.02401990976851929900,
),
570.0: (
0.11179621705066800000,
0.63579620249170998000,
0.01976793598476750100,
),
575.0: (
0.26967059703276203000,
0.56551528450380395000,
0.01634844781073010000,
),
580.0: (
0.56450337990639099000,
0.49275517253522499000,
0.01381733937020259900,
),
585.0: (
0.85360126947261405000,
0.42475654159075799000,
0.01195294647966710000,
),
590.0: (
0.98103242181506201000,
0.35178931226078303000,
0.01000909395820090100,
),
595.0: (
1.00000000000000000000,
0.27817849879541801000,
0.00758776308929657970,
),
600.0: (
0.96307105371259005000,
0.21167353249961901000,
0.00645584463521649970,
),
605.0: (
0.90552061898043101000,
0.15671644549433000000,
0.00522978285684488030,
),
610.0: (
0.83427841652645296000,
0.11803962073050200000,
0.00365998459503786990,
),
615.0: (
0.76798733762510296000,
0.08885249534231440300,
0.00395538505488667040,
),
620.0: (
0.70366798041157996000,
0.07010184404853669900,
0.00396835221654468030,
),
625.0: (
0.63916484476123703000,
0.05690899470893220200,
0.00349138004486036990,
),
630.0: (
0.57081292173776299000,
0.04729879101895839700,
0.00404302103181797010,
),
635.0: (
0.49581796193158800000,
0.04119589002556579800,
0.00418929985295813000,
),
640.0: (
0.43833913452368101000,
0.03525207084991220000,
0.00554676856500057980,
),
645.0: (
0.38896992260406899000,
0.03069313144532450100,
0.00546423323547744030,
),
650.0: (
0.34295621205484700000,
0.02680396295683950100,
0.00597382847392098970,
),
655.0: (
0.29278541836293998000,
0.02352430119871520100,
0.00630906774763779000,
),
660.0: (
0.23770718073119301000,
0.02034633252474659900,
0.00610412697742267980,
),
665.0: (
0.16491386803178501000,
0.01545848325340879900,
0.00483655792375416000,
),
670.0: (
0.09128771706377150600,
0.00944075104617158980,
0.00302664794586984980,
),
675.0: (
0.04205615047283590300,
0.00508102204063505970,
0.00172169700987674990,
),
680.0: (
0.02058267877678380100,
0.00291019166901752010,
0.00078065128657817595,
),
685.0: (
0.01028680596369610000,
0.00162657557793382010,
0.00056963070848184102,
),
690.0: (
0.00540759846247261970,
0.00092251569139627796,
0.00027523296133938200,
),
695.0: (
0.00272409261591003000,
0.00049743349969026901,
0.00029672137857068598,
),
700.0: (
0.00127834798711079000,
0.00041215940263165701,
0.00024951192304202899,
),
705.0: (
0.00078123118374132301,
0.00031692634104666300,
8.5000000000000006e-05,
),
710.0: (
0.00047981421940270001,
0.00025621496960251102,
0.00041916895092770603,
),
715.0: (
0.00049133356428571098,
0.00000000000000000000,
0.00015331743444139899,
),
720.0: (
0.00017414897796340199,
0.00024353518865341200,
1.8300000000000001e-05,
),
725.0: (
0.00012017462571764001,
6.0200000000000000e-05,
0.00000000000000000000,
),
730.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00033869381945204901,
),
735.0: (
6.1199999999999997e-05,
0.00000000000000000000,
0.00000000000000000000,
),
740.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
745.0: (
0.00000000000000000000,
1.7099999999999999e-05,
0.00016527828734010200,
),
750.0: (
0.00031099754946016501,
5.2099999999999999e-05,
0.00017755262214537101,
),
755.0: (
0.00000000000000000000,
8.8499999999999996e-05,
0.00000000000000000000,
),
760.0: (
0.00000000000000000000,
0.00000000000000000000,
2.4300000000000001e-05,
),
765.0: (
0.00000000000000000000,
0.00000000000000000000,
6.1799999999999998e-05,
),
770.0: (
8.5599999999999994e-05,
0.00013799999999999999,
0.00026260703183506501,
),
775.0: (
0.00013831372865247499,
0.0001786501727059410,
0.00028050537004191899,
),
780.0: (
3.6199999999999999e-05,
4.2500000000000003e-05,
0.00000000000000000000,
),
},
"Sigma SDMerill (NPL)": {
400.0: (
0.00562107440608700020,
0.00632809751263116970,
0.16215942413307899000,
),
410.0: (
0.00650335624511722000,
0.00976180459591275040,
0.28549837804628603000,
),
420.0: (
0.07407911289140040000,
0.02527177008261050100,
0.39690431060902098000,
),
430.0: (
0.04302295946292879900,
0.08375118585311219800,
0.50831024317175599000,
),
440.0: (
0.03450952562247010200,
0.14370381974360999000,
0.62211847246948804000,
),
450.0: (
0.01889156723434350100,
0.18361168930882199000,
0.73742136245769496000,
),
460.0: (
0.00731107699680200000,
0.40909478009952999000,
0.94538036670138004000,
),
470.0: (
0.04549915123096019700,
0.51595564086176404000,
0.96441494770280400000,
),
480.0: (
0.05676752921111680200,
0.60120664662705503000,
1.00000000000000000000,
),
490.0: (
0.13419592065917799000,
0.67031679980136305000,
0.98598021188452500000,
),
500.0: (
0.16475268997837600000,
0.75258747153475802000,
0.98340266357529005000,
),
510.0: (
0.21712641978639199000,
0.84381384368944201000,
0.96969219567072595000,
),
520.0: (
0.30648343835824399000,
0.90151724558812696000,
0.94280817402079797000,
),
530.0: (
0.34984579614888500000,
0.91975030668767699000,
0.89664279918070899000,
),
540.0: (
0.44374258133259298000,
0.96799429052157804000,
0.88444590220041897000,
),
550.0: (
0.44488860528126301000,
0.95725231064041105000,
0.86791899071597101000,
),
560.0: (
0.47897575674702603000,
0.95204791860047400000,
0.83375679584908402000,
),
570.0: (
0.50950291481073895000,
0.97628014458399803000,
0.83204140240572999000,
),
580.0: (
0.59262909378530504000,
0.97258624388955806000,
0.80054956384778198000,
),
590.0: (
0.67383327560697603000,
1.00000000000000000000,
0.78289512474646505000,
),
600.0: (
0.71403771488106504000,
0.96948452757777404000,
0.73946953007191796000,
),
610.0: (
0.86000761311495100000,
0.95441319124850699000,
0.66718640174985699000,
),
620.0: (
0.89810302849565204000,
0.93335435890921303000,
0.62043627806816704000,
),
630.0: (
1.00000000000000000000,
0.92571406833636205000,
0.61116087876956704000,
),
640.0: (
0.99494213311245205000,
0.88486439541503403000,
0.55173556195710605000,
),
650.0: (
0.92085127736137995000,
0.76165184741615699000,
0.46538831744516401000,
),
660.0: (
0.18143311631425299000,
0.14052437057150499000,
0.07961907836720690000,
),
670.0: (
0.00630978795372749960,
0.00414367215817645990,
0.00059244446107236802,
),
680.0: (
0.00528874383171553000,
0.00183198958165669010,
0.00468563680483140980,
),
},
}
MSDS_CAMERA_SENSITIVITIES_DSLR = LazyCaseInsensitiveMapping(
{
"Nikon 5100 (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Nikon 5100 (NPL)"],
name="Nikon 5100 (NPL)",
),
"Sigma SDMerill (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Sigma SDMerill (NPL)"],
name="Sigma SDMerill (NPL)",
),
}
)
| true | true |
f731522e9661ea03a15b6c0891cbf1369590cc3e | 5,128 | py | Python | youtubeto/raindrop.py | Perlence/youtube-to | b0183719f3c40825f7fab520294bd55574fde581 | [
"BSD-3-Clause"
] | 1 | 2021-06-18T22:34:00.000Z | 2021-06-18T22:34:00.000Z | youtubeto/raindrop.py | Perlence/youtube-to | b0183719f3c40825f7fab520294bd55574fde581 | [
"BSD-3-Clause"
] | null | null | null | youtubeto/raindrop.py | Perlence/youtube-to | b0183719f3c40825f7fab520294bd55574fde581 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from gevent import monkey
monkey.patch_all(thread=False, select=False)
import json
import arrow
from apiclient.discovery import build
from gevent.pool import Pool
from httplib2 import Http
from logbook import Logger
from oauth2client import client
from . import config
logger = Logger('youtubeto.raindrop')
class Raindrop(object):
path = 'https://raindrop.io/api/'
def __init__(self, session_id=None):
self.session_id = session_id
def _request(self, uri, method='GET', body=None, headers=None, **kwargs):
uri = self.path + uri
if headers is None:
headers = {}
if body is not None:
body = json.dumps(body)
headers['Content-Type'] = 'application/json; charset=UTF-8'
if self.session_id is not None:
headers['Cookie'] = 'connect.sid=' + self.session_id
_, content = Http().request(uri, method, body, headers, **kwargs)
return json.loads(content)
def get(self, uri):
return self._request(uri, 'GET')
def create(self, uri, **params):
return self._request(uri, 'POST', params)
def delete(self, uri):
return self._request(uri, 'DELETE')
def update(self, uri, **params):
return self._request(uri, 'PUT', params)
def main():
if config.YOUTUBE_TOKEN_EXPIRY:
youtube_token_expiry = arrow.get(config.YOUTUBE_TOKEN_EXPIRY)
else:
youtube_token_expiry = None
if config.YOUTUBE_REFRESH_TOKEN:
creds = client.OAuth2Credentials(
config.YOUTUBE_ACCESS_TOKEN, config.YOUTUBE_CLIENT_ID,
config.YOUTUBE_CLIENT_SECRET, config.YOUTUBE_REFRESH_TOKEN,
youtube_token_expiry, config.YOUTUBE_TOKEN_URI,
config.YOUTUBE_USER_AGENT)
if youtube_token_expiry <= arrow.get():
creds.refresh(Http())
config.YOUTUBE_ACCESS_TOKEN = creds.access_token
config.YOUTUBE_TOKEN_EXPIRY = creds.token_expiry.isoformat()
config.save()
else:
import webbrowser
flow = client.OAuth2WebServerFlow(
config.YOUTUBE_CLIENT_ID,
config.YOUTUBE_CLIENT_SECRET,
config.YOUTUBE_SCOPE,
config.YOUTUBE_REDIRECT_URI)
webbrowser.open(flow.step1_get_authorize_url())
code = raw_input('Input code: ')
creds = flow.step2_exchange(code)
config.YOUTUBE_ACCESS_TOKEN = creds.access_token
config.YOUTUBE_CLIENT_ID = creds.client_id
config.YOUTUBE_CLIENT_SECRET = creds.client_secret
config.YOUTUBE_REFRESH_TOKEN = creds.refresh_token
config.YOUTUBE_TOKEN_EXPIRY = creds.token_expiry.isoformat()
config.YOUTUBE_TOKEN_URI = creds.token_uri
config.YOUTUBE_USER_AGENT = creds.user_agent
config.save()
http = authorized_http(creds)
youtube = build('youtube', 'v3', http=http())
if not config.RAINDROP_SESSION_ID:
import webbrowser
webbrowser.open('https://raindrop.io/account/signin')
config.RAINDROP_SESSION_ID = raw_input('Input session id: ')
config.save()
raindrop = Raindrop(config.RAINDROP_SESSION_ID)
playlists = youtube.playlists().list(part='snippet', mine=True).execute()
favorites = next((item for item in playlists['items']
if item['snippet']['title'] == 'Favorites'), None)
req = youtube.playlistItems().list(part='snippet',
playlistId=favorites['id'])
pool = Pool()
while req:
res = req.execute()
for item in res['items']:
pool.spawn(put_in_raindrop, youtube, http, raindrop, item)
pool.join()
req = youtube.playlistItems().list_next(req, res)
def authorized_http(creds):
return lambda: creds.authorize(Http())
def put_in_raindrop(youtube, http, raindrop, item):
logger.info('Adding bookmark for {snippet[title]}', **item)
collection_id = config.RAINDROP_COLLECTION_ID
req = youtube.videos().list(part='snippet',
id=item['snippet']['resourceId']['videoId'])
video = req.execute(http())['items'][0]
url = ('http://www.youtube.com/watch'
'?v={resourceId[videoId]}'
'&list={playlistId}'
.format(**item['snippet']))
title = u'{title} by {channelTitle}'.format(**video['snippet'])
result = raindrop.create(
'raindrop',
collectionId=collection_id,
cover=0,
coverEnabled=True,
drop=False,
excerpt=video['snippet']['description'],
haveScreenshot=False,
media=[{
'link': get_biggest_thumbnail(item),
'type': 'image'
}],
tags=[],
title=title,
url=url)
logger.info('Added bookmark for {snippet[title]}', **item)
def get_biggest_thumbnail(item):
for thumbnail in ('maxres', 'standard', 'high', 'medium', 'default'):
result = item['snippet']['thumbnails'].get(thumbnail)
if result is not None:
return result['url']
if __name__ == '__main__':
main()
| 33.736842 | 77 | 0.633385 | from __future__ import absolute_import
from gevent import monkey
monkey.patch_all(thread=False, select=False)
import json
import arrow
from apiclient.discovery import build
from gevent.pool import Pool
from httplib2 import Http
from logbook import Logger
from oauth2client import client
from . import config
logger = Logger('youtubeto.raindrop')
class Raindrop(object):
path = 'https://raindrop.io/api/'
def __init__(self, session_id=None):
self.session_id = session_id
def _request(self, uri, method='GET', body=None, headers=None, **kwargs):
uri = self.path + uri
if headers is None:
headers = {}
if body is not None:
body = json.dumps(body)
headers['Content-Type'] = 'application/json; charset=UTF-8'
if self.session_id is not None:
headers['Cookie'] = 'connect.sid=' + self.session_id
_, content = Http().request(uri, method, body, headers, **kwargs)
return json.loads(content)
def get(self, uri):
return self._request(uri, 'GET')
def create(self, uri, **params):
return self._request(uri, 'POST', params)
def delete(self, uri):
return self._request(uri, 'DELETE')
def update(self, uri, **params):
return self._request(uri, 'PUT', params)
def main():
if config.YOUTUBE_TOKEN_EXPIRY:
youtube_token_expiry = arrow.get(config.YOUTUBE_TOKEN_EXPIRY)
else:
youtube_token_expiry = None
if config.YOUTUBE_REFRESH_TOKEN:
creds = client.OAuth2Credentials(
config.YOUTUBE_ACCESS_TOKEN, config.YOUTUBE_CLIENT_ID,
config.YOUTUBE_CLIENT_SECRET, config.YOUTUBE_REFRESH_TOKEN,
youtube_token_expiry, config.YOUTUBE_TOKEN_URI,
config.YOUTUBE_USER_AGENT)
if youtube_token_expiry <= arrow.get():
creds.refresh(Http())
config.YOUTUBE_ACCESS_TOKEN = creds.access_token
config.YOUTUBE_TOKEN_EXPIRY = creds.token_expiry.isoformat()
config.save()
else:
import webbrowser
flow = client.OAuth2WebServerFlow(
config.YOUTUBE_CLIENT_ID,
config.YOUTUBE_CLIENT_SECRET,
config.YOUTUBE_SCOPE,
config.YOUTUBE_REDIRECT_URI)
webbrowser.open(flow.step1_get_authorize_url())
code = raw_input('Input code: ')
creds = flow.step2_exchange(code)
config.YOUTUBE_ACCESS_TOKEN = creds.access_token
config.YOUTUBE_CLIENT_ID = creds.client_id
config.YOUTUBE_CLIENT_SECRET = creds.client_secret
config.YOUTUBE_REFRESH_TOKEN = creds.refresh_token
config.YOUTUBE_TOKEN_EXPIRY = creds.token_expiry.isoformat()
config.YOUTUBE_TOKEN_URI = creds.token_uri
config.YOUTUBE_USER_AGENT = creds.user_agent
config.save()
http = authorized_http(creds)
youtube = build('youtube', 'v3', http=http())
if not config.RAINDROP_SESSION_ID:
import webbrowser
webbrowser.open('https://raindrop.io/account/signin')
config.RAINDROP_SESSION_ID = raw_input('Input session id: ')
config.save()
raindrop = Raindrop(config.RAINDROP_SESSION_ID)
playlists = youtube.playlists().list(part='snippet', mine=True).execute()
favorites = next((item for item in playlists['items']
if item['snippet']['title'] == 'Favorites'), None)
req = youtube.playlistItems().list(part='snippet',
playlistId=favorites['id'])
pool = Pool()
while req:
res = req.execute()
for item in res['items']:
pool.spawn(put_in_raindrop, youtube, http, raindrop, item)
pool.join()
req = youtube.playlistItems().list_next(req, res)
def authorized_http(creds):
return lambda: creds.authorize(Http())
def put_in_raindrop(youtube, http, raindrop, item):
logger.info('Adding bookmark for {snippet[title]}', **item)
collection_id = config.RAINDROP_COLLECTION_ID
req = youtube.videos().list(part='snippet',
id=item['snippet']['resourceId']['videoId'])
video = req.execute(http())['items'][0]
url = ('http://www.youtube.com/watch'
'?v={resourceId[videoId]}'
'&list={playlistId}'
.format(**item['snippet']))
title = u'{title} by {channelTitle}'.format(**video['snippet'])
result = raindrop.create(
'raindrop',
collectionId=collection_id,
cover=0,
coverEnabled=True,
drop=False,
excerpt=video['snippet']['description'],
haveScreenshot=False,
media=[{
'link': get_biggest_thumbnail(item),
'type': 'image'
}],
tags=[],
title=title,
url=url)
logger.info('Added bookmark for {snippet[title]}', **item)
def get_biggest_thumbnail(item):
for thumbnail in ('maxres', 'standard', 'high', 'medium', 'default'):
result = item['snippet']['thumbnails'].get(thumbnail)
if result is not None:
return result['url']
if __name__ == '__main__':
main()
| true | true |
f731523ad4d8e5ca45ea9d3c2e855ab60f507b2e | 31 | py | Python | nso_restconf/__init__.py | rtrjl/nso_restconf | f5b8aa1cd857bf79732273c51f8dc6df13df030f | [
"BSD-Source-Code"
] | 1 | 2022-02-04T13:44:49.000Z | 2022-02-04T13:44:49.000Z | nso_restconf/__init__.py | rtrjl/nso_restconf | f5b8aa1cd857bf79732273c51f8dc6df13df030f | [
"BSD-Source-Code"
] | null | null | null | nso_restconf/__init__.py | rtrjl/nso_restconf | f5b8aa1cd857bf79732273c51f8dc6df13df030f | [
"BSD-Source-Code"
] | null | null | null | from .restconf import RestConf
| 15.5 | 30 | 0.83871 | from .restconf import RestConf
| true | true |
f73152526fda44eeae7cc9b1ebdfc4befe32c01d | 13,312 | py | Python | tests/integration/cloud/helpers/cloud_test_base.py | HudsonWu/mysalt | 8ce2f66e0d0338157923f0ea0dab912a0f43e52e | [
"Apache-2.0"
] | null | null | null | tests/integration/cloud/helpers/cloud_test_base.py | HudsonWu/mysalt | 8ce2f66e0d0338157923f0ea0dab912a0f43e52e | [
"Apache-2.0"
] | null | null | null | tests/integration/cloud/helpers/cloud_test_base.py | HudsonWu/mysalt | 8ce2f66e0d0338157923f0ea0dab912a0f43e52e | [
"Apache-2.0"
] | null | null | null | """
Tests for the Openstack Cloud Provider
"""
import logging
import os
import shutil
from time import sleep
import salt.utils.verify
from salt.config import cloud_config, cloud_providers_config
from salt.ext.six.moves import range
from salt.utils.yaml import safe_load
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, random_string
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
TIMEOUT = 500
log = logging.getLogger(__name__)
@expensiveTest
class CloudTest(ShellCase):
PROVIDER = ""
REQUIRED_PROVIDER_CONFIG_ITEMS = tuple()
__RE_RUN_DELAY = 30
__RE_TRIES = 12
@staticmethod
def clean_cloud_dir(tmp_dir):
"""
Clean the cloud.providers.d tmp directory
"""
# make sure old provider configs are deleted
if not os.path.isdir(tmp_dir):
return
for fname in os.listdir(tmp_dir):
os.remove(os.path.join(tmp_dir, fname))
def query_instances(self):
"""
Standardize the data returned from a salt-cloud --query
"""
return {
x.strip(": ")
for x in self.run_cloud("--query")
if x.lstrip().lower().startswith("cloud-test-")
}
def _instance_exists(self, instance_name=None, query=None):
"""
:param instance_name: The name of the instance to check for in salt-cloud.
For example this is may used when a test temporarily renames an instance
:param query: The result of a salt-cloud --query run outside of this function
"""
if not instance_name:
instance_name = self.instance_name
if not query:
query = self.query_instances()
log.debug('Checking for "{}" in {}'.format(instance_name, query))
if isinstance(query, set):
return instance_name in query
return any(instance_name == q.strip(": ") for q in query)
def assertInstanceExists(self, creation_ret=None, instance_name=None):
"""
:param instance_name: Override the checked instance name, otherwise the class default will be used.
:param creation_ret: The return value from the run_cloud() function that created the instance
"""
if not instance_name:
instance_name = self.instance_name
# If it exists but doesn't show up in the creation_ret, there was probably an error during creation
if creation_ret:
self.assertIn(
instance_name,
[i.strip(": ") for i in creation_ret],
"An error occured during instance creation: |\n\t{}\n\t|".format(
"\n\t".join(creation_ret)
),
)
else:
# Verify that the instance exists via query
query = self.query_instances()
for tries in range(self.__RE_TRIES):
if self._instance_exists(instance_name, query):
log.debug(
'Instance "{}" reported after {} seconds'.format(
instance_name, tries * self.__RE_RUN_DELAY
)
)
break
else:
sleep(self.__RE_RUN_DELAY)
query = self.query_instances()
# Assert that the last query was successful
self.assertTrue(
self._instance_exists(instance_name, query),
'Instance "{}" was not created successfully: {}'.format(
self.instance_name, ", ".join(query)
),
)
log.debug('Instance exists and was created: "{}"'.format(instance_name))
def assertDestroyInstance(self, instance_name=None, timeout=None):
if timeout is None:
timeout = TIMEOUT
if not instance_name:
instance_name = self.instance_name
log.debug('Deleting instance "{}"'.format(instance_name))
delete_str = self.run_cloud(
"-d {} --assume-yes --out=yaml".format(instance_name), timeout=timeout
)
if delete_str:
delete = safe_load("\n".join(delete_str))
self.assertIn(self.profile_str, delete)
self.assertIn(self.PROVIDER, delete[self.profile_str])
self.assertIn(instance_name, delete[self.profile_str][self.PROVIDER])
delete_status = delete[self.profile_str][self.PROVIDER][instance_name]
if isinstance(delete_status, str):
self.assertEqual(delete_status, "True")
return
elif isinstance(delete_status, dict):
current_state = delete_status.get("currentState")
if current_state:
if current_state.get("ACTION"):
self.assertIn(".delete", current_state.get("ACTION"))
return
else:
self.assertEqual(current_state.get("name"), "shutting-down")
return
# It's not clear from the delete string that deletion was successful, ask salt-cloud after a delay
query = self.query_instances()
# some instances take a while to report their destruction
for tries in range(6):
if self._instance_exists(query=query):
sleep(30)
log.debug(
'Instance "{}" still found in query after {} tries: {}'.format(
instance_name, tries, query
)
)
query = self.query_instances()
# The last query should have been successful
self.assertNotIn(instance_name, self.query_instances())
@property
def instance_name(self):
if not hasattr(self, "_instance_name"):
# Create the cloud instance name to be used throughout the tests
subclass = self.__class__.__name__.strip("Test")
# Use the first three letters of the subclass, fill with '-' if too short
self._instance_name = random_string(
"cloud-test-{:-<3}-".format(subclass[:3]), uppercase=False
).lower()
return self._instance_name
@property
def providers(self):
if not hasattr(self, "_providers"):
self._providers = self.run_cloud("--list-providers")
return self._providers
@property
def provider_config(self):
if not hasattr(self, "_provider_config"):
self._provider_config = cloud_providers_config(
os.path.join(
RUNTIME_VARS.TMP_CONF_DIR,
"cloud.providers.d",
self.PROVIDER + ".conf",
)
)
return self._provider_config[self.profile_str][self.PROVIDER]
@property
def config(self):
if not hasattr(self, "_config"):
self._config = cloud_config(
os.path.join(
RUNTIME_VARS.TMP_CONF_DIR,
"cloud.profiles.d",
self.PROVIDER + ".conf",
)
)
return self._config
@property
def profile_str(self):
return self.PROVIDER + "-config"
def add_profile_config(self, name, data, conf, new_profile):
"""
copy the current profile and add a new profile in the same file
"""
conf_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "cloud.profiles.d", conf)
with salt.utils.files.fopen(conf_path, "r") as fp:
conf = safe_load(fp)
conf[new_profile] = conf[name].copy()
conf[new_profile].update(data)
with salt.utils.files.fopen(conf_path, "w") as fp:
salt.utils.yaml.safe_dump(conf, fp)
def setUp(self):
"""
Sets up the test requirements. In child classes, define PROVIDER and REQUIRED_PROVIDER_CONFIG_ITEMS or this will fail
"""
super().setUp()
if not self.PROVIDER:
self.fail("A PROVIDER must be defined for this test")
# check if appropriate cloud provider and profile files are present
if self.profile_str + ":" not in self.providers:
self.skipTest(
"Configuration file for {0} was not found. Check {0}.conf files "
"in tests/integration/files/conf/cloud.*.d/ to run these tests.".format(
self.PROVIDER
)
)
missing_conf_item = []
for att in self.REQUIRED_PROVIDER_CONFIG_ITEMS:
if not self.provider_config.get(att):
missing_conf_item.append(att)
if missing_conf_item:
self.skipTest(
"Conf items are missing that must be provided to run these tests: {}".format(
", ".join(missing_conf_item)
)
+ "\nCheck tests/integration/files/conf/cloud.providers.d/{}.conf".format(
self.PROVIDER
)
)
def _alt_names(self):
"""
Check for an instances created alongside this test's instance that weren't cleaned up
"""
query = self.query_instances()
instances = set()
for q in query:
# Verify but this is a new name and not a shutting down ec2 instance
if q.startswith(self.instance_name) and not q.split("-")[-1].startswith(
"DEL"
):
instances.add(q)
log.debug(
'Adding "{}" to the set of instances that needs to be deleted'.format(
q
)
)
return instances
def _ensure_deletion(self, instance_name=None):
"""
Make sure that the instance absolutely gets deleted, but fail the test if it happens in the tearDown
:return True if an instance was deleted, False if no instance was deleted; and a message
"""
destroyed = False
if not instance_name:
instance_name = self.instance_name
if self._instance_exists(instance_name):
for tries in range(3):
try:
self.assertDestroyInstance(instance_name)
return (
False,
'The instance "{}" was deleted during the tearDown, not the test.'.format(
instance_name
),
)
except AssertionError as e:
log.error(
'Failed to delete instance "{}". Tries: {}\n{}'.format(
instance_name, tries, str(e)
)
)
if not self._instance_exists():
destroyed = True
break
else:
sleep(30)
if not destroyed:
# Destroying instances in the tearDown is a contingency, not the way things should work by default.
return (
False,
'The Instance "{}" was not deleted after multiple attempts'.format(
instance_name
),
)
return (
True,
'The instance "{}" cleaned up properly after the test'.format(
instance_name
),
)
def tearDown(self):
"""
Clean up after tests, If the instance still exists for any reason, delete it.
Instances should be destroyed before the tearDown, assertDestroyInstance() should be called exactly
one time in a test for each instance created. This is a failSafe and something went wrong
if the tearDown is where an instance is destroyed.
"""
success = True
fail_messages = []
alt_names = self._alt_names()
for instance in alt_names:
alt_destroyed, alt_destroy_message = self._ensure_deletion(instance)
if not alt_destroyed:
success = False
fail_messages.append(alt_destroy_message)
log.error(
'Failed to destroy instance "{}": {}'.format(
instance, alt_destroy_message
)
)
self.assertTrue(success, "\n".join(fail_messages))
self.assertFalse(
alt_names, "Cleanup should happen in the test, not the TearDown"
)
@classmethod
def tearDownClass(cls):
cls.clean_cloud_dir(cls.tmp_provider_dir)
@classmethod
def setUpClass(cls):
# clean up before setup
cls.tmp_provider_dir = os.path.join(
RUNTIME_VARS.TMP_CONF_DIR, "cloud.providers.d"
)
cls.clean_cloud_dir(cls.tmp_provider_dir)
# add the provider config for only the cloud we are testing
provider_file = cls.PROVIDER + ".conf"
shutil.copyfile(
os.path.join(
os.path.join(FILES, "conf", "cloud.providers.d"), provider_file
),
os.path.join(os.path.join(cls.tmp_provider_dir, provider_file)),
)
| 37.498592 | 126 | 0.558293 |
import logging
import os
import shutil
from time import sleep
import salt.utils.verify
from salt.config import cloud_config, cloud_providers_config
from salt.ext.six.moves import range
from salt.utils.yaml import safe_load
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, random_string
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
TIMEOUT = 500
log = logging.getLogger(__name__)
@expensiveTest
class CloudTest(ShellCase):
    """Base class for salt-cloud provider integration tests.

    Subclasses set ``PROVIDER`` and ``REQUIRED_PROVIDER_CONFIG_ITEMS``; this
    base handles unique instance naming, existence polling, destruction and
    provider-config setup/teardown around the ``salt-cloud`` CLI
    (``self.run_cloud``).
    """
    PROVIDER = ""  # cloud provider name (e.g. "ec2"); must be set by subclasses
    REQUIRED_PROVIDER_CONFIG_ITEMS = tuple()  # provider-config keys required to run the tests
    __RE_RUN_DELAY = 30  # seconds to wait between existence-query retries
    __RE_TRIES = 12  # maximum query retries while waiting for an instance
    @staticmethod
    def clean_cloud_dir(tmp_dir):
        # Remove every file from the temporary cloud config dir; no-op if absent.
        if not os.path.isdir(tmp_dir):
            return
        for fname in os.listdir(tmp_dir):
            os.remove(os.path.join(tmp_dir, fname))
    def query_instances(self):
        """Return the set of instance names reported by ``salt-cloud --query``
        that belong to cloud tests (prefixed "cloud-test-")."""
        return {
            x.strip(": ")
            for x in self.run_cloud("--query")
            if x.lstrip().lower().startswith("cloud-test-")
        }
    def _instance_exists(self, instance_name=None, query=None):
        # `query` may be a pre-fetched name set (fast path) or raw CLI output lines.
        if not instance_name:
            instance_name = self.instance_name
        if not query:
            query = self.query_instances()
        log.debug('Checking for "{}" in {}'.format(instance_name, query))
        if isinstance(query, set):
            return instance_name in query
        return any(instance_name == q.strip(": ") for q in query)
    def assertInstanceExists(self, creation_ret=None, instance_name=None):
        """Assert that an instance exists, either from the creation CLI output
        (`creation_ret`) or by polling ``salt-cloud --query`` with retries."""
        if not instance_name:
            instance_name = self.instance_name
        if creation_ret:
            self.assertIn(
                instance_name,
                [i.strip(": ") for i in creation_ret],
                "An error occured during instance creation: |\n\t{}\n\t|".format(
                    "\n\t".join(creation_ret)
                ),
            )
        else:
            # Verify that the instance exists via query
            query = self.query_instances()
            for tries in range(self.__RE_TRIES):
                if self._instance_exists(instance_name, query):
                    log.debug(
                        'Instance "{}" reported after {} seconds'.format(
                            instance_name, tries * self.__RE_RUN_DELAY
                        )
                    )
                    break
                else:
                    sleep(self.__RE_RUN_DELAY)
                    query = self.query_instances()
            # Assert that the last query was successful
            self.assertTrue(
                self._instance_exists(instance_name, query),
                'Instance "{}" was not created successfully: {}'.format(
                    self.instance_name, ", ".join(query)
                ),
            )
            log.debug('Instance exists and was created: "{}"'.format(instance_name))
    def assertDestroyInstance(self, instance_name=None, timeout=None):
        """Destroy `instance_name` via ``salt-cloud -d`` and assert it was
        deleted, parsing the YAML output; falls back to re-querying with
        delays when the output is inconclusive."""
        if timeout is None:
            timeout = TIMEOUT
        if not instance_name:
            instance_name = self.instance_name
        log.debug('Deleting instance "{}"'.format(instance_name))
        delete_str = self.run_cloud(
            "-d {} --assume-yes --out=yaml".format(instance_name), timeout=timeout
        )
        if delete_str:
            delete = safe_load("\n".join(delete_str))
            self.assertIn(self.profile_str, delete)
            self.assertIn(self.PROVIDER, delete[self.profile_str])
            self.assertIn(instance_name, delete[self.profile_str][self.PROVIDER])
            delete_status = delete[self.profile_str][self.PROVIDER][instance_name]
            if isinstance(delete_status, str):
                self.assertEqual(delete_status, "True")
                return
            elif isinstance(delete_status, dict):
                current_state = delete_status.get("currentState")
                if current_state:
                    if current_state.get("ACTION"):
                        self.assertIn(".delete", current_state.get("ACTION"))
                        return
                    else:
                        self.assertEqual(current_state.get("name"), "shutting-down")
                        return
        # It's not clear from the delete string that deletion was successful, ask salt-cloud after a delay
        query = self.query_instances()
        for tries in range(6):
            if self._instance_exists(query=query):
                sleep(30)
                log.debug(
                    'Instance "{}" still found in query after {} tries: {}'.format(
                        instance_name, tries, query
                    )
                )
                query = self.query_instances()
        self.assertNotIn(instance_name, self.query_instances())
    @property
    def instance_name(self):
        # Lazily generate a unique, lowercased instance name from the subclass name.
        if not hasattr(self, "_instance_name"):
            subclass = self.__class__.__name__.strip("Test")
            self._instance_name = random_string(
                "cloud-test-{:-<3}-".format(subclass[:3]), uppercase=False
            ).lower()
        return self._instance_name
    @property
    def providers(self):
        # Cached output of `salt-cloud --list-providers`.
        if not hasattr(self, "_providers"):
            self._providers = self.run_cloud("--list-providers")
        return self._providers
    @property
    def provider_config(self):
        # Cached provider configuration section for this test's PROVIDER.
        if not hasattr(self, "_provider_config"):
            self._provider_config = cloud_providers_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.providers.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._provider_config[self.profile_str][self.PROVIDER]
    @property
    def config(self):
        # Cached cloud profile configuration for this test's PROVIDER.
        if not hasattr(self, "_config"):
            self._config = cloud_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.profiles.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._config
    @property
    def profile_str(self):
        # Key used for this provider in the cloud config mappings.
        return self.PROVIDER + "-config"
    def add_profile_config(self, name, data, conf, new_profile):
        """Copy profile `name` in profile file `conf` to `new_profile`, merge
        `data` into the copy, and write the file back."""
        conf_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "cloud.profiles.d", conf)
        with salt.utils.files.fopen(conf_path, "r") as fp:
            conf = safe_load(fp)
        conf[new_profile] = conf[name].copy()
        conf[new_profile].update(data)
        with salt.utils.files.fopen(conf_path, "w") as fp:
            salt.utils.yaml.safe_dump(conf, fp)
    def setUp(self):
        """Fail when PROVIDER is unset; skip when the provider is not
        configured or required provider config items are missing."""
        super().setUp()
        if not self.PROVIDER:
            self.fail("A PROVIDER must be defined for this test")
        if self.profile_str + ":" not in self.providers:
            self.skipTest(
                "Configuration file for {0} was not found. Check {0}.conf files "
                "in tests/integration/files/conf/cloud.*.d/ to run these tests.".format(
                    self.PROVIDER
                )
            )
        missing_conf_item = []
        for att in self.REQUIRED_PROVIDER_CONFIG_ITEMS:
            if not self.provider_config.get(att):
                missing_conf_item.append(att)
        if missing_conf_item:
            self.skipTest(
                "Conf items are missing that must be provided to run these tests: {}".format(
                    ", ".join(missing_conf_item)
                )
                + "\nCheck tests/integration/files/conf/cloud.providers.d/{}.conf".format(
                    self.PROVIDER
                )
            )
    def _alt_names(self):
        """Return leftover instances sharing this test's name prefix that are
        not already marked for deletion (no trailing "DEL" segment)."""
        query = self.query_instances()
        instances = set()
        for q in query:
            if q.startswith(self.instance_name) and not q.split("-")[-1].startswith(
                "DEL"
            ):
                instances.add(q)
                log.debug(
                    'Adding "{}" to the set of instances that needs to be deleted'.format(
                        q
                    )
                )
        return instances
    def _ensure_deletion(self, instance_name=None):
        """Ensure the instance is gone; retry destruction up to 3 times.

        Returns (success, message); success is False when tearDown had to do
        the deleting, or deletion ultimately failed."""
        destroyed = False
        if not instance_name:
            instance_name = self.instance_name
        if self._instance_exists(instance_name):
            for tries in range(3):
                try:
                    self.assertDestroyInstance(instance_name)
                    return (
                        False,
                        'The instance "{}" was deleted during the tearDown, not the test.'.format(
                            instance_name
                        ),
                    )
                except AssertionError as e:
                    log.error(
                        'Failed to delete instance "{}". Tries: {}\n{}'.format(
                            instance_name, tries, str(e)
                        )
                    )
                if not self._instance_exists():
                    destroyed = True
                    break
                else:
                    sleep(30)
            if not destroyed:
                return (
                    False,
                    'The Instance "{}" was not deleted after multiple attempts'.format(
                        instance_name
                    ),
                )
        return (
            True,
            'The instance "{}" cleaned up properly after the test'.format(
                instance_name
            ),
        )
    def tearDown(self):
        """Fail if any instances from this test survived — cleanup belongs in
        the test itself, not in tearDown."""
        success = True
        fail_messages = []
        alt_names = self._alt_names()
        for instance in alt_names:
            alt_destroyed, alt_destroy_message = self._ensure_deletion(instance)
            if not alt_destroyed:
                success = False
                fail_messages.append(alt_destroy_message)
                log.error(
                    'Failed to destroy instance "{}": {}'.format(
                        instance, alt_destroy_message
                    )
                )
        self.assertTrue(success, "\n".join(fail_messages))
        self.assertFalse(
            alt_names, "Cleanup should happen in the test, not the TearDown"
        )
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary provider config files created by setUpClass.
        cls.clean_cloud_dir(cls.tmp_provider_dir)
    @classmethod
    def setUpClass(cls):
        # Install this provider's .conf file into a clean temporary providers dir.
        cls.tmp_provider_dir = os.path.join(
            RUNTIME_VARS.TMP_CONF_DIR, "cloud.providers.d"
        )
        cls.clean_cloud_dir(cls.tmp_provider_dir)
        provider_file = cls.PROVIDER + ".conf"
        shutil.copyfile(
            os.path.join(
                os.path.join(FILES, "conf", "cloud.providers.d"), provider_file
            ),
            os.path.join(os.path.join(cls.tmp_provider_dir, provider_file)),
        )
| true | true |
f7315522e3755914ebfefcdfd231697126d0ee40 | 49,362 | py | Python | path-finding/yolo-v5/utils/datasets.py | sa-y-an/open-source-autonomous-vehicle-controller | 0cc415fb141d1b66ac45a7bf6b50add6814728fb | [
"MIT"
] | 3 | 2021-06-15T05:10:00.000Z | 2021-09-05T18:07:01.000Z | utils/datasets.py | z430/yolov5-mask-detection | b959a4fefa1d44d052436ff9129af386e15e0455 | [
"MIT"
] | 1 | 2021-06-07T21:05:14.000Z | 2021-06-07T21:05:14.000Z | utils/datasets.py | z430/yolov5-mask-detection | b959a4fefa1d44d052436ff9129af386e15e0455 | [
"MIT"
] | 9 | 2021-06-10T08:42:53.000Z | 2022-03-28T05:46:16.000Z | # Dataset utils and dataloaders
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import math
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \
xyn2xy, segment2box, segments2boxes, resample_segments, clean_str
from utils.metrics import bbox_ioa
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
num_threads = min(8, os.cpu_count()) # number of multiprocessing threads
logger = logging.getLogger(__name__)
# Get orientation exif tag
# Locate the numeric EXIF tag id whose registered name is 'Orientation';
# the loop variable `orientation` is left bound for use by exif_size().
for orientation, _tag_name in ExifTags.TAGS.items():
    if _tag_name == 'Orientation':
        break
def get_hash(paths):
    """Return one MD5 hex digest summarizing a list of file/dir paths.

    The digest folds in both the total on-disk size of the paths that exist
    and the concatenated path strings, so it changes whenever files change
    size or the path list itself changes.
    """
    total_size = 0
    for p in paths:
        if os.path.exists(p):
            total_size += os.path.getsize(p)
    digest = hashlib.md5(str(total_size).encode())  # seed with combined size
    digest.update(''.join(paths).encode())  # then fold in the path strings
    return digest.hexdigest()
def exif_size(img):
    """Return the PIL image size (width, height), corrected for EXIF rotation.

    Images tagged with EXIF orientation 6 or 8 (rotated 270/90 degrees) have
    width and height swapped. Any failure to read the EXIF data (no
    ``_getexif`` support, missing tag) falls back to the raw size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 270 or 90 degree rotation swaps w/h
            s = (s[1], s[0])
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any EXIF read failure just keeps the raw size.
        pass
    return s
def exif_transpose(image):
    """
    Transpose a PIL image according to its EXIF Orientation tag, if present.

    Mirrors Pillow's ImageOps.exif_transpose: applies the flip/rotation the
    tag encodes, removes the tag, and re-serializes the EXIF block into the
    image metadata.

    :param image: The image to transpose.
    :return: An image.
    """
    ORIENTATION_TAG = 0x0112  # standard EXIF Orientation tag id
    exif = image.getexif()
    orientation_value = exif.get(ORIENTATION_TAG, 1)  # 1 == normal orientation
    if orientation_value <= 1:
        return image
    transpose_ops = {2: Image.FLIP_LEFT_RIGHT,
                     3: Image.ROTATE_180,
                     4: Image.FLIP_TOP_BOTTOM,
                     5: Image.TRANSPOSE,
                     6: Image.ROTATE_270,
                     7: Image.TRANSVERSE,
                     8: Image.ROTATE_90,
                     }
    op = transpose_ops.get(orientation_value)
    if op is not None:
        image = image.transpose(op)
        del exif[ORIENTATION_TAG]  # the pixels are now upright; drop the tag
        image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a matching dataloader.

    Returns (dataloader, dataset). Uses InfiniteDataLoader (worker reuse)
    unless `image_weights` is set, in which case dataset attributes change
    between epochs and a vanilla DataLoader is required.
    """
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is read-only after DataLoader.__init__, so swap in the
        # repeating sampler via object.__setattr__; workers then persist
        # across epochs instead of being torn down and respawned.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()
    def __len__(self):
        # Length of one pass over the underlying (non-repeating) sampler.
        return len(self.batch_sampler.sampler)
    def __iter__(self):
        # Pull one epoch's worth of batches from the single long-lived iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:  # for inference
    """Iterate over image/video files for inference, yielding
    (path, letterboxed CHW RGB image, original BGR image, video capture)."""
    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
    def __iter__(self):
        self.count = 0
        return self
    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB and HWC to CHW
        img = np.ascontiguousarray(img)
        return path, img, img0, self.cap
    def new_video(self, path):
        # Open a new video capture and reset the frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Read frames from a single local webcam (or pipe) for inference,
    yielding (path, letterboxed CHW RGB image, original BGR image, None)."""
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        # Numeric strings select a local camera index; anything else is a pipe/URL.
        self.pipe = eval(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration
        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right
        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB and HWC to CHW
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None
    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read multiple video streams (files, webcams, RTSP/HTTP/YouTube URLs)
    in background daemon threads and yield batches of letterboxed frames."""
    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride
        # `sources` may be a path to a text file with one source per line,
        # or a single source string.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
    def update(self, i, cap):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                # On read failure, blank the frame rather than keep a stale one.
                self.imgs[i] = im if success else self.imgs[i] * 0
            time.sleep(1 / self.fps[i])  # wait time
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB and BHWC to BCHW
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None
    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map image paths to label-file paths (.../images/x.jpg -> .../labels/x.txt)."""
    img_dir = os.sep + 'images' + os.sep  # /images/ substring
    lbl_dir = os.sep + 'labels' + os.sep  # /labels/ substring
    label_paths = []
    for p in img_paths:
        # Swap only the LAST /images/ component, then replace the extension.
        swapped = lbl_dir.join(p.rsplit(img_dir, 1))
        label_paths.append(swapped.rsplit('.', 1)[0] + '.txt')
    return label_paths
class LoadImagesAndLabels(Dataset):  # for training/testing
    """YOLOv5 train/val dataset.

    Loads images and YOLO-format txt labels, caches parsed labels to a
    .cache file, optionally caches images in RAM, and supports rectangular
    batches plus mosaic/mixup/HSV/flip augmentation.
    """
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        # NOTE: `p` below is the last path processed by the loop above.
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files):
                cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache
        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0
        n = len(shapes)  # number of images
        # NOTE(review): np.int is deprecated in modern NumPy; np.int64/int would be needed there.
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)
        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]
            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
            pbar.close()
    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Verify every image/label pair in parallel and build the cache dict,
        writing it to `path` when the directory is writeable."""
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(num_threads) as pool:
            pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = 0.3  # cache version
        try:
            torch.save(x, path)  # save cache for next time
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
        return x
    def __len__(self):
        return len(self.img_files)
    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
    def __getitem__(self, index):
        """Return (image tensor CHW RGB, labels (n, 6), path, shapes) for one
        item, applying mosaic/mixup/letterbox/HSV/flip augmentation per hyp."""
        index = self.indices[index]  # linear, shuffled, or image_weights
        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None
            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)
            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])
            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)
        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0])  # xyxy to xywh normalized
        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]
            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]
        # Column 0 is left zero for collate_fn to fill with the image index.
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3 x img_height x img_width
        img = np.ascontiguousarray(img)
        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
    @staticmethod
    def collate_fn(batch):
        """Stack images and concatenate labels, tagging each label row with
        its image index within the batch."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge each group of 4 samples into one, either by
        upscaling one image 2x or tiling the 4 into a 2x2 grid."""
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)
        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Load one dataset image by index.

    Returns (img, original (h, w), resized (h, w)). Serves the RAM-cached
    copy when available, otherwise reads from disk and resizes the long side
    to self.img_size.
    """
    cached = self.imgs[index]
    if cached is not None:
        return cached, self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
    path = self.img_files[index]
    im = cv2.imread(path)  # BGR
    assert im is not None, 'Image Not Found ' + path
    h0, w0 = im.shape[:2]  # original hw
    scale = self.img_size / max(h0, w0)  # resize ratio for the long side
    if scale != 1:
        # INTER_AREA when shrinking (and not augmenting), INTER_LINEAR otherwise
        interp = cv2.INTER_AREA if scale < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * scale), int(h0 * scale)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """HSV color-space augmentation applied to `img` in place (returns None).

    Each of hue/saturation/value is scaled by a random gain drawn from
    [1 - gain, 1 + gain]; passing 0 for all three gains is a no-op.
    """
    if not (hgain or sgain or vgain):
        return
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random per-channel gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8
    x = np.arange(0, 256, dtype=gains.dtype)
    lut_hue = ((x * gains[0]) % 180).astype(dtype)  # OpenCV hue wraps at 180
    lut_sat = np.clip(x * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * gains[2], 0, 255).astype(dtype)
    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # write back in place, no return needed
def hist_equalize(img, clahe=True, bgr=False):
    """Equalize the luma histogram of an image with shape (n, m, 3), range 0-255.

    Works in YUV space so only brightness is equalized; `clahe` selects
    contrast-limited adaptive equalization over plain global equalization.
    """
    to_yuv = cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV
    from_yuv = cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB
    yuv = cv2.cvtColor(img, to_yuv)
    if clahe:
        yuv[:, :, 0] = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram only
    return cv2.cvtColor(yuv, from_yuv)  # convert back to the input color order
def load_mosaic(self, index):
    """Load a 2s x 2s 4-image mosaic centered at a random point.

    Returns (img4, labels4) with labels in pixel-xyxy, after copy-paste and
    random-perspective augmentation.
    """
    # loads images in a 4-mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)
    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate
    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4
def load_mosaic9(self, index):
    """Load a 9-image (3x3) mosaic, then crop a random 2s x 2s window.

    Returns (img9, labels9) with labels in pixel-xyxy after
    random-perspective augmentation.
    """
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img9
        # NOTE: wp/hp below are the previous iteration's width/height, set at
        # the end of the loop body, so tiles 2/4/6/8 depend on iteration order.
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp
        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)
        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous
    # Offset
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]
    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate
    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img9, labels9
def replicate(img, labels):
    """Replicate the smallest 50% of labeled boxes by pasting copies at random free offsets.

    img: HxWxC array (modified in place); labels: nx5 array of (cls, x1, y1, x2, y2) in pixels.
    Returns the (mutated) image and the augmented label array.
    """
    # Replicate labels
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """Resize `img` to fit `new_shape` preserving aspect ratio, padding the remainder with `color`.

    Returns the padded image, the (w, h) scale ratios and the per-side (dw, dh) padding.
    """
    h0, w0 = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale factor (new / old); never upscale when scaleup is False (better test mAP)
    gain = min(new_shape[0] / h0, new_shape[1] / w0)
    if not scaleup:
        gain = min(gain, 1.0)

    ratio = gain, gain  # width, height ratios
    unpad_w, unpad_h = int(round(w0 * gain)), int(round(h0 * gain))
    dw = new_shape[1] - unpad_w  # width padding
    dh = new_shape[0] - unpad_h  # height padding
    if auto:  # minimum rectangle: pad only up to the nearest stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to exactly new_shape, no padding at all
        dw, dh = 0.0, 0.0
        unpad_w, unpad_h = new_shape[1], new_shape[0]
        ratio = new_shape[1] / w0, new_shape[0] / h0  # width, height ratios

    dw /= 2  # split padding between the two sides
    dh /= 2

    if (w0, h0) != (unpad_w, unpad_h):  # resize needed
        img = cv2.resize(img, (unpad_w, unpad_h), interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    """Apply a random affine/perspective warp to `img` and transform `targets` (nx5 cls+xyxy)
    and optional polygon `segments` to match. Returns the warped image and surviving targets.
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates (degenerate boxes after warping are dropped)
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return img, targets
def copy_paste(img, labels, segments, probability=0.5):
    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
    # A random subset of segmented instances is mirrored left-right and pasted back wherever it
    # would obscure <30% of any existing label. `img` is modified in place.
    n = len(segments)
    if probability and n:
        h, w, c = img.shape  # height, width, channels
        im_new = np.zeros(img.shape, np.uint8)
        for j in random.sample(range(n), k=round(probability * n)):
            l, s = labels[j], segments[j]
            box = w - l[3], l[2], w - l[1], l[4]  # left-right mirrored xyxy box
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
                labels = np.concatenate((labels, [[l[0], *box]]), 0)
                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

        result = cv2.bitwise_and(src1=img, src2=im_new)
        result = cv2.flip(result, 1)  # augment segments (flip left-right)
        i = result > 0  # pixels to replace
        # i[:, :] = result.max(2).reshape(h, w, 1)  # act over ch
        img[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug

    return img, labels, segments
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    """Return a boolean mask of boxes worth keeping after augmentation.

    box1: 4xn xyxy boxes before augment; box2: 4xn boxes after augment.
    A candidate must be >wh_thr pixels on each side, keep >area_thr of its
    original area, and have an aspect ratio below ar_thr.
    """
    w1 = box1[2] - box1[0]
    h1 = box1[3] - box1[1]
    w2 = box2[2] - box2[0]
    h2 = box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # worst-case aspect ratio
    area_ok = w2 * h2 / (w1 * h1 + eps) > area_thr  # area retained after augment
    return (w2 > wh_thr) & (h2 > wh_thr) & area_ok & (aspect < ar_thr)  # candidates
def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    # Random rectangles of decreasing size are filled with random colors; labels that end up
    # >60% obscured are dropped. `image` is modified in place; returns the surviving labels.
    h, w = image.shape[:2]

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels (only the larger masks can meaningfully obscure a label)
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels
def create_folder(path='./new'):
    """(Re)create `path` as an empty directory, deleting any previous contents."""
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)  # drop the old folder and everything in it
    os.makedirs(path)  # fresh, empty output folder
def flatten_recursive(path='../datasets/coco128'):
    """Copy every file found anywhere under `path` into a new sibling '<path>_flat' directory."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)  # fresh (emptied) output directory
    for src in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    # Convert detection dataset into classification dataset, with one directory per class
    # Each labeled box is cropped (with 20% + 3px padding) and written under path/classifier/<cls>/.
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path: Path to images directory
        weights: Train, val, test weights (list, tuple)
        annotated_only: Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])  # image files only
    n = len(files)  # number of files
    random.seed(0)  # for reproducibility
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path.parent / x).unlink(missing_ok=True) for x in txt]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path.parent / txt[i], 'a') as f:
                f.write('./' + img.relative_to(path.parent).as_posix() + '\n')  # add image to txt file
def verify_image_label(args):
    # Verify one image-label pair.
    # Returns (im_file, labels, shape, segments, nm, nf, ne, nc, msg); on failure the first four
    # entries are None and msg describes the problem. Designed to run inside a multiprocessing Pool,
    # hence the single packed `args` tuple.
    im_file, lb_file, prefix = args
    nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupt
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in img_formats, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)
                # valid JPEG files must end with the EOI marker
                assert f.read() == b'\xff\xd9', 'corrupted JPEG'

        # verify labels
        segments = []  # instance segments
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file, 'r') as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any([len(x) > 8 for x in l]):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            if len(l):
                assert l.shape[1] == 5, 'labels require 5 columns each'
                assert (l >= 0).all(), 'negative labels'
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, ''
    except Exception as e:
        nc = 1
        msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    Usage: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True)
    Arguments
        path: Path to data.yaml
        autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
    """

    def round_labels(labels):
        # Update labels to integer class and 6 decimal place floats
        return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels]

    with open(check_file(path)) as f:
        data = yaml.safe_load(f)  # data dict
    check_dataset(data, autodownload)  # download dataset if missing
    nc = data['nc']  # number of classes
    stats = {'nc': nc, 'names': data['names']}  # statistics dictionary
    cache_path = None  # set from the train split below; guards the final save when 'train' is absent
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split], augment=False, rect=True)  # load dataset
        if split == 'train':
            cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache')  # *.cache path
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=nc))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}

    # Save, print and return
    # BUG FIX: cache_path was previously only bound inside the train branch, so a dataset
    # without a 'train' split raised NameError here; now the save is simply skipped.
    if cache_path is not None:
        with open(cache_path.with_suffix('.json'), 'w') as f:
            json.dump(stats, f)  # save stats *.json
    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
        # print(yaml.dump([stats], sort_keys=False, default_flow_style=False))
    return stats
| 42.590164 | 120 | 0.548803 |
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import math
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \
xyn2xy, segment2box, segments2boxes, resample_segments, clean_str
from utils.metrics import bbox_ioa
from utils.torch_utils import torch_distributed_zero_first
# Dataset/dataloader parameters shared by all loaders below
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'  # shown in error messages
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # accepted image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # accepted video suffixes
num_threads = min(8, os.cpu_count())  # worker count for label-caching pools
logger = logging.getLogger(__name__)  # module-level logger
# Resolve the numeric EXIF tag id for 'Orientation' once at import time (used by exif_size).
# NOTE: indentation was lost in extraction; restored here.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(paths):
    """Return a single MD5 hex digest for a list of file paths.

    The hash is keyed on the total on-disk size of the (existing) files plus the
    concatenated path names, so it changes when files are added, removed or resized.
    (Indentation was lost in extraction; restored here.)
    """
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # total size of existing files
    h = hashlib.md5(str(size).encode())  # hash the size first
    h.update(''.join(paths).encode())  # then fold in the path names
    return h.hexdigest()
def exif_size(img):
    """Return a PIL image's (width, height), swapped when the EXIF Orientation tag is 6 or 8.

    (Indentation was lost in extraction; restored. Also narrowed the bare `except:`
    to `except Exception` so KeyboardInterrupt is no longer swallowed.)
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]  # module-level EXIF Orientation tag id
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:  # no EXIF data or no orientation tag — keep the raw size
        pass

    return s
def exif_transpose(image):
    """Transpose a PIL image according to its EXIF Orientation tag (if any) and strip
    the tag afterwards so pixels and metadata stay consistent.
    (Indentation was lost in extraction; restored here.)

    :param image: The image to transpose.
    :return: An image (the original when no transpose is needed).
    """
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # 0x0112 is the EXIF Orientation tag; default 1 = upright
    if orientation > 1:
        method = {2: Image.FLIP_LEFT_RIGHT,
                  3: Image.ROTATE_180,
                  4: Image.FLIP_TOP_BOTTOM,
                  5: Image.TRANSPOSE,
                  6: Image.ROTATE_270,
                  7: Image.TRANSVERSE,
                  8: Image.ROTATE_90,
                  }.get(orientation)
        if method is not None:
            image = image.transpose(method)
            del exif[0x0112]  # orientation handled — remove the tag
            image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a (possibly distributed) DataLoader over it.

    Returns (dataloader, dataset). (Indentation was lost in extraction; restored here.)
    """
    # Make sure only the first process in DDP scans/caches the dataset; others reuse the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,
                                      hyp=hyp,
                                      rect=rect,
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    # Image-weighted sampling re-creates the loader each epoch, so it cannot reuse workers
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """DataLoader that reuses its workers: iteration never exhausts the underlying sampler.

    (Indentation was lost in extraction; restored here.)
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap in a forever-repeating batch sampler. object.__setattr__ bypasses
        # DataLoader's guard against reassigning batch_sampler after construction.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one pass over the (inner) sampler, not of the infinite stream
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the persistent iterator
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
    """Iterator over images/videos under a path, yielding letterboxed frames for inference.

    Yields (path, letterboxed CHW RGB image, original BGR image, video capture or None).
    (Indentation was lost in extraction; restored here.)
    """

    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob pattern
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # directory
        elif os.path.isfile(p):
            files = [p]  # single file
        else:
            raise Exception(f'ERROR: {p} does not exist')

        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # open the first video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read the next video frame, advancing to the next file when this video ends
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # BGR -> RGB, HWC -> CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new video capture and reset the frame counter
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:
    """Iterator over frames from a local camera (or stream pipe) for inference.

    (Indentation was lost in extraction; restored here.)
    """

    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        # SECURITY: the original used eval() on the pipe string; int() covers the
        # numeric-device case ('0' -> 0) without evaluating arbitrary input.
        self.pipe = int(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # small buffer to reduce latency

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame and mirror it left-right (selfie view)
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize, then BGR -> RGB and HWC -> CHW
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:
    """Iterator over multiple RTSP/HTTP/webcam streams, each read in a daemon thread.

    Yields (sources, batched CHW RGB images, list of original BGR frames, None).
    (Indentation was lost in extraction; restored here.)
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):  # file with one source per line
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Open stream and start one reader thread per source
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # YouTube URL — resolve to a direct mp4 URL
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url
            # SECURITY: the original used eval() here; int() covers the numeric-device case safely
            s = int(s) if s.isnumeric() else s  # e.g. '0' for a local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite fallback
            _, self.imgs[i] = cap.read()  # guarantee a first frame before threads start
            self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline

        # Check that all streams share a shape (enables rectangular inference)
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)
        self.rect = np.unique(s, axis=0).shape[0] == 1
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, i, cap):
        # Read stream `i` frames in a daemon thread, pacing to the stream FPS
        n, f, read = 0, self.frames[i], 1  # frame counter, frame limit, read every `read` frames
        while cap.isOpened() and n < f:
            n += 1
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                self.imgs[i] = im if success else self.imgs[i] * 0  # black frame on failure
            time.sleep(1 / self.fps[i])  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox the latest frame from every stream and batch them
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
        img = np.stack(img, 0)
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR -> RGB, BHWC -> BCHW
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0
def img2label_paths(img_paths):
    """Map image paths to label paths: .../images/x.jpg -> .../labels/x.txt.

    Only the LAST '/images/' path component is replaced. (Indentation was lost
    in extraction; restored here.)
    """
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
class LoadImagesAndLabels(Dataset):
    """Train/val dataset: loads images and YOLO-format labels, with caching and augmentation.

    NOTE(review): this class lost its indentation during extraction, plus a handful of whole
    lines (the `else:` before the path-not-found raise, the `__getitem__` header and its first
    statements, and the `for`-loop structure in collate_fn4). All are restored here from the
    surrounding logic — confirm against the project history.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # mosaic only while training, not rectangular
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                elif p.is_file():  # file listing image paths
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            # Re-cache when the version or file-set hash changed
            if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files):
                cache, exists = self.cache_labels(cache_path, prefix), False
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache results
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove non-sample items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update (corrupted files were dropped)
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index per image
        nb = bi[-1] + 1  # number of batches
        self.batch = bi
        self.n = n
        self.indices = range(n)

        # Rectangular training: sort by aspect ratio and compute per-batch shapes
        if self.rect:
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Optionally cache all images into RAM (large datasets may exceed system memory)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # bytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
            pbar.close()

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, checking images and reading shapes in a process pool
        x = {}  # {im_file: [labels, shape, segments]}
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(num_threads) as pool:
            pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = 0.3  # cache version
        try:
            torch.save(x, path)  # save cache for next time
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')
        return x

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights order

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load 4-image mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(32.0, 32.0)  # mixup ratio
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load single image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace (mosaic already applied random_perspective)
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0])  # xyxy to normalized xywh

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # BGR -> RGB, HWC -> CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        # Quad collate: merge every group of 4 samples into one (either a 2x2 tile or one upscaled image)
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])  # +1 to normalized y
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])  # +1 to normalized x
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale labels back to the merged image
        for i in range(n):
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
def load_image(self, index):
    """Load one dataset image, resized so its long side equals self.img_size.

    Returns (img, (h0, w0) original size, (h, w) resized size). Serves from the RAM
    cache when available. (Indentation was lost in extraction; restored here.)
    """
    img = self.imgs[index]
    if img is None:  # not cached in RAM
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # original hw
        r = self.img_size / max(h0, w0)  # resize long side to img_size
        if r != 1:
            # INTER_AREA shrinks more cleanly; INTER_LINEAR when enlarging or augmenting
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
                             interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # cached
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Apply random HSV color-space augmentation to a BGR uint8 image, in place.

    (Indentation was lost in extraction; restored here.)
    """
    if hgain or sgain or vgain:
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
        dtype = img.dtype  # uint8

        # Build per-channel lookup tables so the whole image is remapped in one pass
        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV hue range is [0, 180)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # write back in place, no return needed
def hist_equalize(img, clahe=True, bgr=False):
    """Equalize the luma-channel histogram of a uint8 image (BGR or RGB, per `bgr`).

    (Indentation was lost in extraction; restored here.)
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)
def load_mosaic(self, index):
    """Build a 4-image mosaic around a random center; returns (img4, labels4).

    (Indentation was lost in extraction; restored here.)
    """
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # Place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xyxy in large image
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xyxy in small image
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()

    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8)
for i, index in enumerate(indices):
img, _, (h, w) = load_image(self, index)
if i == 0:
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)
h0, w0 = h, w
c = s, s, s + w, s + h
elif i == 1:
c = s, s - h, s + w, s
elif i == 2:
c = s + wp, s - h, s + wp + w, s
elif i == 3:
c = s + w0, s, s + w0 + w, s + h
elif i == 4:
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5:
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6:
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7:
c = s - w, s + h0 - h, s, s + h0
elif i == 8:
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c]
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]
hp, wp = h, w
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc])
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x)
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border)
return img9, labels9
def replicate(img, labels):
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2
for i in s.argsort()[:round(s.size * 0.5)]:
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
shape = img.shape[:2]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup:
r = min(r, 1.0)
ratio = r, r
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
if auto:
dw, dh = np.mod(dw, stride), np.mod(dh, stride)
elif scaleFill:
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]
dw /= 2
dh /= 2
if shape[::-1] != new_unpad:
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return img, ratio, (dw, dh)
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
height = img.shape[0] + border[0] * 2
width = img.shape[1] + border[1] * 2
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2
C[1, 2] = -img.shape[0] / 2
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective)
P[2, 1] = random.uniform(-perspective, perspective)
R = np.eye(3)
a = random.uniform(-degrees, degrees)
cale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height
M = T @ S @ R @ P @ C
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments:
segments = resample_segments(segments)
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]
new[i] = segment2box(xy, width, height)
else:
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)
xy = xy @ M.T
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
def copy_paste(img, labels, segments, probability=0.5):
n = len(segments)
if probability and n:
h, w, c = img.shape
im_new = np.zeros(img.shape, np.uint8)
for j in random.sample(range(n), k=round(probability * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5])
if (ioa < 0.30).all():
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=img, src2=im_new)
result = cv2.flip(result, 1)
i = result > 0
i] = result[i] eturn img, labels, segments
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)
def cutout(image, labels):
h, w = image.shape[:2]
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5])
labels = labels[ioa < 0.60]
return labels
def create_folder(path='./new'):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def flatten_recursive(path='../datasets/coco128'):
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'):
path = Path(path)
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None
files = list(path.rglob('*.*'))
n = len(files)
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
im = cv2.imread(str(im_file))[..., ::-1]
h, w = im.shape[:2]
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)
for j, x in enumerate(lb):
c = int(x[0])
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h]
b[2:] = b[2:] * 1.2 + 3
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w)
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
path = Path(path)
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])
n = len(files)
random.seed(0)
indices = random.choices([0, 1, 2], weights=weights, k=n)
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']
[(path.parent / x).unlink(missing_ok=True) for x in txt]
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n')
def verify_image_label(args):
im_file, lb_file, prefix = args
nm, nf, ne, nc = 0, 0, 0, 0
try:
im = Image.open(im_file)
im.verify()
shape = exif_size(im)
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
assert f.read() == b'\xff\xd9', 'corrupted JPEG'
segments = []
if os.path.isfile(lb_file):
nf = 1
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]):
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne = 1
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, ''
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
def round_labels(labels):
return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels]
with open(check_file(path)) as f:
data = yaml.safe_load(f)
check_dataset(data, autodownload)
nc = data['nc']
stats = {'nc': nc, 'names': data['names']}
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None
continue
x = []
dataset = LoadImagesAndLabels(data[split], augment=False, rect=True)
if split == 'train':
cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache')
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=nc))
x = np.array(x)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
with open(cache_path.with_suffix('.json'), 'w') as f:
json.dump(stats, f)
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
| true | true |
f7315650becb2c94bec9837ddd60b3d1e3e2b7d1 | 2,007 | py | Python | extra/face.py | Leyan529/ImageClassificationPL | a4be75f4525828100d8d278e46ff5dccd829af1a | [
"MIT"
] | null | null | null | extra/face.py | Leyan529/ImageClassificationPL | a4be75f4525828100d8d278e46ff5dccd829af1a | [
"MIT"
] | null | null | null | extra/face.py | Leyan529/ImageClassificationPL | a4be75f4525828100d8d278e46ff5dccd829af1a | [
"MIT"
] | null | null | null | import torch
import matplotlib.image as img
import cv2
import dlib
from imutils.face_utils import *
import numpy as np
# image = img.imread("extra//test.jpg")
# image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) # opencvImage
dlib_path = 'extra//shape_predictor_68_face_landmarks.dat'
def get_face(img):
global detector, landmark_predictor
# 宣告臉部偵測器,以及載入預訓練的臉部特徵點模型
detector = dlib.get_frontal_face_detector()
landmark_predictor = dlib.shape_predictor(dlib_path)
# 產生臉部識別
face_rects = detector(img, 1)
for i, d in enumerate(face_rects):
# 讀取框左上右下座標
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
# 根據此座標範圍讀取臉部特徵點
shape = landmark_predictor(img, d)
# 將特徵點轉為numpy
shape = shape_to_np(shape) # (68,2)
# 透過dlib挖取臉孔部分,將臉孔圖片縮放至256*256的大小,並存放於pickle檔中
# 人臉圖像部分呢。很簡單,只要根據畫框的位置切取即可crop_img = img[y1:y2, x1:x2, :]
crop_img = img[y1:y2, x1:x2, :]
try:
resize_img = cv2.resize(crop_img, (512, 512))
# cv2.imshow("OpenCV",resize_img)
# cv2.waitKey()
return resize_img
except:
return np.array([0])
return np.array([0])
def predict_image(logger, image, model):
try:
face = get_face(image) # predict target
face = torch.tensor(face, dtype=torch.float32)/255 # normalize
face = face.permute(2, 0, 1).unsqueeze(0).cuda()
# model = torch.load('run\SCUT\pre_googlenet\experiment_6\pre_googlenet.pkl')
# model.load_state_dict(torch.load('run\SCUT\pre_googlenet\experiment_6\checkpoint.pth.tar')['state_dict'])
outputs = model(face) # [bsz, c, h, w]
_, predicted = torch.max(outputs.data, 1)
score = int(predicted.item()) * 20
# logger.info("Predict Score : {}".format(score))
return score
except Exception as e:
# print(e)
return 0 | 35.210526 | 116 | 0.605879 | import torch
import matplotlib.image as img
import cv2
import dlib
from imutils.face_utils import *
import numpy as np
extra//shape_predictor_68_face_landmarks.dat'
def get_face(img):
global detector, landmark_predictor
detector = dlib.get_frontal_face_detector()
landmark_predictor = dlib.shape_predictor(dlib_path)
face_rects = detector(img, 1)
for i, d in enumerate(face_rects):
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
shape = landmark_predictor(img, d)
shape = shape_to_np(shape)
crop_img = img[y1:y2, x1:x2, :]
try:
resize_img = cv2.resize(crop_img, (512, 512))
return resize_img
except:
return np.array([0])
return np.array([0])
def predict_image(logger, image, model):
try:
face = get_face(image)
face = torch.tensor(face, dtype=torch.float32)/255
face = face.permute(2, 0, 1).unsqueeze(0).cuda()
outputs = model(face)
_, predicted = torch.max(outputs.data, 1)
score = int(predicted.item()) * 20
return score
except Exception as e:
return 0 | true | true |
f731574329fa3890c3eac2e8b58b5c9c180f7338 | 1,636 | py | Python | src/Filtering/BinaryMathematicalMorphology/DilateABinaryImage/Code.py | justbennet/ITKExamples | cde3b1bfb396042050c399b4bae59c338cf646f2 | [
"Apache-2.0"
] | null | null | null | src/Filtering/BinaryMathematicalMorphology/DilateABinaryImage/Code.py | justbennet/ITKExamples | cde3b1bfb396042050c399b4bae59c338cf646f2 | [
"Apache-2.0"
] | null | null | null | src/Filtering/BinaryMathematicalMorphology/DilateABinaryImage/Code.py | justbennet/ITKExamples | cde3b1bfb396042050c399b4bae59c338cf646f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <radius>")
sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
DilateFilterType = itk.BinaryDilateImageFilter[ImageType,
ImageType,
StructuringElementType]
dilateFilter = DilateFilterType.New()
dilateFilter.SetInput(reader.GetOutput())
dilateFilter.SetKernel(structuringElement)
dilateFilter.SetForegroundValue(255)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(dilateFilter.GetOutput())
writer.Update()
| 30.296296 | 75 | 0.732274 |
import sys
import itk
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <radius>")
sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
DilateFilterType = itk.BinaryDilateImageFilter[ImageType,
ImageType,
StructuringElementType]
dilateFilter = DilateFilterType.New()
dilateFilter.SetInput(reader.GetOutput())
dilateFilter.SetKernel(structuringElement)
dilateFilter.SetForegroundValue(255)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(dilateFilter.GetOutput())
writer.Update()
| true | true |
f731577f3eb816602726166e59d42f01b5f4b241 | 9,919 | py | Python | src/sasctl/utils/cli.py | brtieu/python-sasctl | 1eed427c39faaddda78dec4806f12f3f8964890e | [
"Apache-2.0"
] | 30 | 2019-07-12T00:18:21.000Z | 2022-03-18T08:36:35.000Z | src/sasctl/utils/cli.py | brtieu/python-sasctl | 1eed427c39faaddda78dec4806f12f3f8964890e | [
"Apache-2.0"
] | 89 | 2019-07-12T20:46:46.000Z | 2022-03-29T16:16:46.000Z | src/sasctl/utils/cli.py | brtieu/python-sasctl | 1eed427c39faaddda78dec4806f12f3f8964890e | [
"Apache-2.0"
] | 41 | 2019-07-11T15:08:55.000Z | 2022-01-10T05:30:50.000Z | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import inspect
import json
import logging
import os
import pkgutil
import warnings
from collections import namedtuple, defaultdict
from importlib import import_module
from pprint import pprint
ArgInfo = namedtuple('ArgInfo', ['name', 'type', 'required', 'default', 'doc'])
def sasctl_command(name, subname=None):
"""Decorator that tags the function as being usable from the command line.
Parameters
----------
name : str
the name of the command that will be shown on the command line.
subname : str
the name of the service that the command will be listed under
Returns
-------
function
Examples
--------
Define a command called 'cmd' not associated with a service
>>> @sasctl_command('cmd')
>>> def func():
...
Define a command called 'cmd' associated with the 'svc' service
>>> @sasctl_command('svc', 'cmd')
>>> def func():
...
Define a command and allow it's name and service to be auto-assigned
>>> @sasctl_command
>>> def func():
...
"""
def decorator(func):
if isinstance(name, str):
if isinstance(subname, str):
command_name = subname
service_name = name
else:
command_name = name
service_name = subname
else:
command_name = func.__name__
if any(
command_name.startswith(x)
for x in ['list_', 'update_', 'get_', 'create_', 'delete_']
):
parts = command_name.split('_')
command_name = parts[0]
service_name = parts[-1]
else:
service_name = subname
def parse_args():
"""Retrieve argument metadata from function signature and docstring."""
arg_spec = inspect.getargspec(func)
defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
required = [True] * (len(arg_spec.args) - len(defaults)) + [False] * len(
defaults
)
defaults = [None] * (len(arg_spec.args) - len(defaults)) + defaults
types = []
help_doc = []
doc = inspect.getdoc(func)
if doc and doc.find('Parameters\n'):
doc_lines = doc[doc.find('Parameters\n') :].splitlines()
doc_lines.pop(0) # First line is "Parameters"
if doc_lines and doc_lines[0].startswith('---'):
doc_lines.pop(
0
) # Discard ----------- line under "Parameters" heading
while doc_lines:
var = doc_lines.pop(0)
if var.startswith('Returns') or var.strip() == '':
break
if ':' in var:
types.append(var.split(':')[-1].strip())
else:
types.append('str')
if doc_lines and doc_lines[0].startswith(' '):
help_doc.append(doc_lines.pop(0).strip())
else:
help_doc.append('')
else:
types = ['str'] * len(arg_spec.args)
help_doc = [None] * len(arg_spec.args)
return [
ArgInfo(n, t, r, d, o)
for n, t, r, d, o in zip(
arg_spec.args, types, required, defaults, help_doc
)
]
func._cli_command = command_name
func._cli_service = service_name
func._cli_arguments = parse_args
return func
if callable(name):
# allow direct decoration without arguments
return decorator(name)
return decorator
def _find_services(module='sasctl'):
"""Recursively find all functions in all modules that have been decorated as CLI commands."""
m = __import__(module, fromlist=['']) # returns a module
def find_recurse(module, services):
for obj in dir(module):
obj = getattr(module, obj)
source_module = getattr(obj, '__module__', type(obj).__module__)
# Module-level functions that are tagged as commands
if hasattr(obj, '_cli_command') and hasattr(obj, '_cli_service'):
services[obj._cli_service][obj._cli_command] = obj
# Check methods on service classes
elif source_module.startswith('sasctl._services'):
for atr in dir(obj):
atr = getattr(obj, atr)
if hasattr(atr, '_cli_command') and hasattr(atr, '_cli_service'):
services[atr._cli_service][atr._cli_command] = atr
# recurse into submodules
submodules = pkgutil.iter_modules(getattr(module, '__path__', []))
for submodule in submodules:
# ModuleInfo returned py 3.6 has .name
# Tuple of (module_loader, name, ispkg) returned by older versions
submodule_name = getattr(submodule, 'name', submodule[1])
# TODO: Temporary until pzmm fully merged with sasctl
if submodule_name == 'pzmm':
continue
submodule = import_module('.' + submodule_name, package=module.__name__)
# if hasattr(submodule, 'name'):
# # ModuleInfo returned py 3.6
# submodule = import_module('.' + submodule.name, package=module.__name__)
# else:
# # Tuple of (module_loader, name, ispkg) returned by older versions
# submodule = import_module('.' + submodule[1], package=module.__name__)
services = find_recurse(submodule, services)
return services
services = find_recurse(m, defaultdict(dict))
return services
def _get_func_description(func):
description = getattr(func, '__doc__', '')
lines = description.split('\n')
if lines:
return lines[0]
def _build_parser(services):
from sasctl import __version__
# TODO: Set command docstring
# Create standard, top-level arguments
parser = argparse.ArgumentParser(
prog='sasctl', description='sasctl interacts with a SAS Viya environment.'
)
parser.add_argument(
'-k', '--insecure', action='store_true', help='skip SSL verification'
)
parser.add_argument(
'-f', '--format', choices=['json'], default='json', help='output format'
)
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__
)
subparsers = parser.add_subparsers(title='service', dest='service')
subparsers.required = True
for service, commands in services.items():
service_parser = subparsers.add_parser(service)
service_subparser = service_parser.add_subparsers(
title='command', dest='command'
)
service_subparser.required = True
# Add the command and arguments for each command
for command in commands:
func = services[service][command]
cmd_parser = service_subparser.add_parser(
command, help=_get_func_description(func)
)
for arg in func._cli_arguments():
if arg.name in ('self', 'cls'):
continue
if arg.required:
cmd_parser.add_argument(arg.name, help=arg.doc)
else:
cmd_parser.add_argument(
'--' + arg.name,
required=arg.required,
default=arg.default,
help=arg.doc,
)
return parser
def main(args=None):
"""Main entry point when executed as a command line utility."""
from sasctl import Session, current_session
# Find all services and associated commands
services = _find_services()
parser = _build_parser(services)
args = parser.parse_args(args)
if args.verbose is None or args.verbose == 0:
lvl = logging.WARNING
elif args.verbose == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
handler = logging.StreamHandler()
handler.setLevel(lvl)
logging.getLogger('sasctl.core').addHandler(handler)
logging.getLogger('sasctl.core').setLevel(lvl)
warnings.simplefilter('ignore')
func = services[args.service][args.command]
kwargs = vars(args).copy()
# Remove args that shouldn't be passed to the underlying command
for k in ['command', 'service', 'insecure', 'verbose', 'format']:
kwargs.pop(k, None)
username = os.environ.get('SASCTL_USER_NAME')
password = os.environ.get('SASCTL_PASSWORD')
server = os.environ.get('SASCTL_SERVER_NAME')
if server is None:
parser.error(
"Hostname must be specified in the 'SASCTL_SERVER_NAME' environment variable."
)
verify_ssl = not args.insecure
try:
# current_session() should never be set when executing from the
# command line but it allows us to provide a pre-created session
# during testing
with current_session() or Session(
server, username, password, verify_ssl=verify_ssl
):
result = func(**kwargs)
if isinstance(result, list):
pprint([str(x) for x in result])
elif isinstance(result, dict) and args.format == 'json':
print(json.dumps(result, indent=2))
else:
pprint(result)
except RuntimeError as e:
parser.error(e)
| 32.521311 | 97 | 0.569311 |
import argparse
import inspect
import json
import logging
import os
import pkgutil
import warnings
from collections import namedtuple, defaultdict
from importlib import import_module
from pprint import pprint
ArgInfo = namedtuple('ArgInfo', ['name', 'type', 'required', 'default', 'doc'])
def sasctl_command(name, subname=None):
def decorator(func):
if isinstance(name, str):
if isinstance(subname, str):
command_name = subname
service_name = name
else:
command_name = name
service_name = subname
else:
command_name = func.__name__
if any(
command_name.startswith(x)
for x in ['list_', 'update_', 'get_', 'create_', 'delete_']
):
parts = command_name.split('_')
command_name = parts[0]
service_name = parts[-1]
else:
service_name = subname
def parse_args():
arg_spec = inspect.getargspec(func)
defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
required = [True] * (len(arg_spec.args) - len(defaults)) + [False] * len(
defaults
)
defaults = [None] * (len(arg_spec.args) - len(defaults)) + defaults
types = []
help_doc = []
doc = inspect.getdoc(func)
if doc and doc.find('Parameters\n'):
doc_lines = doc[doc.find('Parameters\n') :].splitlines()
doc_lines.pop(0)
if doc_lines and doc_lines[0].startswith('---'):
doc_lines.pop(
0
)
while doc_lines:
var = doc_lines.pop(0)
if var.startswith('Returns') or var.strip() == '':
break
if ':' in var:
types.append(var.split(':')[-1].strip())
else:
types.append('str')
if doc_lines and doc_lines[0].startswith(' '):
help_doc.append(doc_lines.pop(0).strip())
else:
help_doc.append('')
else:
types = ['str'] * len(arg_spec.args)
help_doc = [None] * len(arg_spec.args)
return [
ArgInfo(n, t, r, d, o)
for n, t, r, d, o in zip(
arg_spec.args, types, required, defaults, help_doc
)
]
func._cli_command = command_name
func._cli_service = service_name
func._cli_arguments = parse_args
return func
if callable(name):
return decorator(name)
return decorator
def _find_services(module='sasctl'):
m = __import__(module, fromlist=[''])
def find_recurse(module, services):
for obj in dir(module):
obj = getattr(module, obj)
source_module = getattr(obj, '__module__', type(obj).__module__)
if hasattr(obj, '_cli_command') and hasattr(obj, '_cli_service'):
services[obj._cli_service][obj._cli_command] = obj
elif source_module.startswith('sasctl._services'):
for atr in dir(obj):
atr = getattr(obj, atr)
if hasattr(atr, '_cli_command') and hasattr(atr, '_cli_service'):
services[atr._cli_service][atr._cli_command] = atr
submodules = pkgutil.iter_modules(getattr(module, '__path__', []))
for submodule in submodules:
submodule_name = getattr(submodule, 'name', submodule[1])
if submodule_name == 'pzmm':
continue
submodule = import_module('.' + submodule_name, package=module.__name__)
ces)
return services
services = find_recurse(m, defaultdict(dict))
return services
def _get_func_description(func):
description = getattr(func, '__doc__', '')
lines = description.split('\n')
if lines:
return lines[0]
def _build_parser(services):
from sasctl import __version__
parser = argparse.ArgumentParser(
prog='sasctl', description='sasctl interacts with a SAS Viya environment.'
)
parser.add_argument(
'-k', '--insecure', action='store_true', help='skip SSL verification'
)
parser.add_argument(
'-f', '--format', choices=['json'], default='json', help='output format'
)
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__
)
subparsers = parser.add_subparsers(title='service', dest='service')
subparsers.required = True
for service, commands in services.items():
service_parser = subparsers.add_parser(service)
service_subparser = service_parser.add_subparsers(
title='command', dest='command'
)
service_subparser.required = True
for command in commands:
func = services[service][command]
cmd_parser = service_subparser.add_parser(
command, help=_get_func_description(func)
)
for arg in func._cli_arguments():
if arg.name in ('self', 'cls'):
continue
if arg.required:
cmd_parser.add_argument(arg.name, help=arg.doc)
else:
cmd_parser.add_argument(
'--' + arg.name,
required=arg.required,
default=arg.default,
help=arg.doc,
)
return parser
def main(args=None):
from sasctl import Session, current_session
services = _find_services()
parser = _build_parser(services)
args = parser.parse_args(args)
if args.verbose is None or args.verbose == 0:
lvl = logging.WARNING
elif args.verbose == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
handler = logging.StreamHandler()
handler.setLevel(lvl)
logging.getLogger('sasctl.core').addHandler(handler)
logging.getLogger('sasctl.core').setLevel(lvl)
warnings.simplefilter('ignore')
func = services[args.service][args.command]
kwargs = vars(args).copy()
for k in ['command', 'service', 'insecure', 'verbose', 'format']:
kwargs.pop(k, None)
username = os.environ.get('SASCTL_USER_NAME')
password = os.environ.get('SASCTL_PASSWORD')
server = os.environ.get('SASCTL_SERVER_NAME')
if server is None:
parser.error(
"Hostname must be specified in the 'SASCTL_SERVER_NAME' environment variable."
)
verify_ssl = not args.insecure
try:
# current_session() should never be set when executing from the
# command line but it allows us to provide a pre-created session
# during testing
with current_session() or Session(
server, username, password, verify_ssl=verify_ssl
):
result = func(**kwargs)
if isinstance(result, list):
pprint([str(x) for x in result])
elif isinstance(result, dict) and args.format == 'json':
print(json.dumps(result, indent=2))
else:
pprint(result)
except RuntimeError as e:
parser.error(e)
| true | true |
f731586ef1b81b7d4f26fef5e91e60a95e44ab32 | 28 | py | Python | purectypes/struct_value.py | aguinet/purectypes | e1db225ba865468b1f0d2fe017a7291da41acbfd | [
"Apache-2.0"
] | 19 | 2020-02-22T12:29:39.000Z | 2021-10-02T02:36:01.000Z | purectypes/struct_value.py | aguinet/purectypes | e1db225ba865468b1f0d2fe017a7291da41acbfd | [
"Apache-2.0"
] | null | null | null | purectypes/struct_value.py | aguinet/purectypes | e1db225ba865468b1f0d2fe017a7291da41acbfd | [
"Apache-2.0"
] | 2 | 2020-02-22T12:29:46.000Z | 2020-10-11T18:48:53.000Z | class StructValue:
pass
| 9.333333 | 18 | 0.714286 | class StructValue:
pass
| true | true |
f731592f46955b17ebfec9140599ba5b56cb5dab | 33,850 | py | Python | mlflow/pyfunc/__init__.py | washcycle/mlflow | 5a60ab34a4cecfe0b9636f6df77c087faa8b6959 | [
"Apache-2.0"
] | 3 | 2018-10-16T16:34:46.000Z | 2020-01-08T09:34:34.000Z | mlflow/pyfunc/__init__.py | washcycle/mlflow | 5a60ab34a4cecfe0b9636f6df77c087faa8b6959 | [
"Apache-2.0"
] | null | null | null | mlflow/pyfunc/__init__.py | washcycle/mlflow | 5a60ab34a4cecfe0b9636f6df77c087faa8b6959 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format <pyfunc-filesystem-format>`
for Python models and provides utilities for saving to and loading from this format. The format is
self contained in the sense that it includes all necessary information for anyone to load it and
use it. Dependencies are either stored directly with the model or referenced via a Conda
environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory.
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
>>> tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
>>> cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-inference-api:
*************
Inference API
*************
The convention for pyfunc models is to have a ``predict`` method or function with the following
signature::
predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.Series | pandas.DataFrame]
This convention is relied on by other MLflow components.
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import logging
import numpy as np
import os
import pandas
import shutil
from copy import deepcopy
import mlflow
import mlflow.pyfunc.model
import mlflow.pyfunc.utils
from mlflow.models import Model
from mlflow.pyfunc.model import PythonModel, PythonModelContext, get_default_conda_env
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import PYTHON_VERSION, deprecated, get_major_minor_py_version
from mlflow.utils.file_utils import TempDir, _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS
# Name of the pyfunc flavor as written into the MLmodel configuration file.
FLAVOR_NAME = "python_function"
# Keys of the pyfunc flavor configuration inside the MLmodel file.
MAIN = "loader_module"  # module whose _load_pyfunc() reconstructs the model
CODE = "code"  # model-relative path to packaged code dependencies
DATA = "data"  # model-relative path to model data (file or directory)
ENV = "env"  # model-relative path to the exported Conda environment
PY_VERSION = "python_version"  # Python version the model was saved with
_logger = logging.getLogger(__name__)
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Add a ``pyfunc`` spec to the model configuration.

    Defines the ``pyfunc`` configuration schema. Callers can use this to turn an existing
    directory structure into a valid ``pyfunc`` model flavor; other model flavors use it to
    declare how their output can be consumed as a ``pyfunc``.

    NOTE: all paths are relative to the exported model root directory.

    :param model: Existing model.
    :param loader_module: The module to be used to load the model.
    :param data: Path to the model data.
    :param code: Path to the code dependencies.
    :param env: Conda environment.
    :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor
                   specification. Values must be YAML-serializable.
    :return: Updated model configuration.
    """
    flavor_conf = deepcopy(kwargs)
    flavor_conf[MAIN] = loader_module
    flavor_conf[PY_VERSION] = PYTHON_VERSION
    # Only record the optional entries that were actually provided.
    for key, value in ((CODE, code), (DATA, data), (ENV, env)):
        if value:
            flavor_conf[key] = value
    return model.add_flavor(FLAVOR_NAME, **flavor_conf)
def _load_model_env(path):
    """
    Return the Conda environment entry of a model saved in ``python_function`` format.

    The returned value is a model-relative path to a Conda environment file, or ``None``
    if no environment was recorded at save time.
    """
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV)
def load_model(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    :param model_uri: The location, in URI format, of the MLflow model, for example
                      ``/Users/me/path/to/local/model``, ``s3://my_bucket/path/to/model``
                      or ``runs:/<mlflow_run_id>/run-relative/path/to/model``. See
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
                      artifact-locations>`_ for the supported URI schemes.
    :param suppress_warnings: If ``True``, non-fatal warnings raised while loading the model
                              are suppressed; if ``False``, they are emitted.
    """
    # Delegates to the (deprecated) load_pyfunc implementation.
    return load_pyfunc(model_uri, suppress_warnings=suppress_warnings)
@deprecated("pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    :param model_uri: The location, in URI format, of the MLflow model, for example
                      ``/Users/me/path/to/local/model``, ``s3://my_bucket/path/to/model``
                      or ``runs:/<mlflow_run_id>/run-relative/path/to/model``. See
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
                      artifact-locations>`_ for the supported URI schemes.
    :param suppress_warnings: If ``True``, non-fatal warnings raised while loading the model
                              are suppressed; if ``False``, they are emitted.
    """
    local_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=local_path, flavor_name=FLAVOR_NAME)
    if not suppress_warnings:
        # Warn when the saved model's Python version may be incompatible with the runtime.
        _warn_potentially_incompatible_py_version_if_necessary(
            model_py_version=pyfunc_conf.get(PY_VERSION))
    code_subpath = pyfunc_conf.get(CODE)
    if code_subpath:
        # Prepend the packaged code directory to sys.path before loading the model module.
        mlflow.pyfunc.utils._add_code_to_system_path(
            code_path=os.path.join(local_path, code_subpath))
    if DATA in pyfunc_conf:
        data_path = os.path.join(local_path, pyfunc_conf[DATA])
    else:
        data_path = local_path
    loader_module = importlib.import_module(pyfunc_conf[MAIN])
    return loader_module._load_pyfunc(data_path)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Compare the Python version a model was saved with against the running interpreter and
    log a warning when they differ at the major or minor level (or when the saved version
    is unknown).
    """
    if model_py_version is None:
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION)
        return
    saved_version = get_major_minor_py_version(model_py_version)
    running_version = get_major_minor_py_version(PYTHON_VERSION)
    if saved_version != running_version:
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version, PYTHON_VERSION)
def spark_udf(spark, model_uri, result_type="double"):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are
    ordinals (0, 1, ...).

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    >>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
    >>> df.withColumn("prediction", predict("name", "age")).show()

    :param spark: A SparkSession object.
    :param model_uri: The location, in URI format, of the MLflow model with the
                      :py:mod:`mlflow.pyfunc` flavor, e.g. ``s3://my_bucket/path/to/model``
                      or ``runs:/<mlflow_run_id>/run-relative/path/to/model``.
    :param result_type: the return type of the user-defined function. The value can be either
        a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Only a
        primitive type or an array ``pyspark.sql.types.ArrayType`` of primitive type is
        allowed: int/long/float/double/string, or an array thereof.
    :return: Spark UDF that applies the model's ``predict`` method to the data and returns a
             type specified by ``result_type``, which by default is a double.
    """
    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import ArrayType, DataType
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    if not isinstance(result_type, DataType):
        result_type = _parse_datatype_string(result_type)
    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType
    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
    if not any([isinstance(elem_type, x) for x in supported_types]):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of"
                    " one of the following types: {}".format(str(elem_type),
                                                             str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE)
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    def predict(*args):
        model = SparkModelCache.get_or_load(archive_path)
        schema = {str(i): arg for i, arg in enumerate(args)}
        # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
        columns = [str(i) for i, _ in enumerate(args)]
        pdf = pandas.DataFrame(schema, columns=columns)
        result = model.predict(pdf)
        # BUGFIX: normalize the prediction into a DataFrame *before* filtering dtypes.
        # Previously the dtype filtering was an `elif` of this normalization, so it was
        # silently skipped whenever the model returned a non-DataFrame (e.g. an ndarray).
        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)
        if type(elem_type) == IntegerType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort,
                                           np.int32]).astype(np.int32)
        elif type(elem_type) == LongType:
            # Use np.int32/np.int64 rather than the removed np.int/np.long aliases,
            # which raise AttributeError on NumPy >= 1.24.
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort,
                                           np.int32, np.int64])
        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)
        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)
        if len(result.columns) == 0:
            raise MlflowException(
                message="The model did not produce any values compatible with the requested"
                        " type '{}'. Consider requesting udf with StringType or"
                        " ArrayType(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE)
        if type(elem_type) == StringType:
            result = result.applymap(str)
        if type(result_type) == ArrayType:
            return pandas.Series([row[1].values for row in result.iterrows()])
        else:
            return result[result.columns[0]]

    return pandas_udf(predict, result_type)
def save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,
               mlflow_model=None, python_model=None, artifacts=None, **kwargs):
    """
    Save a Pyfunc model with custom inference logic and optional data dependencies to a path
    on the local filesystem.

    Exactly one of two workflows must be used. The parameters of the first workflow
    (``loader_module``, ``data_path``) and the parameters of the second workflow
    (``python_model``, ``artifacts``) cannot be specified together.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model from
                          ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)`` and must be resolvable from the MLflow
                          library, the model's Conda environment, or ``code_path``.
    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or
                      directories containing file dependencies). These files are *prepended*
                      to the system path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path
                      to a Conda environment yaml file describing the environment this model
                      should be run in. If ``python_model`` is not ``None``, the environment
                      must at least specify the dependencies of
                      :func:`get_default_conda_env()`; if ``None``, that default environment
                      is added to the model.
    :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
                         **python_function** flavor. A fresh ``Model`` is created when
                         ``None``.
    :param python_model: An instance of a subclass of :class:`~PythonModel`, serialized with
                         CloudPickle. Its dependencies must be resolvable from the MLflow
                         library, the model's Conda environment, or ``code_path``.
    :param artifacts: A dictionary of ``<name, artifact_uri>`` entries. Remote URIs are
                      resolved to absolute filesystem paths that ``python_model`` can access
                      as ``context.artifacts[<name>]`` in
                      :func:`PythonModel.load_context()` and :func:`PythonModel.predict()`.
                      If ``None``, no artifacts are added to the model.
    :param kwargs: Only the deprecated ``model`` alias for ``mlflow_model`` is accepted;
                   any other keyword raises ``TypeError``.
    """
    # Deprecated alias: `model` takes precedence over `mlflow_model` when supplied.
    mlflow_model = kwargs.pop('model', mlflow_model)
    if len(kwargs) > 0:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    # BUGFIX: the signature previously used the mutable default `mlflow_model=Model()`,
    # which is evaluated once at import time and therefore shared (and mutated) across all
    # calls that rely on the default. Use None as the sentinel and build a fresh Model here.
    if mlflow_model is None:
        mlflow_model = Model()
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
    second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set)),
            error_code=INVALID_PARAMETER_VALUE)
    elif (loader_module is None) and (python_model is None):
        raise MlflowException(
            message="Either `loader_module` or `python_model` must be specified!",
            error_code=INVALID_PARAMETER_VALUE)
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path, loader_module=loader_module, data_path=data_path,
            code_paths=code_path, conda_env=conda_env, mlflow_model=mlflow_model)
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path, python_model=python_model, artifacts=artifacts, conda_env=conda_env,
            code_paths=code_path, mlflow_model=mlflow_model)
def log_model(artifact_path, loader_module=None, data_path=None, code_path=None, conda_env=None,
              python_model=None, artifacts=None):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an
    MLflow artifact for the current run.

    Exactly one of two workflows must be used: the parameters of the first workflow
    (``loader_module``, ``data_path``) and those of the second (``python_model``,
    ``artifacts``) cannot be specified together.

    :param artifact_path: The run-relative artifact path to which to log the Python model.
    :param loader_module: The name of the Python module used to load the model from
                          ``data_path``. It must define ``_load_pyfunc(data_path)`` and be
                          resolvable from the MLflow library, the model's Conda environment,
                          or ``code_path``.
    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or
                      directories containing file dependencies), *prepended* to the system
                      path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path
                      to a Conda environment yaml file describing the environment this model
                      should be run in. If ``python_model`` is not ``None``, it must at least
                      specify the dependencies of :func:`get_default_conda_env()`; when
                      ``None``, that default environment is added to the model.
    :param python_model: An instance of a subclass of :class:`~PythonModel`, serialized with
                         CloudPickle. Its dependencies must be resolvable from the MLflow
                         library, the model's Conda environment, or ``code_path``.
    :param artifacts: A dictionary of ``<name, artifact_uri>`` entries. Remote URIs are
                      resolved to absolute filesystem paths that ``python_model`` can access
                      as ``context.artifacts[<name>]``. If ``None``, no artifacts are added.
    """
    # Delegate to Model.log, which downloads/saves the flavor and records it on the run.
    return Model.log(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        conda_env=conda_env,
        python_model=python_model,
        artifacts=artifacts)
def _save_model_with_loader_module_and_data_path(path, loader_module, data_path=None,
                                                 code_paths=None, conda_env=None,
                                                 mlflow_model=None):
    """
    Export model as a generic Python function model.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model from
                          ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``.
    :param data_path: Path to a file or directory containing model data.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or
                      directories containing file dependencies). These files are *prepended*
                      to the system path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path
                      to a Conda environment yaml file. If provided, this describes the
                      environment this model should be run in.
    :param mlflow_model: Existing :py:mod:`mlflow.models.Model` configuration to extend;
                         a fresh ``Model`` is created when ``None``.
    :return: Model configuration containing model info.
    """
    # BUGFIX: the signature previously used the mutable default `mlflow_model=Model()`,
    # which is evaluated once at import time and shared across calls. Use a None sentinel.
    if mlflow_model is None:
        mlflow_model = Model()
    if os.path.exists(path):
        raise MlflowException(
            message="Path '{}' already exists".format(path),
            error_code=RESOURCE_ALREADY_EXISTS)
    os.makedirs(path)
    code = None
    data = None
    env = None
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file
    if code_paths is not None:
        for code_path in code_paths:
            _copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
        code = "code"
    if conda_env is not None:
        shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml"))
        env = "mlflow_env.yml"
    mlflow.pyfunc.add_to_model(
        mlflow_model, loader_module=loader_module, code=code, data=data, env=env)
    mlflow_model.save(os.path.join(path, 'MLmodel'))
    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| 46.883657 | 100 | 0.65288 |
import importlib
import logging
import numpy as np
import os
import pandas
import shutil
from copy import deepcopy
import mlflow
import mlflow.pyfunc.model
import mlflow.pyfunc.utils
from mlflow.models import Model
from mlflow.pyfunc.model import PythonModel, PythonModelContext, get_default_conda_env
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import PYTHON_VERSION, deprecated, get_major_minor_py_version
from mlflow.utils.file_utils import TempDir, _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
parms = deepcopy(kwargs)
parms[MAIN] = loader_module
parms[PY_VERSION] = PYTHON_VERSION
if code:
parms[CODE] = code
if data:
parms[DATA] = data
if env:
parms[ENV] = env
return model.add_flavor(FLAVOR_NAME, **parms)
def _load_model_env(path):
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def load_model(model_uri, suppress_warnings=False):
return load_pyfunc(model_uri, suppress_warnings)
@deprecated("pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
model_py_version = conf.get(PY_VERSION)
if not suppress_warnings:
_warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
if CODE in conf and conf[CODE]:
code_path = os.path.join(local_model_path, conf[CODE])
mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path)
data_path = os.path.join(local_model_path, conf[DATA]) if (DATA in conf) else local_model_path
return importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version, PYTHON_VERSION)
def spark_udf(spark, model_uri, result_type="double"):
    """Build a Spark pandas UDF whose rows are scored by the pyfunc model at
    ``model_uri``.

    The model is downloaded once on the driver, shipped to executors through
    ``SparkModelCache`` and lazily loaded per executor.

    :param spark: An active SparkSession.
    :param model_uri: URI of the model to load.
    :param result_type: Spark return type of the UDF, either a ``DataType``
        instance or a DDL string such as ``"double"``. Must be (an array of)
        integer, long, float, double, or string.
    :return: A pandas UDF usable in DataFrame transformations.
    :raises MlflowException: if ``result_type`` is not a supported type.
    """
    # Imports deferred so pyspark stays an optional dependency of this module.
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import ArrayType, DataType
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    if not isinstance(result_type, DataType):
        result_type = _parse_datatype_string(result_type)
    # For array results, validation and dtype coercion apply to the element type.
    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType
    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
    if not any([isinstance(elem_type, x) for x in supported_types]):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of one "
            "of the following types types: {}".format(str(elem_type), str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE)
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    archive_path = SparkModelCache.add_local_model(spark, local_model_path)
    def predict(*args):
        # Runs on executors; each positional arg arrives as a pandas.Series column.
        model = SparkModelCache.get_or_load(archive_path)
        schema = {str(i): arg for i, arg in enumerate(args)}
        # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
        columns = [str(i) for i, _ in enumerate(args)]
        pdf = pandas.DataFrame(schema, columns=columns)
        result = model.predict(pdf)
        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)
        # NOTE(review): because of this if/elif chain, the dtype filtering below
        # is skipped entirely whenever predict() returned a non-DataFrame (only
        # the coercion branch above runs) -- confirm this is intentional.
        elif type(elem_type) == IntegerType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort,
                                           np.int32]).astype(np.int32)
        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int, np.long])
        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)
        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)
        # No column survived the dtype filter: the model output cannot be
        # represented as the requested type.  (NOTE: "The the" typo is part of
        # the runtime message and is left untouched here.)
        if len(result.columns) == 0:
            raise MlflowException(
                message="The the model did not produce any values compatible with the requested "
                "type '{}'. Consider requesting udf with StringType or "
                "Arraytype(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE)
        if type(elem_type) == StringType:
            result = result.applymap(str)
        if type(result_type) == ArrayType:
            # Array result: one element per input row carrying all output columns.
            return pandas.Series([row[1].values for row in result.iterrows()])
        else:
            # Scalar result: only the first output column is returned.
            return result[result.columns[0]]
    return pandas_udf(predict, result_type)
def save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,
               mlflow_model=None, python_model=None, artifacts=None, **kwargs):
    """Save a ``python_function`` model to a local path.

    Exactly one of the two parameter sets must be provided:
    ``loader_module``/``data_path`` (custom loader-module flavor) or
    ``python_model``/``artifacts`` (PythonModel class flavor).

    :param path: Local filesystem destination; must not already exist.
    :param loader_module: Name of the module whose ``_load_pyfunc`` loads the model.
    :param data_path: Path to model data saved alongside the model.
    :param code_path: Code path(s) bundled with the model.
    :param conda_env: Path to a conda environment file to record.
    :param mlflow_model: :class:`Model` configuration to add the flavor to.
        Defaults to a fresh ``Model()`` per call.  (Fix: the previous default
        ``mlflow_model=Model()`` was a mutable default argument -- a single
        instance created at import time that accumulated flavors across calls.)
    :param python_model: ``PythonModel`` instance for the class-based flavor.
    :param artifacts: Artifacts dictionary for the class-based flavor.
    :raises MlflowException: If both or neither parameter set is specified.
    """
    # Legacy alias: `model=` was the old keyword name for `mlflow_model`.
    mlflow_model = kwargs.pop('model', mlflow_model)
    if len(kwargs) > 0:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if mlflow_model is None:
        mlflow_model = Model()
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
    second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set)),
            error_code=INVALID_PARAMETER_VALUE)
    elif (loader_module is None) and (python_model is None):
        raise MlflowException(
            message="Either `loader_module` or `python_model` must be specified!",
            error_code=INVALID_PARAMETER_VALUE)
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path, loader_module=loader_module, data_path=data_path,
            code_paths=code_path, conda_env=conda_env, mlflow_model=mlflow_model)
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path, python_model=python_model, artifacts=artifacts, conda_env=conda_env,
            code_paths=code_path, mlflow_model=mlflow_model)
def log_model(artifact_path, loader_module=None, data_path=None, code_path=None, conda_env=None,
              python_model=None, artifacts=None):
    """Log a ``python_function`` model as an artifact of the current MLflow run.

    Thin wrapper around :meth:`Model.log` with ``flavor=mlflow.pyfunc``; see
    ``save_model`` for the meaning of the individual parameters.
    """
    # Gather the flavor-specific options once, then delegate to Model.log.
    flavor_options = dict(
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
    )
    return Model.log(artifact_path=artifact_path, flavor=mlflow.pyfunc, **flavor_options)
def _save_model_with_loader_module_and_data_path(path, loader_module, data_path=None,
                                                 code_paths=None, conda_env=None,
                                                 mlflow_model=None):
    """Write a custom loader-module pyfunc model to ``path``.

    :param path: Destination directory; must not already exist.
    :param loader_module: Module name recorded as the model loader.
    :param data_path: Optional data file/directory copied under ``data/``.
    :param code_paths: Optional code files/directories copied under ``code/``.
    :param conda_env: Optional conda environment file copied as ``mlflow_env.yml``.
    :param mlflow_model: :class:`Model` to add the flavor to; a fresh ``Model()``
        per call by default.  (Fix: the previous ``mlflow_model=Model()`` was a
        mutable default argument shared across calls, so flavors from earlier
        saves leaked into later ones.)
    :return: The populated :class:`Model` configuration.
    :raises MlflowException: If ``path`` already exists.
    """
    if mlflow_model is None:
        mlflow_model = Model()
    if os.path.exists(path):
        raise MlflowException(
            message="Path '{}' already exists".format(path),
            error_code=RESOURCE_ALREADY_EXISTS)
    os.makedirs(path)
    code = None
    data = None
    env = None
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file
    if code_paths is not None:
        for code_path in code_paths:
            _copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
        code = "code"
    if conda_env is not None:
        shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml"))
        env = "mlflow_env.yml"
    mlflow.pyfunc.add_to_model(
        mlflow_model, loader_module=loader_module, code=code, data=data, env=env)
    mlflow_model.save(os.path.join(path, 'MLmodel'))
    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| true | true |
f7315a0806862ec68601ee3196af37bdc53af7a3 | 3,235 | py | Python | pypureclient/flasharray/FA_2_5/models/certificate_signing_request_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_5/models/certificate_signing_request_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_5/models/certificate_signing_request_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class CertificateSigningRequestResponse(object):
    """Swagger-generated response model wrapping a list of
    ``CertificateSigningRequest`` items.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[CertificateSigningRequest]'
    }
    attribute_map = {
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.CertificateSigningRequest]
    ):
        """
        Keyword args:
            items (list[CertificateSigningRequest])
        """
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject attributes that are not part of the declared model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `CertificateSigningRequestResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset lazy Property placeholders behave as missing attributes.
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Fix: removed the generated `if issubclass(CertificateSigningRequestResponse,
        # dict)` fallback -- this class derives from `object`, so that branch was
        # unreachable dead code.
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CertificateSigningRequestResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.883929 | 105 | 0.559505 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class CertificateSigningRequestResponse(object):
    """Swagger-generated response model wrapping a list of
    ``CertificateSigningRequest`` items.

    Attributes:
      swagger_types (dict): attribute name -> declared swagger type.
      attribute_map (dict): attribute name -> JSON key used on the wire.
    """
    swagger_types = {
        'items': 'list[CertificateSigningRequest]'
    }
    attribute_map = {
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        items=None,
    ):
        """
        Keyword args:
            items (list[CertificateSigningRequest])
        """
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        """Reject attributes that are not part of the declared model."""
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `CertificateSigningRequestResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        """Treat unset lazy Property placeholders as missing attributes."""
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # NOTE: this class derives from `object`, so the branch below is
        # unreachable generated boilerplate.
        if issubclass(CertificateSigningRequestResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, CertificateSigningRequestResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
f7315a1e62e3e9a93eec26f0b1922799b57e4925 | 6,815 | py | Python | resources.py | vermeulendivan/qgis-pipelineplanner | b0d0c05626401b874a193fe358eec6146a204fff | [
"MIT"
] | null | null | null | resources.py | vermeulendivan/qgis-pipelineplanner | b0d0c05626401b874a193fe358eec6146a204fff | [
"MIT"
] | null | null | null | resources.py | vermeulendivan/qgis-pipelineplanner | b0d0c05626401b874a193fe358eec6146a204fff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\xa5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x02\x00\x00\x00\x6f\x15\xaa\xaf\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x04\x3a\x49\x44\x41\x54\x38\x4f\x75\x54\x4d\x6c\
\x1b\x45\x14\xde\x99\x59\xaf\x77\x37\xf6\xfa\x67\xe3\xfc\x37\x4e\
\xdb\x38\x71\xd4\xa2\x26\x8d\x15\x01\x25\x50\x21\x90\x80\x94\x22\
\x50\x85\x84\xf8\xb9\x14\x2e\x88\x6b\x6f\x08\x71\xe4\xcc\x95\x1e\
\xb9\x00\xe2\xc0\x21\x91\x88\xe0\xc0\x6f\xd5\x9f\x10\x08\x24\x4d\
\x71\x12\x8c\x9d\x1f\x37\xfe\x8f\x7f\xd6\xde\x5d\xef\xf0\x66\xd7\
\x4e\xd2\x24\x3c\x7d\x7e\xfe\xe6\xcd\x9b\x99\xf7\x66\xe7\x3d\xf4\
\xee\xcd\x87\xdc\x31\x41\x08\x51\x4a\x5b\x83\xb6\x80\xb1\xc5\x4e\
\x12\x0c\xd3\xc7\x05\x26\x5a\xec\x90\x38\x0b\x40\xf6\xa9\x43\x40\
\x03\x60\xa3\x16\x7b\x14\xf4\x10\x8e\x0c\x21\x52\xa6\x31\x86\x4d\
\x98\x76\x96\xfc\xdf\x46\x4c\x30\x66\xf1\x82\x3a\x4c\xda\xda\xb1\
\xb0\x7f\x1b\x1c\x30\xf8\x1d\x03\x01\x80\x27\x05\xe2\x22\x56\xc0\
\x5d\xf7\xf1\x55\x82\x2d\x8c\xed\x10\x08\xd8\x6d\xf0\x14\xb8\x0d\
\x84\xde\xff\x3c\x03\x11\xb2\x5c\xb9\xc3\x77\x09\x16\x36\x1c\xf1\
\xd5\xf3\xcb\xb3\xb7\x7f\x9a\xcf\x66\x33\x3d\xfd\xe1\xfe\xd8\x6b\
\xfc\xe0\x25\x0e\xb1\xac\x1c\x81\xf0\x1d\x21\x53\xd7\x6e\xb0\x5c\
\x20\xed\x76\xb6\x10\x89\x43\xc6\x54\x23\xbb\xf8\xe5\xad\xb5\xd2\
\xc0\xd3\xd7\x83\x82\x46\xd5\xd1\x95\xef\x6e\x7a\x04\x4b\xea\x3b\
\xcf\xe2\x6a\x39\xb7\x40\x1e\xbf\x76\xa3\xba\x1d\x2f\xaf\xdd\x31\
\x77\xe3\x8d\xc2\xa6\x4b\x09\x11\xc1\x0d\x3b\x89\xc8\xb8\x20\x6e\
\x7c\xbb\x52\x3e\x3b\xfd\xa6\x20\x2b\x21\xba\xe3\x1a\xb9\x12\x1c\
\x9e\xfa\x6b\xee\x53\xb5\xbb\x5f\x54\xc3\x70\x38\x66\x5b\x38\xa7\
\x22\x6c\x54\x0b\xb5\x74\x82\x77\x89\x1e\x91\xb8\xab\x49\xf3\x9f\
\x5f\x7a\x45\x3d\xe0\x6e\x0e\x07\xf4\x1f\x7e\xbd\x35\x18\x7b\xd9\
\xb9\x35\xf6\xa5\x30\xe7\xeb\x8b\x84\xa7\xdf\xc9\x2e\x7e\xcd\x59\
\x26\xb1\x37\xc2\x70\x8f\x8e\x9e\x54\x0a\x97\x2f\x86\x87\xd4\xa6\
\x80\x0d\x2c\x78\xdc\x54\xf3\xee\x3d\x38\x6d\x25\x55\x7d\xd3\xd0\
\xb9\x71\xef\xde\x98\x5c\x39\x23\xd5\x09\x35\x82\x82\x25\xf3\x5c\
\x78\xf2\x25\xf8\x40\xe5\xc4\x42\x23\x93\x34\x8b\x69\x4b\xaf\xd9\
\x1f\x10\x91\xf1\xd8\x93\x9a\x56\xee\x52\x88\x22\xd4\x2d\x4e\x28\
\x6c\xfd\xb1\x9d\x29\xf1\x84\x56\x8a\xe9\xfb\xcb\x4b\xc8\xac\x62\
\xa3\x24\x59\xe5\x52\x6e\x53\xd2\x8b\x11\x55\x1a\x10\x0d\xa3\x5e\
\xc2\x5a\xa1\xc7\x2b\xd5\x1f\xae\x15\xd6\x17\xb1\x1c\x14\x3c\x7e\
\xf4\xd4\xcc\x75\x49\x12\x89\x59\x10\x5c\x44\x19\x7a\x76\xe3\xee\
\x17\xb2\xbf\x27\x3a\x12\x11\x50\x23\xf1\x6f\xd2\x32\xaa\xc9\x64\
\xca\x34\xf4\xde\xfe\x53\x6e\x51\xf6\x07\x42\xb2\xc7\xcf\x51\x53\
\xea\x50\x46\xcf\x3d\xd1\x28\x6f\xaf\xef\x54\xca\xa6\xa0\xc6\x66\
\x48\xef\xe0\xd9\xc4\x46\x3c\x95\x88\xbb\xdc\x72\xa8\x37\x22\xba\
\xac\xe5\xfb\xeb\xe5\x9a\xde\xa8\x15\xe1\xfd\xa6\x52\x5b\x5a\xad\
\xaa\x35\x2c\x35\x18\xec\x1e\x1c\xf3\xfa\x3b\xe1\xb2\x8c\x86\x06\
\xd9\x99\x14\x69\xd5\xbc\xe0\x96\x75\xdd\x8a\xf6\xfb\xc9\xf4\xd5\
\xf7\xca\xc5\x82\x3f\x10\x98\x88\x5d\x1a\x9f\x98\x0c\xf5\x0e\xfd\
\x76\xef\xe7\xca\x5e\xae\xb2\x57\xec\xf4\xcb\xe9\x9c\xa6\x19\x96\
\x41\x85\xa9\xd8\xc4\xed\xc5\xf8\x46\x62\x2b\x57\xac\xbd\x7a\xf5\
\x85\x3b\x0b\xbf\x17\xca\xf5\xdd\xf4\xa6\x4e\x45\xd3\xa4\xb5\x7c\
\x8a\xc7\xc1\x33\xd1\x8b\x42\xb7\xd2\x9c\xb9\xf2\x3c\xe6\x9a\x4b\
\x4b\x5b\x13\xe3\xe7\x75\xc3\xd0\x6a\xb5\x6c\x76\xb3\xa7\xa7\x33\
\x93\x17\x9b\x4d\x8b\xe7\x79\xad\x5a\x84\x67\x2a\xf0\x56\x40\x91\
\x57\x56\xd7\x38\x9c\x94\x50\xad\xdb\x0a\xd5\x9b\xb5\xbd\xf4\x2a\
\x79\xf1\x83\x4f\x1a\xe9\xd5\x48\x34\xaa\xaa\x41\x77\xb0\xef\xde\
\x96\x29\x76\x85\x3b\x3a\x07\x7c\x5d\x7d\x16\x12\x2a\x3b\x7f\x47\
\xa2\xe7\x02\xc1\x60\x40\x91\x20\x17\x9f\xe2\x7d\xfb\x8d\xd7\x67\
\xe7\xbe\xa9\x68\x3a\xa6\x0d\x8f\x0c\xcf\x6e\x98\x5a\x0d\x53\xdb\
\x45\x1f\xce\xe7\x39\x4a\x11\x6d\xc2\xe3\x66\xc5\xd3\xae\x16\xe7\
\xe9\x2f\xcd\x7e\x96\xbe\x3b\x77\xe1\xf2\x2b\x5e\x62\x78\x4e\x3d\
\xe6\xa3\xd5\x3f\x97\x16\x74\x29\x64\xe5\x13\xd9\x74\x6a\x70\xec\
\x19\x57\x47\x97\x32\x36\xc9\x4b\x02\xfa\x68\x3e\x67\x2f\x39\x41\
\xe0\xc9\x82\x64\xe2\x0b\x6b\x3f\x7e\x65\x94\x76\x79\x97\xe0\x0e\
\x9d\x1e\x7d\xee\x2d\x8f\xda\xc7\x65\x1e\x34\xeb\xba\xd0\xd1\x4d\
\x3b\x7c\x54\x12\x99\xf3\xc7\xdf\xe7\xda\x15\x7a\x82\x80\x99\x4d\
\x22\xce\x6a\x9a\x50\x06\xd0\x2f\x8e\xfa\xb6\x07\xf0\x1d\xed\x86\
\xc0\xea\x00\xf4\x23\x40\x36\x60\x16\x34\x71\x11\xd6\x58\x0e\xec\
\xac\x05\x1c\xd4\x39\x5b\xde\x92\x03\x66\x0b\x6b\x5a\xc4\x21\x4e\
\xe7\x62\x86\x16\x01\x3b\xc1\xc8\x86\x43\xec\x23\x9c\x9a\x3c\xf0\
\x6a\x01\x84\xf5\x36\x1b\x4e\x9f\xe3\x08\x41\x0e\x5a\x3e\x87\xa6\
\x18\xda\xac\xdd\xf4\x6c\x10\xb6\x0c\x40\x89\x73\x8c\x3d\x3c\xe2\
\xd3\xbe\x81\xb6\x1b\x8f\x39\x06\x02\x40\xfb\x80\x09\x76\x38\x84\
\xed\xd8\xed\x44\xf6\xb5\xbd\x04\xb9\x6c\xbb\x33\xcb\x13\xf4\x1f\
\x7b\xd5\xbe\x4b\xfc\x0a\xbc\x91\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x10\
\x0f\x65\x03\xa2\
\x00\x70\
\x00\x69\x00\x70\x00\x65\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x5f\x00\x70\x00\x6c\x00\x61\x00\x6e\x00\x6e\x00\x65\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7a\x2e\x14\x2c\xa2\
"
# Qt changed the resource registration format in 5.8: pick the rcc format
# version and matching struct layout for the PyQt5 runtime in use.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with the Qt resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from the Qt resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
| 49.028777 | 122 | 0.711812 |
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\xa5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x02\x00\x00\x00\x6f\x15\xaa\xaf\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x04\x3a\x49\x44\x41\x54\x38\x4f\x75\x54\x4d\x6c\
\x1b\x45\x14\xde\x99\x59\xaf\x77\x37\xf6\xfa\x67\xe3\xfc\x37\x4e\
\xdb\x38\x71\xd4\xa2\x26\x8d\x15\x01\x25\x50\x21\x90\x80\x94\x22\
\x50\x85\x84\xf8\xb9\x14\x2e\x88\x6b\x6f\x08\x71\xe4\xcc\x95\x1e\
\xb9\x00\xe2\xc0\x21\x91\x88\xe0\xc0\x6f\xd5\x9f\x10\x08\x24\x4d\
\x71\x12\x8c\x9d\x1f\x37\xfe\x8f\x7f\xd6\xde\x5d\xef\xf0\x66\xd7\
\x4e\xd2\x24\x3c\x7d\x7e\xfe\xe6\xcd\x9b\x99\xf7\x66\xe7\x3d\xf4\
\xee\xcd\x87\xdc\x31\x41\x08\x51\x4a\x5b\x83\xb6\x80\xb1\xc5\x4e\
\x12\x0c\xd3\xc7\x05\x26\x5a\xec\x90\x38\x0b\x40\xf6\xa9\x43\x40\
\x03\x60\xa3\x16\x7b\x14\xf4\x10\x8e\x0c\x21\x52\xa6\x31\x86\x4d\
\x98\x76\x96\xfc\xdf\x46\x4c\x30\x66\xf1\x82\x3a\x4c\xda\xda\xb1\
\xb0\x7f\x1b\x1c\x30\xf8\x1d\x03\x01\x80\x27\x05\xe2\x22\x56\xc0\
\x5d\xf7\xf1\x55\x82\x2d\x8c\xed\x10\x08\xd8\x6d\xf0\x14\xb8\x0d\
\x84\xde\xff\x3c\x03\x11\xb2\x5c\xb9\xc3\x77\x09\x16\x36\x1c\xf1\
\xd5\xf3\xcb\xb3\xb7\x7f\x9a\xcf\x66\x33\x3d\xfd\xe1\xfe\xd8\x6b\
\xfc\xe0\x25\x0e\xb1\xac\x1c\x81\xf0\x1d\x21\x53\xd7\x6e\xb0\x5c\
\x20\xed\x76\xb6\x10\x89\x43\xc6\x54\x23\xbb\xf8\xe5\xad\xb5\xd2\
\xc0\xd3\xd7\x83\x82\x46\xd5\xd1\x95\xef\x6e\x7a\x04\x4b\xea\x3b\
\xcf\xe2\x6a\x39\xb7\x40\x1e\xbf\x76\xa3\xba\x1d\x2f\xaf\xdd\x31\
\x77\xe3\x8d\xc2\xa6\x4b\x09\x11\xc1\x0d\x3b\x89\xc8\xb8\x20\x6e\
\x7c\xbb\x52\x3e\x3b\xfd\xa6\x20\x2b\x21\xba\xe3\x1a\xb9\x12\x1c\
\x9e\xfa\x6b\xee\x53\xb5\xbb\x5f\x54\xc3\x70\x38\x66\x5b\x38\xa7\
\x22\x6c\x54\x0b\xb5\x74\x82\x77\x89\x1e\x91\xb8\xab\x49\xf3\x9f\
\x5f\x7a\x45\x3d\xe0\x6e\x0e\x07\xf4\x1f\x7e\xbd\x35\x18\x7b\xd9\
\xb9\x35\xf6\xa5\x30\xe7\xeb\x8b\x84\xa7\xdf\xc9\x2e\x7e\xcd\x59\
\x26\xb1\x37\xc2\x70\x8f\x8e\x9e\x54\x0a\x97\x2f\x86\x87\xd4\xa6\
\x80\x0d\x2c\x78\xdc\x54\xf3\xee\x3d\x38\x6d\x25\x55\x7d\xd3\xd0\
\xb9\x71\xef\xde\x98\x5c\x39\x23\xd5\x09\x35\x82\x82\x25\xf3\x5c\
\x78\xf2\x25\xf8\x40\xe5\xc4\x42\x23\x93\x34\x8b\x69\x4b\xaf\xd9\
\x1f\x10\x91\xf1\xd8\x93\x9a\x56\xee\x52\x88\x22\xd4\x2d\x4e\x28\
\x6c\xfd\xb1\x9d\x29\xf1\x84\x56\x8a\xe9\xfb\xcb\x4b\xc8\xac\x62\
\xa3\x24\x59\xe5\x52\x6e\x53\xd2\x8b\x11\x55\x1a\x10\x0d\xa3\x5e\
\xc2\x5a\xa1\xc7\x2b\xd5\x1f\xae\x15\xd6\x17\xb1\x1c\x14\x3c\x7e\
\xf4\xd4\xcc\x75\x49\x12\x89\x59\x10\x5c\x44\x19\x7a\x76\xe3\xee\
\x17\xb2\xbf\x27\x3a\x12\x11\x50\x23\xf1\x6f\xd2\x32\xaa\xc9\x64\
\xca\x34\xf4\xde\xfe\x53\x6e\x51\xf6\x07\x42\xb2\xc7\xcf\x51\x53\
\xea\x50\x46\xcf\x3d\xd1\x28\x6f\xaf\xef\x54\xca\xa6\xa0\xc6\x66\
\x48\xef\xe0\xd9\xc4\x46\x3c\x95\x88\xbb\xdc\x72\xa8\x37\x22\xba\
\xac\xe5\xfb\xeb\xe5\x9a\xde\xa8\x15\xe1\xfd\xa6\x52\x5b\x5a\xad\
\xaa\x35\x2c\x35\x18\xec\x1e\x1c\xf3\xfa\x3b\xe1\xb2\x8c\x86\x06\
\xd9\x99\x14\x69\xd5\xbc\xe0\x96\x75\xdd\x8a\xf6\xfb\xc9\xf4\xd5\
\xf7\xca\xc5\x82\x3f\x10\x98\x88\x5d\x1a\x9f\x98\x0c\xf5\x0e\xfd\
\x76\xef\xe7\xca\x5e\xae\xb2\x57\xec\xf4\xcb\xe9\x9c\xa6\x19\x96\
\x41\x85\xa9\xd8\xc4\xed\xc5\xf8\x46\x62\x2b\x57\xac\xbd\x7a\xf5\
\x85\x3b\x0b\xbf\x17\xca\xf5\xdd\xf4\xa6\x4e\x45\xd3\xa4\xb5\x7c\
\x8a\xc7\xc1\x33\xd1\x8b\x42\xb7\xd2\x9c\xb9\xf2\x3c\xe6\x9a\x4b\
\x4b\x5b\x13\xe3\xe7\x75\xc3\xd0\x6a\xb5\x6c\x76\xb3\xa7\xa7\x33\
\x93\x17\x9b\x4d\x8b\xe7\x79\xad\x5a\x84\x67\x2a\xf0\x56\x40\x91\
\x57\x56\xd7\x38\x9c\x94\x50\xad\xdb\x0a\xd5\x9b\xb5\xbd\xf4\x2a\
\x79\xf1\x83\x4f\x1a\xe9\xd5\x48\x34\xaa\xaa\x41\x77\xb0\xef\xde\
\x96\x29\x76\x85\x3b\x3a\x07\x7c\x5d\x7d\x16\x12\x2a\x3b\x7f\x47\
\xa2\xe7\x02\xc1\x60\x40\x91\x20\x17\x9f\xe2\x7d\xfb\x8d\xd7\x67\
\xe7\xbe\xa9\x68\x3a\xa6\x0d\x8f\x0c\xcf\x6e\x98\x5a\x0d\x53\xdb\
\x45\x1f\xce\xe7\x39\x4a\x11\x6d\xc2\xe3\x66\xc5\xd3\xae\x16\xe7\
\xe9\x2f\xcd\x7e\x96\xbe\x3b\x77\xe1\xf2\x2b\x5e\x62\x78\x4e\x3d\
\xe6\xa3\xd5\x3f\x97\x16\x74\x29\x64\xe5\x13\xd9\x74\x6a\x70\xec\
\x19\x57\x47\x97\x32\x36\xc9\x4b\x02\xfa\x68\x3e\x67\x2f\x39\x41\
\xe0\xc9\x82\x64\xe2\x0b\x6b\x3f\x7e\x65\x94\x76\x79\x97\xe0\x0e\
\x9d\x1e\x7d\xee\x2d\x8f\xda\xc7\x65\x1e\x34\xeb\xba\xd0\xd1\x4d\
\x3b\x7c\x54\x12\x99\xf3\xc7\xdf\xe7\xda\x15\x7a\x82\x80\x99\x4d\
\x22\xce\x6a\x9a\x50\x06\xd0\x2f\x8e\xfa\xb6\x07\xf0\x1d\xed\x86\
\xc0\xea\x00\xf4\x23\x40\x36\x60\x16\x34\x71\x11\xd6\x58\x0e\xec\
\xac\x05\x1c\xd4\x39\x5b\xde\x92\x03\x66\x0b\x6b\x5a\xc4\x21\x4e\
\xe7\x62\x86\x16\x01\x3b\xc1\xc8\x86\x43\xec\x23\x9c\x9a\x3c\xf0\
\x6a\x01\x84\xf5\x36\x1b\x4e\x9f\xe3\x08\x41\x0e\x5a\x3e\x87\xa6\
\x18\xda\xac\xdd\xf4\x6c\x10\xb6\x0c\x40\x89\x73\x8c\x3d\x3c\xe2\
\xd3\xbe\x81\xb6\x1b\x8f\x39\x06\x02\x40\xfb\x80\x09\x76\x38\x84\
\xed\xd8\xed\x44\xf6\xb5\xbd\x04\xb9\x6c\xbb\x33\xcb\x13\xf4\x1f\
\x7b\xd5\xbe\x4b\xfc\x0a\xbc\x91\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x10\
\x0f\x65\x03\xa2\
\x00\x70\
\x00\x69\x00\x70\x00\x65\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x5f\x00\x70\x00\x6c\x00\x61\x00\x6e\x00\x6e\x00\x65\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7a\x2e\x14\x2c\xa2\
"
# Qt changed the resource registration format in 5.8: pick the rcc format
# version and matching struct layout for the PyQt5 runtime in use.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with the Qt resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from the Qt resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
| true | true |
f7315ba8d23659f4ee2ea63c31b028bd2c878b21 | 182 | py | Python | problem0153.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0153.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0153.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
#
# #153 Investigating Gaussian Integers - Project Euler
# https://projecteuler.net/problem=153
#
# Code by Kevin Marciniak
#
###########################
| 20.222222 | 54 | 0.516484 | true | true | |
f7315cfdfc21364f14a0e03bb86bc6f1b502fcd9 | 349 | py | Python | Code/Python2.7/Kattis/08synlists.py | nicholasz2510/General | e2783cad4da7f9b50c952c2b91ef311d22b1d56f | [
"MIT"
] | 1 | 2019-11-21T15:56:03.000Z | 2019-11-21T15:56:03.000Z | Code/Python2.7/Kattis/08synlists.py | nicholasz2510/General | e2783cad4da7f9b50c952c2b91ef311d22b1d56f | [
"MIT"
] | 12 | 2019-11-21T21:00:57.000Z | 2022-02-27T01:46:56.000Z | Code/Python2.7/Kattis/08synlists.py | nicholasz2510/General | e2783cad4da7f9b50c952c2b91ef311d22b1d56f | [
"MIT"
] | 1 | 2019-11-21T20:49:18.000Z | 2019-11-21T20:49:18.000Z | import sys
shifts = int(sys.stdin[0])
file_ = sys.stdin[1]
overwritten = sys.stdin[2]
checker = []
for icmfcr in shifts:
for i in file_:
if i == "1":
checker.append("0")
elif i == "0":
checker.append("1")
if str(checker) == overwritten:
print "Deletion succeeded"
else:
print "Deletion failed"
| 18.368421 | 31 | 0.578797 | import sys
shifts = int(sys.stdin[0])
file_ = sys.stdin[1]
overwritten = sys.stdin[2]
checker = []
for icmfcr in shifts:
for i in file_:
if i == "1":
checker.append("0")
elif i == "0":
checker.append("1")
if str(checker) == overwritten:
print "Deletion succeeded"
else:
print "Deletion failed"
| false | true |
f7315e9ebfe36931ca1757fcf4e6d4f8dd4ad184 | 2,810 | py | Python | .history/src/modules/test_plot/test_plot_20190927183934.py | mattzakh/mattplotlib | 5e9bc779d8c1b7074549615ab6790a9f7163cd59 | [
"MIT"
] | null | null | null | .history/src/modules/test_plot/test_plot_20190927183934.py | mattzakh/mattplotlib | 5e9bc779d8c1b7074549615ab6790a9f7163cd59 | [
"MIT"
] | 5 | 2020-03-24T17:44:10.000Z | 2021-08-23T20:22:20.000Z | .history/src/modules/test_plot/test_plot_20190927183934.py | mattzakh/mattplotlib | 5e9bc779d8c1b7074549615ab6790a9f7163cd59 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# plt.style.use('../notebooks/test.mplstyle')
import seaborn as sns
from logs import logDecorator as lD
import jsonref, pprint
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.test_plot.test_plot'
@lD.log(logBase + '.doSomething')
def doSomething(logger):
    '''Render a 2x2 demo figure and save it as ``test.svg``.

    Draws random line plots, marker plots and overlaid beta-distribution
    histograms with the project matplotlib style sheet applied, mainly to
    eyeball fonts and styling.  (The old docstring claimed it "prints a
    single line", which was wrong; the dead ``mean/cov/x/y`` computation
    that fed only a commented-out ``sns.kdeplot`` call was removed.)

    Parameters
    ----------
    logger : {logging.Logger}
        The logger used for logging error information
    '''
    with plt.style.context('../notebooks/test.mplstyle'):
        w = 7.2
        fig = plt.figure(figsize=(w, w / 1.6))
        # Four manually placed axes: [left, bottom, width, height] in figure coords.
        ax = {}
        ax[0] = plt.axes([0.10, 0.10, 0.35, 0.30])
        ax[1] = plt.axes([0.55, 0.10, 0.35, 0.30])
        ax[2] = plt.axes([0.10, 0.57, 0.35, 0.30])
        ax[3] = plt.axes([0.55, 0.57, 0.35, 0.30])
        # Random line/marker demos: plain loops instead of side-effect list
        # comprehensions.
        for i in range(4):
            ax[0].plot([1, 2, 3], np.random.randint([1, 2, 3], [10, 9, 8], size=3),
                       marker='', label=f'line {i}')
        for i in range(4):
            ax[1].plot([1, 2, 3], np.random.randint([1, 2, 3], [10, 9, 8], size=3),
                       linestyle='', label=f'marker {i}')
        # Overlaid beta-distribution histograms with varying (a, b) parameters.
        params = ((10, 10), (4, 12), (50, 12), (6, 55))
        for a, b in params:
            values = np.random.beta(a, b, size=10000)
            ax[2].hist(values, histtype="stepfilled", bins=30,
                       alpha=0.2, density=True)
        fig.suptitle('Times New Roman')
        for i in range(4):
            ax[i].set_title(f'ax{i} Title')
            ax[i].set_xlabel(f'ax{i} xlabel')
            ax[i].set_ylabel(f'ax{i} ylabel')
            ax[i].legend(loc='upper right')
        # ax3 label deliberately exercises mathtext rendering.
        ax[3].set_xlabel(r'ax3 $a_i \sin(2\pi fx_i)$ label')
        plt.savefig('test.svg')
    return
@lD.log(logBase + '.main')
def main(logger, resultsDict):
    '''Entry point for module 1.

    Prints an opening banner, delegates the real work to ``doSomething``,
    and prints a closing banner.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger used for logging error information
    resultsDict: {dict}
        A dictionary containing information about the
        command line arguments. These can be used for
        overwriting command line arguments as needed.
    '''
    banner = '=' * 30
    print(banner)
    print('Main function of module 1')
    print(banner)
    print('We get a copy of the result dictionary over here ...')
    doSomething()
    print('Getting out of Module 1')
    print('-' * 30)
    return
| 30.543478 | 126 | 0.580071 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from logs import logDecorator as lD
import jsonref, pprint
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.test_plot.test_plot'
@lD.log(logBase + '.doSomething')
def doSomething(logger):
    '''Render a 2x2 demo figure and save it as ``test.svg``.

    Fixes a SyntaxError: the suptitle call had lost its receiver
    (``.suptitle('Times New Roman')``) and is restored to
    ``fig.suptitle(...)``.  Also removes the dead ``mean/cov/x/y``
    computation whose only consumer was commented out.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger used for logging error information
    '''
    with plt.style.context('../notebooks/test.mplstyle'):
        w = 7.2
        fig = plt.figure(figsize=(w, w / 1.6))
        # Four manually placed axes: [left, bottom, width, height] in figure coords.
        ax = {}
        ax[0] = plt.axes([0.10, 0.10, 0.35, 0.30])
        ax[1] = plt.axes([0.55, 0.10, 0.35, 0.30])
        ax[2] = plt.axes([0.10, 0.57, 0.35, 0.30])
        ax[3] = plt.axes([0.55, 0.57, 0.35, 0.30])
        # Random line/marker demos.
        for i in range(4):
            ax[0].plot([1, 2, 3], np.random.randint([1, 2, 3], [10, 9, 8], size=3),
                       marker='', label=f'line {i}')
        for i in range(4):
            ax[1].plot([1, 2, 3], np.random.randint([1, 2, 3], [10, 9, 8], size=3),
                       linestyle='', label=f'marker {i}')
        # Overlaid beta-distribution histograms with varying (a, b) parameters.
        params = ((10, 10), (4, 12), (50, 12), (6, 55))
        for a, b in params:
            values = np.random.beta(a, b, size=10000)
            ax[2].hist(values, histtype="stepfilled", bins=30,
                       alpha=0.2, density=True)
        fig.suptitle('Times New Roman')
        for i in range(4):
            ax[i].set_title(f'ax{i} Title')
            ax[i].set_xlabel(f'ax{i} xlabel')
            ax[i].set_ylabel(f'ax{i} ylabel')
            ax[i].legend(loc='upper right')
        ax[3].set_xlabel(r'ax3 $a_i \sin(2\pi fx_i)$ label')
        plt.savefig('test.svg')
    return
@lD.log(logBase + '.main')
def main(logger, resultsDict):
    '''Entry point for module 1: print banners and run ``doSomething``.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger used for logging error information
    resultsDict: {dict}
        Command-line argument information (currently unused here).
    '''
    print('='*30)
    print('Main function of module 1')
    print('='*30)
    print('We get a copy of the result dictionary over here ...')
    doSomething()
    print('Getting out of Module 1')
    print('-'*30)
    return
| true | true |
f7315faf3403c57b71fcb4aae306f4d130e43b2b | 666 | py | Python | algorithms/python/21.py | scream7/leetcode | 1fe0f5df3ca0019a4d99979cc663993d2492272d | [
"Apache-2.0"
] | null | null | null | algorithms/python/21.py | scream7/leetcode | 1fe0f5df3ca0019a4d99979cc663993d2492272d | [
"Apache-2.0"
] | null | null | null | algorithms/python/21.py | scream7/leetcode | 1fe0f5df3ca0019a4d99979cc663993d2492272d | [
"Apache-2.0"
] | null | null | null | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Merge two sorted singly linked lists into one sorted list.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Dummy head avoids special-casing the first appended node.
        dummy = ListNode(0)
        cur = dummy
        while l1 and l2:
            if l1.val <= l2.val:
                cur.next = l1
                l1 = l1.next
            else:
                # Fix: the previous `elif l2.val < l1.val` re-tested a condition
                # that is always true here (and would have spun forever on
                # incomparable values such as NaN); a plain `else` is correct.
                cur.next = l2
                l2 = l2.next
            cur = cur.next
        # At most one list is non-empty now; append the remainder.
        cur.next = l1 if l1 is not None else l2
        return dummy.next
| 25.615385 | 47 | 0.472973 |
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Merge two sorted singly linked lists into one sorted list.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Dummy head avoids special-casing the first appended node.
        dummy = ListNode(0)
        cur = dummy
        while l1 and l2:
            if l1.val <= l2.val:
                cur.next = l1
                l1 = l1.next
            elif l2.val < l1.val:
                cur.next = l2
                l2 = l2.next
            cur = cur.next
        # At most one list is non-empty now; append the remainder.
        cur.next = l1 if l1 is not None else l2
        return dummy.next
| true | true |
f73160aca8239aa3448982a0bb3d29924fd55682 | 4,190 | py | Python | apps/external_apps/django_openid/signed.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | 3 | 2015-12-25T14:45:36.000Z | 2016-11-28T09:58:03.000Z | apps/external_apps/django_openid/signed.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | apps/external_apps/django_openid/signed.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | """
Functions for creating and restoring url-safe signed pickled objects.
The format used looks like this:
>>> signed.dumps("hello")
'UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A'
There are two components here, separated by a '.'. The first component is a
URLsafe base64 encoded pickle of the object passed to dumps(). The second
component is a base64 encoded SHA1 hash of "$first_component.$secret"
Calling signed.loads(s) checks the signature BEFORE unpickling the object -
this protects against malformed pickle attacks. If the signature fails, a
ValueError subclass is raised (actually a BadSignature):
>>> signed.loads('UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A')
'hello'
>>> signed.loads('UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A-modified')
...
BadSignature: Signature failed: AfZVu7tE6T1K1AecbLiLOGSqZ-A-modified
You can optionally compress the pickle prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signed.dumps(range(1, 10), compress=True)
'.eJzTyCkw4PI05Er0NAJiYyA2AWJTIDYDYnMgtgBiS65EPQDQyQme.EQpzZCCMd3mIa4RXDGnAuMCCAx0'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 pickle.
There are 65 url-safe characters: the 64 used by url-safe base64 and the '.'.
These functions make use of all of them.
"""
import pickle, base64, hashlib
from django.conf import settings
def dumps(obj, secret = None, compress = False, extra_salt = ''):
    """
    Return a URL-safe, sha1-signed, base64-encoded pickle of obj.

    Falls back to settings.SECRET_KEY when secret is None. With
    compress=True, the pickle is zlib-compressed when that actually
    makes it shorter, and the payload is prefixed with '.' so that
    loads() knows to decompress. The prefix is part of the signed
    data, which protects against zip bombs.
    """
    pickled = pickle.dumps(obj)
    compressed_flag = False  # records whether the payload was compressed
    if compress:
        import zlib  # avoid the zlib dependency unless compression is requested
        candidate = zlib.compress(pickled)
        # Only keep the compressed form when it is a genuine win.
        if len(candidate) < (len(pickled) - 1):
            pickled = candidate
            compressed_flag = True
    base64d = encode(pickled).strip('=')
    if compressed_flag:
        base64d = '.' + base64d
    return sign(base64d, (secret or settings.SECRET_KEY) + extra_salt)
def loads(s, secret = None, extra_salt = ''):
    """Reverse of dumps(); raises BadSignature (a ValueError subclass)
    if the signature check fails.
    """
    if isinstance(s, unicode):
        s = s.encode('utf8') # base64 works on bytestrings, not on unicodes
    # unsign() raises BadSignature (a ValueError) on a failed check; the
    # original wrapped it in a no-op ``try/except ValueError: raise``,
    # which re-raised unchanged -- simply let the exception propagate.
    base64d = unsign(s, (secret or settings.SECRET_KEY) + extra_salt)
    decompress = False
    if base64d[0] == '.':
        # It's compressed; uncompress it first
        base64d = base64d[1:]
        decompress = True
    pickled = decode(base64d)
    if decompress:
        import zlib
        pickled = zlib.decompress(pickled)
    return pickle.loads(pickled)
def encode(s):
    """URL-safe base64-encode s with the '=' padding stripped."""
    encoded = base64.urlsafe_b64encode(s)
    return encoded.strip('=')
def decode(s):
    """URL-safe base64-decode s, restoring any '=' padding stripped by encode().

    Bug fix: the number of missing pad characters is ``-len(s) % 4``
    (0, 1 or 2).  The original ``len(s) % 4`` over-padded input whose
    length was 4k+3 (adding 3 pads instead of 1), which makes the total
    length not a multiple of 4 and breaks decoding.
    """
    return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4))
class BadSignature(ValueError):
    """Raised when a signature does not validate.

    Subclasses ValueError, which makes it convenient to catch and
    carries basically the correct semantics for callers that only
    handle the generic case.
    """
def sign(value, key = None):
    """Return value with '.<sha1-signature>' appended.

    The signature is keyed on key, defaulting to settings.SECRET_KEY.
    value must be a bytestring.
    """
    if isinstance(value, unicode):
        # raise-as-call works on both Python 2 and 3, unlike the old
        # ``raise TypeError, msg`` statement form (a SyntaxError on 3.x).
        raise TypeError(
            'sign() needs bytestring, not unicode: %s' % repr(value))
    if key is None:
        key = settings.SECRET_KEY
    return value + '.' + base64_sha1(value + key)
def unsign(signed_value, key = None):
    """Split '<value>.<sig>' and return value when sig matches.

    Raises BadSignature when the signature is missing or wrong.
    signed_value must be a bytestring; key defaults to
    settings.SECRET_KEY.
    """
    if isinstance(signed_value, unicode):
        # Call-form raises are valid on both Python 2 and 3; the old
        # ``raise X, msg`` statement form is a SyntaxError on 3.x.
        raise TypeError('unsign() needs bytestring, not unicode')
    if key is None:
        key = settings.SECRET_KEY
    if not '.' in signed_value:
        raise BadSignature('Missing sig (no . found in value)')
    value, sig = signed_value.rsplit('.', 1)
    if base64_sha1(value + key) == sig:
        return value
    raise BadSignature('Signature failed: %s' % sig)
def base64_sha1(s):
    """Return the URL-safe base64 SHA1 digest of s, minus '=' padding."""
    digest = hashlib.sha1(s).digest()
    return base64.urlsafe_b64encode(digest).strip('=')
| 36.434783 | 83 | 0.699761 | """
Functions for creating and restoring url-safe signed pickled objects.
The format used looks like this:
>>> signed.dumps("hello")
'UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A'
There are two components here, separated by a '.'. The first component is a
URLsafe base64 encoded pickle of the object passed to dumps(). The second
component is a base64 encoded SHA1 hash of "$first_component.$secret"
Calling signed.loads(s) checks the signature BEFORE unpickling the object -
this protects against malformed pickle attacks. If the signature fails, a
ValueError subclass is raised (actually a BadSignature):
>>> signed.loads('UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A')
'hello'
>>> signed.loads('UydoZWxsbycKcDAKLg.AfZVu7tE6T1K1AecbLiLOGSqZ-A-modified')
...
BadSignature: Signature failed: AfZVu7tE6T1K1AecbLiLOGSqZ-A-modified
You can optionally compress the pickle prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signed.dumps(range(1, 10), compress=True)
'.eJzTyCkw4PI05Er0NAJiYyA2AWJTIDYDYnMgtgBiS65EPQDQyQme.EQpzZCCMd3mIa4RXDGnAuMCCAx0'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 pickle.
There are 65 url-safe characters: the 64 used by url-safe base64 and the '.'.
These functions make use of all of them.
"""
import pickle, base64, hashlib
from django.conf import settings
def dumps(obj, secret = None, compress = False, extra_salt = ''):
"""
Returns URL-safe, sha1 signed base64 compressed pickle. If secret is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
"""
pickled = pickle.dumps(obj)
is_compressed = False
if compress:
import zlib # Avoid zlib dependency unless compress is being used
compressed = zlib.compress(pickled)
if len(compressed) < (len(pickled) - 1):
pickled = compressed
is_compressed = True
base64d = encode(pickled).strip('=')
if is_compressed:
base64d = '.' + base64d
return sign(base64d, (secret or settings.SECRET_KEY) + extra_salt)
def loads(s, secret = None, extra_salt = ''):
"Reverse of dumps(), raises ValueError if signature fails"
if isinstance(s, unicode):
s = s.encode('utf8') # base64 works on bytestrings, not on unicodes
try:
base64d = unsign(s, (secret or settings.SECRET_KEY) + extra_salt)
except ValueError:
raise
decompress = False
if base64d[0] == '.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
pickled = decode(base64d)
if decompress:
import zlib
pickled = zlib.decompress(pickled)
return pickle.loads(pickled)
def encode(s):
return base64.urlsafe_b64encode(s).strip('=')
def decode(s):
    # -len(s) % 4 is the exact number of '=' pad chars stripped by
    # encode(); the original len(s) % 4 over-padded length-4k+3 input,
    # making the total length not a multiple of 4 and breaking decoding.
    return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4))
class BadSignature(ValueError):
pass
def sign(value, key = None):
if isinstance(value, unicode):
raise TypeError, \
'sign() needs bytestring, not unicode: %s' % repr(value)
if key is None:
key = settings.SECRET_KEY
return value + '.' + base64_sha1(value + key)
def unsign(signed_value, key = None):
if isinstance(signed_value, unicode):
raise TypeError, 'unsign() needs bytestring, not unicode'
if key is None:
key = settings.SECRET_KEY
if not '.' in signed_value:
raise BadSignature, 'Missing sig (no . found in value)'
value, sig = signed_value.rsplit('.', 1)
if base64_sha1(value + key) == sig:
return value
else:
raise BadSignature, 'Signature failed: %s' % sig
def base64_sha1(s):
return base64.urlsafe_b64encode(hashlib.sha1(s).digest()).strip('=')
| false | true |
f7316132008490fabdc6c23eaeae95ebcafde9b9 | 2,856 | py | Python | setup.py | vikrammodh0111/vectorai | 0ca0adf1599639035603af8158477972b0902784 | [
"Apache-2.0"
] | null | null | null | setup.py | vikrammodh0111/vectorai | 0ca0adf1599639035603af8158477972b0902784 | [
"Apache-2.0"
] | null | null | null | setup.py | vikrammodh0111/vectorai | 0ca0adf1599639035603af8158477972b0902784 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
# Runtime dependencies required by the installed package.
core_req = ["requests", "numpy", "pandas", "appdirs>=1.4.4", "tqdm>=4.27.0", "plotly>=4.0.0"]
# Optional dependency groups, installable as e.g. `pip install vectorai[dev]`.
extras_req = {
    "dev" : ["twine", "black", "pytest", "pytest-cov"],
    "test" : ["pytest", "pytest-cov"],
    "docs" : ["sphinx-rtd-theme>=0.5.0", "nbsphinx>=0.7.1"]
}
# "all" aggregates every extras group into one flat list.
extras_req["all"] = [p for r in extras_req.values() for p in r]

# Keep the base version in one place (it was previously duplicated in
# both branches). Nightly builds, triggered by the IS_VECTORAI_NIGHTLY
# environment variable, publish under a different package name with a
# dotted date stamp appended to the version.
version = '0.2.2'
if 'IS_VECTORAI_NIGHTLY' in os.environ:
    from datetime import datetime
    name = 'vectorai-nightly'
    # date.isoformat() is the documented equivalent of str(date).
    version += '.' + datetime.today().date().isoformat().replace('-', '.')
else:
    name = 'vectorai'
setup(
    name=name,
    version=version,
    author="OnSearch Pty Ltd",
    author_email="dev@vctr.ai",
    description="A Python framework for building vector based applications. Encode, query and analyse data using vectors.",
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="vector, embeddings, machinelearning, ai, artificialintelligence, nlp, tensorflow, pytorch, nearestneighbors, search, analytics, clustering, dimensionalityreduction",
    url="https://github.com/vector-ai/vectorai",
    license="Apache",
    packages=find_packages(exclude=["tests*"]),
    python_requires=">=3",
    install_requires=core_req,
    extras_require=extras_req,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Financial and Insurance Industry",
        "Intended Audience :: Healthcare Industry",
        "Intended Audience :: Manufacturing",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Database",
        "Topic :: Internet :: WWW/HTTP :: Indexing/Search",
        "Topic :: Multimedia :: Sound/Audio :: Conversion",
        "Topic :: Multimedia :: Video :: Conversion",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Image Recognition",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
    ],
)
| 42.626866 | 179 | 0.640056 |
from setuptools import setup, find_packages
import os
core_req = ["requests", "numpy", "pandas", "appdirs>=1.4.4", "tqdm>=4.27.0", "plotly>=4.0.0"]
extras_req = {
"dev" : ["twine", "black", "pytest", "pytest-cov"],
"test" : ["pytest", "pytest-cov"],
"docs" : ["sphinx-rtd-theme>=0.5.0", "nbsphinx>=0.7.1"]
}
extras_req["all"] = [p for r in extras_req.values() for p in r]
if 'IS_VECTORAI_NIGHTLY' in os.environ.keys():
from datetime import datetime
name = 'vectorai-nightly'
version = '0.2.2' + '.' + datetime.today().date().__str__().replace('-', '.')
else:
name = 'vectorai'
version = '0.2.2'
setup(
name=name,
version=version,
author="OnSearch Pty Ltd",
author_email="dev@vctr.ai",
description="A Python framework for building vector based applications. Encode, query and analyse data using vectors.",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="vector, embeddings, machinelearning, ai, artificialintelligence, nlp, tensorflow, pytorch, nearestneighbors, search, analytics, clustering, dimensionalityreduction",
url="https://github.com/vector-ai/vectorai",
license="Apache",
packages=find_packages(exclude=["tests*"]),
python_requires=">=3",
install_requires=core_req,
extras_require=extras_req,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Manufacturing",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Database",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Multimedia :: Sound/Audio :: Conversion",
"Topic :: Multimedia :: Video :: Conversion",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
)
| true | true |
f7316151b54b3cb3affdbe30c1d0eafb1b8f3f72 | 23,981 | py | Python | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 1 | 2016-03-21T14:04:09.000Z | 2016-03-21T14:04:09.000Z | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 35 | 2015-01-22T08:25:02.000Z | 2020-02-17T12:09:12.000Z | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 6 | 2015-01-16T15:32:08.000Z | 2020-01-30T14:34:40.000Z | # A script to help doing the deliveries.
# Now using the Casava directory structure
# The user is asked to provide a project ID, a run name, and an UPPMAX project
import sys
import os
import glob
import re
import grp
from datetime import datetime
import argparse
import stat
from subprocess import check_call, CalledProcessError
from scilifelab.utils.misc import filtered_walk, query_yes_no, touch_file
from scilifelab.utils.timestamp import utc_time
def fixProjName(pname):
    """Normalize a project name for the delivery directory.

    Upper-cases the first character and every character that directly
    follows a '.', leaving all other characters untouched
    (e.g. ``j.doe_11_01`` -> ``J.Doe_11_01``).

    Improvements over the original: builds the result with a list +
    join instead of quadratic string concatenation, and merges the two
    duplicated branches that both reset the flag.
    """
    chars = [pname[0].upper()]
    capitalize_next = False  # True right after a '.' was seen
    for ch in pname[1:]:
        if ch == ".":
            chars.append(ch)
            capitalize_next = True
        else:
            chars.append(ch.upper() if capitalize_next else ch)
            capitalize_next = False
    return "".join(chars)
def is_fastq(fname):
    """Return True if fname looks like a fastq file.

    Recognizes the CASAVA and bcbb naming conventions, including the
    double-dot gzip variants that appear in older runs.
    str.endswith accepts a tuple of suffixes, replacing the manual
    loop over extensions.
    """
    fastq_ext = (".fastq.gz",
                 ".fastq",
                 "_fastq.txt.gz",
                 "_fastq.txt",
                 ".fastq..gz",
                 "_fastq.txt..gz")
    return fname.endswith(fastq_ext)
def create_final_name(fname, date, fc_id, sample_name):
    """Build the delivery file name for a raw fastq file.

    The result has the form
    ``<lane>_<date>_<fc_id>_<sample_name>_<read>.fastq[<ext>]``.
    Both the CASAVA convention
    (``<sample>_<index|NoIndex|Undetermined>_L<lane>_R<read>_<part>.fastq*``)
    and the bcbb convention
    (``<lane>_<date>_<fcid>_[nophix_]<read>_fastq.txt*``) are accepted;
    any ``..`` produced by stray double extensions is collapsed to ``.``.

    Raises ValueError when fname matches neither convention.
    """
    casava = re.match(r'(\S+?)_(?:[ACGTN\-]+|NoIndex|Undetermined)_L0*(\d+)_R(\d)_\d+\.fastq(.*)', fname)
    if casava is not None:
        lane, read, ext = casava.group(2), casava.group(3), casava.group(4)
    else:
        bcbb = re.match(r'(\d+)_(\d+)_([^_]+)_(\d+)_(?:nophix_)?(\d+)_fastq.txt(.*)', fname)
        if bcbb is None:
            raise ValueError("Could not parse file name {:s} correctly!".format(fname))
        lane, read, ext = bcbb.group(1), bcbb.group(5), bcbb.group(6)
    stem = "_".join([lane, date, fc_id, sample_name, read])
    return "{:s}.fastq{:s}".format(stem, ext.replace('..', '.'))
def get_file_copy_list(proj_base_dir, dest_proj_path, fcid, deliver_all_fcs, deliver_nophix, skip_list):
    """Collect the fastq files to deliver as [src, dest_dir, dest_name] triples.

    Walks proj_base_dir (via the project helper filtered_walk), keeping
    only fastq files, restricting to the flowcell directory fcid unless
    deliver_all_fcs is set, and skipping the sample directories in
    skip_list.  Assumes the layout <sample>/<date>_<fcid>/... -- TODO
    confirm against the Casava directory structure produced upstream.
    """
    to_copy = []
    for fqfile in filtered_walk(proj_base_dir,
                                is_fastq,
                                include_dirs=[fcid] if not deliver_all_fcs else None,
                                exclude_dirs=skip_list):
        # Get the run_name and sample_name from the path
        sample_name, run_name, _ = os.path.relpath(fqfile,proj_base_dir).split(os.sep,2)
        date, fc_id = run_name.split('_')
        # Skip if we deliver from nophix and the parent dir is not nophix (or vice versa)
        pdir = os.path.basename(os.path.dirname(fqfile))
        if deliver_nophix and pdir != "nophix":
            continue
        if not deliver_nophix and pdir != run_name:
            continue
        # Skip if a compressed version of the current file exists
        if os.path.exists("{:s}.gz".format(fqfile)):
            print("WARNING: Both compressed and non-compressed versions of {:s} exists! " \
                  "Is compression/decompression in progress? Will deliver compressed version " \
                  "but you should make sure that the delivered files are complete!".format(fqfile))
            continue
        print("DEBUG: source_delivery_path = {:s}".format(os.path.dirname(fqfile)))
        fname = os.path.basename(fqfile)
        print(fname)
        # Destination directory mirrors the sample/run layout; the file
        # itself is renamed to the canonical delivery name.
        dest_run_path = os.path.join(dest_proj_path, sample_name, run_name)
        dest_file_name = create_final_name(fname,date,fc_id,sample_name)
        to_copy.append([fqfile,
                        dest_run_path,
                        dest_file_name])
    return to_copy
def rsync_files(to_copy, logfile, group, dry):
    """Copy each [src, dest_dir, dest_name] entry with rsync.

    Creates destination directories as needed, logs every command to
    logfile, and sets ug+rw permissions on each copied file.  With
    dry=True only prints what would be done.

    NOTE(review): uid/gid are computed below but never applied (there
    is no chown call); group ownership presumably comes from the
    destination directory's setgid bit or rsync defaults -- confirm.
    """
    # Iterate over the files to copy and create directories and copy files as necessary
    successful = 0
    uid = os.getuid()
    gid = os.getgid()
    if group is not None and len(group) > 0:
        gid = grp.getgrnam(group).gr_gid
    for src_file, dst_dir, dst_name in to_copy:
        dst_file = os.path.join(dst_dir, dst_name)
        print "Will copy (rsync) ", src_file, "to ", dst_file
        if not dry:
            # Create the destination directory if necessary
            logfile.write("[{:s}] - Creating run-level delivery directory: {:s} " \
                          "(or leaving it in place if already present)\n".format(utc_time(),
                                                                                 dst_dir))
            if os.path.exists(dst_dir):
                print("Directory {:s} already exists!".format(dst_dir))
            else:
                try:
                    # Create directory hierarchy with ug+rwX permissions
                    os.makedirs(dst_dir, 0770)
                except:
                    print("Could not create run-level delivery directory!")
                    clean_exit(1,logfile,dry)
            # Rsync the file across
            command_to_execute = ['rsync',
                                  '-ac',
                                  src_file,
                                  dst_file]
            logfile.write("[{:s}] - Executing command: {:s}\n".format(utc_time(), " ".join(command_to_execute)))
            logfile.flush()
            try:
                check_call(command_to_execute)
            except CalledProcessError, e:
                # Log the failure before propagating it to the caller.
                logfile.write("[{:s}] - rsync exited with exit code {:d}\n".format(utc_time(), e.returncode))
                raise e
            logfile.write("[{:s}] - rsync exited with exit code 0\n".format(utc_time()))
            successful += 1
            print("{:d} of {:d} files copied successfully".format(successful,len(to_copy)))
            # Modify the permissions to ug+rw
            os.chmod(dst_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
def main():
    """Command-line entry point for the (deprecated) delivery script.

    Parses arguments, locates the project under the Casava path,
    optionally asks which samples to deliver, builds the copy list and
    rsyncs the files into the UPPMAX project's INBOX.
    """
    parser = argparse.ArgumentParser(description="A script to help doing the deliveries, now using the Casava directory structure. " \
        "The user is asked to provide a project ID, a run name, and an UPPMAX project")
    parser.add_argument('-c', '--casava-path', action="store", dest="caspath", default='/proj/a2010002/nobackup/illumina/',
                        help="Specify a path to a Casava directory manually")
    parser.add_argument('-l', '--log-path', action="store", dest="logpath", default='/proj/a2010002/private/delivery_logs',
                        help="Specify a path to a log file")
    parser.add_argument('-i', '--interactive', action="store_true", dest="interactive", default=False,
                        help="Interactively select samples to be delivered")
    parser.add_argument('-d', '--dry-run', action="store_true", dest="dry", default=False,
                        help="Dry run: nothing will be done")
    parser.add_argument('-a', '--deliver-all-fcs', action="store_true", dest="deliver_all_fcs", default=False,
                        help="rsync samples from all flow cells. Default is to only deliver from specified flowcell")
    parser.add_argument('-p', '--nophix', action="store_true", dest="deliver_nophix", default=False,
                        help="Deliver fastq files from nophix subdirectory. Default is to deliver from run directory")
    parser.add_argument('-g', '--group', action="store", dest="group", default="uppmax",
                        help="Group membership to set on copied files")
    parser.add_argument('project_name', action='store', help="Project name to deliver, e.g. J.Doe_10_01")
    parser.add_argument('flowcell_id', action='store', help="Flowcell id to deliver, e.g. 120824_BD1915ACXX")
    parser.add_argument('uppmax_id', action='store', help="UPPMAX project id to deliver to, e.g. b2012001")
    args = parser.parse_args()
    # NOTE(review): "Deprication" is a typo in a user-visible string;
    # left untouched since it is runtime output.
    print("""\n****** Deprication ******\nPlease note that this script is deprecated and the functionality has been replaced by 'pm deliver raw-data'\n""")
    if not args.project_name in os.listdir(args.caspath):
        print("Could not find project. Check directory listing:")
        for f in os.listdir(args.caspath):
            print(f)
        clean_exit(0,None,args.dry)
    fcid = args.flowcell_id
    fcid_comp = fcid.split('_')
    if len(fcid_comp) > 2:
        # Collapse e.g. <date>_<machine>_<fcid> down to <date>_<fcid>
        fcid = fcid_comp[0] + '_' + fcid_comp[-1]
        print("FCID format too long, trying {:s}".format(fcid))
    # Timestamped log file name; dry runs log to stdout instead.
    dt = datetime.now()
    time_str = "_".join([str(dt.year),
                         str(dt.month),
                         str(dt.day),
                         str(dt.hour),
                         str(dt.minute),
                         str(dt.second)])
    logfilename = os.path.join(os.path.normpath(args.logpath),"{:s}.log".format(time_str))
    if not args.dry:
        logfile = open(logfilename, "w")
    else:
        logfile = sys.stdout
    logfile.write("[{:s}] - Project to move files for:\n{:s}\n".format(utc_time(), args.project_name))
    logfile.flush()
    proj_base_dir = os.path.join(args.caspath, args.project_name)
    skip_list = []
    if args.interactive:
        # Ask per sample directory; anything declined goes on the skip list.
        for sample_dir in os.listdir(proj_base_dir):
            if not os.path.isdir(os.path.join(proj_base_dir,sample_dir)):
                continue
            if not query_yes_no("Deliver sample {:s}?".format(sample_dir), default="no"):
                skip_list.append(sample_dir)
    created_proj_dir_name = fixProjName(args.project_name)
    del_path_top = '/proj/' + args.uppmax_id + "/INBOX/" + created_proj_dir_name
    to_copy = get_file_copy_list(proj_base_dir,
                                 del_path_top,
                                 fcid,
                                 args.deliver_all_fcs,
                                 args.deliver_nophix,
                                 skip_list)
    # Prompt user if any of the files are non-compressed
    for fqfile, _, _ in to_copy:
        if os.path.splitext(fqfile)[1] == ".gz":
            continue
        print("WARNING: The file {:s}, which you are about to deliver, does not seem to be compressed. " \
              "It is recommended that you compress files prior to delivery.".format(fqfile))
        if query_yes_no("Do you wish to continue delivering " \
                        "uncompressed fastq files?", default="yes"):
            break
        clean_exit(1,logfile,args.dry)
    rsync_files(to_copy,
                logfile,
                args.group,
                args.dry)
    # Touch the flag for the Uppmax cronjob to fix the INBOX permissions
    touch_file(os.path.join("/sw","uppmax","var","inboxfix","schedule",args.uppmax_id))
    clean_exit(0,logfile,args.dry)
def clean_exit(exitcode, logfile, dry=False):
    """Terminate the script with exitcode, closing logfile first.

    The log handle is only closed for real (non-dry) runs where an
    actual file was opened; dry runs log to stdout, which must stay
    open.
    """
    should_close = logfile is not None and not dry
    if should_close:
        logfile.close()
    sys.exit(exitcode)
# Script entry point: run the delivery workflow when invoked directly.
if __name__ == "__main__":
    main()
########## Tests ###########
import unittest
import shutil
import tempfile
import random
import uuid
class TestDataDelivery(unittest.TestCase):
    """Unit tests for the delivery helpers in this module.

    NOTE(review): written for Python 2 (uses ``xrange`` and implicit
    str/bytes); the rsync tests shell out to the system ``rsync``
    binary and create real temp directories.
    """
    def test_fixProjName(self):
        """Fix project name
        """
        test_pnames = [("j.doe_11_01","J.Doe_11_01"),
                       ("j.Doe_11_01","J.Doe_11_01"),
                       ("J.doe_11_01","J.Doe_11_01"),
                       ("J.Doe_11_01","J.Doe_11_01"),
                       ("doe_11_01","Doe_11_01"),
                       ("j.d.doe_11_01","J.D.Doe_11_01"),]
        for test_pname, exp_pname in test_pnames:
            obs_pname = fixProjName(test_pname)
            self.assertEqual(obs_pname,
                             exp_pname,
                             "Did not get the expected fix ({:s}) for project name {:s} (got {:s})".format(exp_pname,test_pname,obs_pname))
    def test_is_fastq(self):
        """Determine if a file name corresponds to a fastq file
        """
        test_fnames = [("foo.fastq",True),
                       ("foo.fastq.gz",True),
                       ("foo_fastq.txt",True),
                       ("foo_fastq.txt.gz",True),
                       ("foo.fastq.bar",False),
                       ("foo.txt",False),]
        for test_fname, exp_result in test_fnames:
            obs_result = is_fastq(test_fname)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected result ({:s}) for file name {:s}".format(str(exp_result),test_fname))
    def _create_test_files(self, root):
        # Helper: create 10 temp source files, each paired with a random
        # (not yet existing) nested destination directory and file name.
        to_copy = []
        for n in xrange(10):
            fd, sfile = tempfile.mkstemp(suffix=".tmp", prefix="rsync_test_", dir=root)
            os.close(fd)
            # Generate destination file hierarchies
            ddir = root
            for l in xrange(random.randint(1,5)):
                ddir = os.path.join(ddir,str(uuid.uuid4()))
            to_copy.append([sfile,ddir,"{:s}.tmp".format(str(uuid.uuid4()))])
        return to_copy
    def test_rsync_files(self):
        """Test the rsync functionality
        """
        root = tempfile.mkdtemp(prefix="rsync_test_")
        # Create some files to move
        to_copy = self._create_test_files(root)
        # Run rsync
        with open(os.devnull, 'w') as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy,sys.stdout,None,False)
            sys.stdout = old_stdout
        # Verify the copy process
        for src, ddir, dname in to_copy:
            self.assertTrue(os.path.exists(src),
                            "The rsync process have removed source file")
            self.assertTrue(os.path.exists(ddir) and os.path.isdir(ddir),
                            "The expected destination directory was not created")
            dfile = os.path.join(ddir,dname)
            self.assertTrue(os.path.exists(dfile) and os.path.isfile(dfile),
                            "The expected destination file was not created")
            exp_stat = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
            obs_stat = stat.S_IMODE(os.stat(dfile).st_mode)
            self.assertEqual(obs_stat,
                             exp_stat,
                             "The mode of the created file is not as expected")
        shutil.rmtree(root)
    def test_rsync_set_group(self):
        """Test setting the group membership on rsync'd files
        """
        root = tempfile.mkdtemp(prefix="rsync_test_set_group_")
        # Pick a random group the current user belongs to.
        avail_groups = os.getgroups()
        exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]
        # Create some files to move
        to_copy = self._create_test_files(root)
        # Run rsync
        with open(os.devnull, 'w') as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy,sys.stdout,exp_group,False)
            sys.stdout = old_stdout
        # Verify the copy process set the correct group on created directories
        for ddir in set([d[1] for d in to_copy]):
            gid = os.stat(ddir).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(obs_group,
                             exp_group,
                             "Failed to set group '{}' on directory. Group is {}".format(exp_group,
                                                                                         obs_group))
        # Verify the copy process set the correct group
        for src, ddir, dname in to_copy:
            dfile = os.path.join(ddir,dname)
            gid = os.stat(dfile).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(obs_group,
                             exp_group,
                             "Failed to set group '{}' on file. Group is {}".format(exp_group,
                                                                                    obs_group))
    def test_create_final_name(self):
        """Create the destination file name
        """
        date = "111111"
        fcid = "A11A22BCXX"
        sample_name = "P101_150B_index5"
        test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("1_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
                       "1_{}_{}_{}_1.fastq".format(date,fcid,sample_name)),
                      ("1_{}_{}_1_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq..gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq".format(sample_name),
                       "1_{}_{}_{}_1.fastq".format(date,fcid,sample_name))]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
        # Try without the _index part of file name
        sample_name_noindex = "P101_150"
        test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
                      ("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name_noindex),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
                      ("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name_noindex),
                       "1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name_noindex))]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
        # Try some illegal file names and assert that they raise exceptions
        test_names = ["1_{}_{}_1_nophix_1_fastq.gz".format(date,fcid),
                      "a_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
                      "{}_CGATRGT_L1_R1_001.fastq.gz".format(sample_name)]
        for test_name in test_names:
            with self.assertRaises(ValueError):
                create_final_name(test_name,date,fcid,sample_name)
        # Try a file with undetermined reads
        sample_name = "lane1"
        test_names = [("{}_Undetermined_L001_R1_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
    def test_get_file_copy_list(self):
        """Get list of files to copy and the destinations
        """
        # Silence the DEBUG prints emitted by get_file_copy_list.
        so = sys.stdout
        dn = open(os.devnull,"w")
        # Create a file hierarchy to search for files
        root = tempfile.mkdtemp(prefix="test_casava_data_delivery_")
        date = "111111"
        fcs = ["{}_{}".format(date,fcid) for fcid in ["FCA","FCB"]]
        # Create some sample files
        exp_files = []
        samples = []
        for n in xrange(2):
            sample = tempfile.mkdtemp(dir=root)
            samples.append(os.path.basename(sample))
            for fcid in fcs:
                fcdir = os.path.join(sample,fcid)
                nophixdir = os.path.join(fcdir,"nophix")
                for d in [fcdir,nophixdir]:
                    os.makedirs(d)
                    test_names = ["{:d}_{:s}_1_1_fastq.txt.gz".format(random.randint(1,8),
                                                                      fcid),
                                  "{}_CGATGT_L001_R1_001.fastq.gz".format(samples[-1]),
                                  "{}_CGATGT_L001_R1_001.fastq..gz".format(samples[-1]),]
                    for test_name in test_names:
                        test_file = os.path.join(d,test_name)
                        open(test_file,"w").close()
                        exp_files.append([samples[-1],
                                          fcid,
                                          os.path.basename(d) == "nophix",
                                          test_file,
                                          os.path.join(samples[-1],fcid),
                                          create_final_name(os.path.basename(test_name),date,fcid.split("_")[-1],samples[-1])])
        # Get the list of files to copy under various conditions
        for deliver_all_fcs in [False, True]:
            for fcid in fcs:
                for deliver_nophix in [False, True]:
                    for skip_sample_list in [[],[samples[0]],[samples[1]],samples]:
                        sys.stdout = dn
                        obs_to_copy = sorted(get_file_copy_list(root,"",fcid,deliver_all_fcs,deliver_nophix,skip_sample_list))
                        sys.stdout = so
                        exp_to_copy = sorted([ef[3:6] for ef in exp_files if (deliver_all_fcs or ef[1] == fcid) and \
                                              deliver_nophix == ef[2] and \
                                              ef[0] not in skip_sample_list])
                        #import pdb; pdb.set_trace()
                        self.assertListEqual(obs_to_copy,
                                             exp_to_copy,
                                             "The files to copy result did not match the expected for " \
                                             "{:s}".format(", ".join(["{:s}: {:s}".format(k,v) for k, v in \
                                                                      dict(zip(["deliver_all_fcs",
                                                                                "fcid",
                                                                                "deliver_nophix",
                                                                                "skip_samples"],
                                                                               [str(deliver_all_fcs),
                                                                                fcid,
                                                                                str(deliver_nophix),
                                                                                " ".join(skip_sample_list)])).items()])))
| 46.206166 | 155 | 0.517285 |
import sys
import os
import glob
import re
import grp
from datetime import datetime
import argparse
import stat
from subprocess import check_call, CalledProcessError
from scilifelab.utils.misc import filtered_walk, query_yes_no, touch_file
from scilifelab.utils.timestamp import utc_time
def fixProjName(pname):
newname = pname[0].upper()
postperiod = False
for i in range(1, len(pname)):
if pname[i] == ".":
newname += pname[i]
postperiod = True
elif postperiod:
newname += pname[i].upper()
postperiod = False
else:
newname += pname[i]
postperiod = False
return newname
def is_fastq(fname):
fastq_ext = [".fastq.gz",
".fastq",
"_fastq.txt.gz",
"_fastq.txt",
".fastq..gz",
"_fastq.txt..gz"
]
for ext in fastq_ext:
if fname.endswith(ext):
return True
return False
def create_final_name(fname, date, fc_id, sample_name):
"""Create the final name of the delivered file
"""
m = re.match(r'(\S+?)_(?:[ACGTN\-]+|NoIndex|Undetermined)_L0*(\d+)_R(\d)_\d+\.fastq(.*)', fname)
if m is not None:
lane = m.group(2)
read = m.group(3)
ext = m.group(4)
else:
m = re.match(r'(\d+)_(\d+)_([^_]+)_(\d+)_(?:nophix_)?(\d+)_fastq.txt(.*)', fname)
if m is None:
raise ValueError("Could not parse file name {:s} correctly!".format(fname))
lane = m.group(1)
read = m.group(5)
ext = m.group(6)
dest_file_name = "{:s}.fastq{:s}".format("_".join([lane,
date,
fc_id,
sample_name,
read]),
ext.replace('..','.'))
return dest_file_name
def get_file_copy_list(proj_base_dir, dest_proj_path, fcid, deliver_all_fcs, deliver_nophix, skip_list):
to_copy = []
for fqfile in filtered_walk(proj_base_dir,
is_fastq,
include_dirs=[fcid] if not deliver_all_fcs else None,
exclude_dirs=skip_list):
sample_name, run_name, _ = os.path.relpath(fqfile,proj_base_dir).split(os.sep,2)
date, fc_id = run_name.split('_')
pdir = os.path.basename(os.path.dirname(fqfile))
if deliver_nophix and pdir != "nophix":
continue
if not deliver_nophix and pdir != run_name:
continue
if os.path.exists("{:s}.gz".format(fqfile)):
print("WARNING: Both compressed and non-compressed versions of {:s} exists! " \
"Is compression/decompression in progress? Will deliver compressed version " \
"but you should make sure that the delivered files are complete!".format(fqfile))
continue
print("DEBUG: source_delivery_path = {:s}".format(os.path.dirname(fqfile)))
fname = os.path.basename(fqfile)
print(fname)
dest_run_path = os.path.join(dest_proj_path, sample_name, run_name)
dest_file_name = create_final_name(fname,date,fc_id,sample_name)
to_copy.append([fqfile,
dest_run_path,
dest_file_name])
return to_copy
def rsync_files(to_copy, logfile, group, dry):
    """Rsync each ``(src, dest_dir, dest_name)`` entry in *to_copy* into place.

    Creates destination directories as needed, copies with ``rsync -ac``,
    applies the requested *group* ownership and restricts each delivered
    file to user/group read+write.  *dry* only prints what would be done.

    Fixes relative to the previous revision: Python 3 syntax (``print``
    function, ``except ... as``, ``0o770``); ownership is now actually
    applied (``uid``/``gid`` were computed but never used); and the final
    chmod runs per file inside the loop instead of once after it, which
    touched only the last file and crashed on dry runs and empty lists.
    """
    successful = 0
    uid = os.getuid()
    gid = os.getgid()
    if group is not None and len(group) > 0:
        # Deliver with the requested group rather than the caller's default.
        gid = grp.getgrnam(group).gr_gid
    for src_file, dst_dir, dst_name in to_copy:
        dst_file = os.path.join(dst_dir, dst_name)
        print("Will copy (rsync) ", src_file, "to ", dst_file)
        if dry:
            continue
        logfile.write("[{:s}] - Creating run-level delivery directory: {:s} " \
                      "(or leaving it in place if already present)\n".format(utc_time(),
                                                                             dst_dir))
        if os.path.exists(dst_dir):
            print("Directory {:s} already exists!".format(dst_dir))
        else:
            try:
                os.makedirs(dst_dir, 0o770)
            except OSError:
                print("Could not create run-level delivery directory!")
                clean_exit(1, logfile, dry)
        # Hand the directory over to the delivery group as well.
        os.chown(dst_dir, uid, gid)
        command_to_execute = ['rsync',
                              '-ac',
                              src_file,
                              dst_file]
        logfile.write("[{:s}] - Executing command: {:s}\n".format(utc_time(), " ".join(command_to_execute)))
        logfile.flush()
        try:
            check_call(command_to_execute)
        except CalledProcessError as e:
            logfile.write("[{:s}] - rsync exited with exit code {:d}\n".format(utc_time(), e.returncode))
            raise
        logfile.write("[{:s}] - rsync exited with exit code 0\n".format(utc_time()))
        # Deliver each file group-owned and user/group read-write only.
        os.chown(dst_file, uid, gid)
        os.chmod(dst_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
        successful += 1
    print("{:d} of {:d} files copied successfully".format(successful, len(to_copy)))
def main():
    """Command-line entry point.

    Parses the arguments, locates the project's fastq files for the
    requested flowcell, asks for confirmation where appropriate and
    rsyncs the files into the receiving UPPMAX project's INBOX.
    """
    parser = argparse.ArgumentParser(description="A script to help doing the deliveries, now using the Casava directory structure. " \
        "The user is asked to provide a project ID, a run name, and an UPPMAX project")
    parser.add_argument('-c', '--casava-path', action="store", dest="caspath", default='/proj/a2010002/nobackup/illumina/',
                        help="Specify a path to a Casava directory manually")
    parser.add_argument('-l', '--log-path', action="store", dest="logpath", default='/proj/a2010002/private/delivery_logs',
                        help="Specify a path to a log file")
    parser.add_argument('-i', '--interactive', action="store_true", dest="interactive", default=False,
                        help="Interactively select samples to be delivered")
    parser.add_argument('-d', '--dry-run', action="store_true", dest="dry", default=False,
                        help="Dry run: nothing will be done")
    parser.add_argument('-a', '--deliver-all-fcs', action="store_true", dest="deliver_all_fcs", default=False,
                        help="rsync samples from all flow cells. Default is to only deliver from specified flowcell")
    parser.add_argument('-p', '--nophix', action="store_true", dest="deliver_nophix", default=False,
                        help="Deliver fastq files from nophix subdirectory. Default is to deliver from run directory")
    parser.add_argument('-g', '--group', action="store", dest="group", default="uppmax",
                        help="Group membership to set on copied files")
    parser.add_argument('project_name', action='store', help="Project name to deliver, e.g. J.Doe_10_01")
    parser.add_argument('flowcell_id', action='store', help="Flowcell id to deliver, e.g. 120824_BD1915ACXX")
    parser.add_argument('uppmax_id', action='store', help="UPPMAX project id to deliver to, e.g. b2012001")
    args = parser.parse_args()
    # This whole script is deprecated in favour of 'pm deliver raw-data'.
    print("""\n****** Deprication ******\nPlease note that this script is deprecated and the functionality has been replaced by 'pm deliver raw-data'\n""")
    # Bail out early (exit 0) when the project is not under the Casava path,
    # listing what IS there to help the operator spot typos.
    if not args.project_name in os.listdir(args.caspath):
        print("Could not find project. Check directory listing:")
        for f in os.listdir(args.caspath):
            print(f)
        clean_exit(0,None,args.dry)
    # Normalize a long flowcell id (e.g. date_machine_fcid) to date_fcid.
    fcid = args.flowcell_id
    fcid_comp = fcid.split('_')
    if len(fcid_comp) > 2:
        fcid = fcid_comp[0] + '_' + fcid_comp[-1]
        print("FCID format too long, trying {:s}".format(fcid))
    # Log file is named from the current timestamp; dry runs log to stdout.
    dt = datetime.now()
    time_str = "_".join([str(dt.year),
                         str(dt.month),
                         str(dt.day),
                         str(dt.hour),
                         str(dt.minute),
                         str(dt.second)])
    logfilename = os.path.join(os.path.normpath(args.logpath),"{:s}.log".format(time_str))
    if not args.dry:
        logfile = open(logfilename, "w")
    else:
        logfile = sys.stdout
    logfile.write("[{:s}] - Project to move files for:\n{:s}\n".format(utc_time(), args.project_name))
    logfile.flush()
    proj_base_dir = os.path.join(args.caspath, args.project_name)
    # In interactive mode, ask per sample directory; declined samples are
    # excluded from the walk below.
    skip_list = []
    if args.interactive:
        for sample_dir in os.listdir(proj_base_dir):
            if not os.path.isdir(os.path.join(proj_base_dir,sample_dir)):
                continue
            if not query_yes_no("Deliver sample {:s}?".format(sample_dir), default="no"):
                skip_list.append(sample_dir)
    created_proj_dir_name = fixProjName(args.project_name)
    del_path_top = '/proj/' + args.uppmax_id + "/INBOX/" + created_proj_dir_name
    to_copy = get_file_copy_list(proj_base_dir,
                                 del_path_top,
                                 fcid,
                                 args.deliver_all_fcs,
                                 args.deliver_nophix,
                                 skip_list)
    # Warn on the first uncompressed file; the operator may continue
    # (break) or abort the whole delivery (exit 1).
    for fqfile, _, _ in to_copy:
        if os.path.splitext(fqfile)[1] == ".gz":
            continue
        print("WARNING: The file {:s}, which you are about to deliver, does not seem to be compressed. " \
              "It is recommended that you compress files prior to delivery.".format(fqfile))
        if query_yes_no("Do you wish to continue delivering " \
                        "uncompressed fastq files?", default="yes"):
            break
        clean_exit(1,logfile,args.dry)
    rsync_files(to_copy,
                logfile,
                args.group,
                args.dry)
    # Touch the inboxfix schedule marker so permissions get fixed up.
    touch_file(os.path.join("/sw","uppmax","var","inboxfix","schedule",args.uppmax_id))
    clean_exit(0,logfile,args.dry)
def clean_exit(exitcode, logfile, dry=False):
    """Close *logfile* (unless absent or running dry) and exit with *exitcode*."""
    should_close = logfile is not None and not dry
    if should_close:
        logfile.close()
    sys.exit(exitcode)
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
("j.Doe_11_01","J.Doe_11_01"),
("J.doe_11_01","J.Doe_11_01"),
("J.Doe_11_01","J.Doe_11_01"),
("doe_11_01","Doe_11_01"),
("j.d.doe_11_01","J.D.Doe_11_01"),]
for test_pname, exp_pname in test_pnames:
obs_pname = fixProjName(test_pname)
self.assertEqual(obs_pname,
exp_pname,
"Did not get the expected fix ({:s}) for project name {:s} (got {:s})".format(exp_pname,test_pname,obs_pname))
    def test_is_fastq(self):
        """Determine if a file name corresponds to a fastq file
        """
        # (file name, expected classification) pairs covering compressed,
        # uncompressed, legacy _fastq.txt and clearly non-fastq names.
        test_fnames = [("foo.fastq",True),
                       ("foo.fastq.gz",True),
                       ("foo_fastq.txt",True),
                       ("foo_fastq.txt.gz",True),
                       ("foo.fastq.bar",False),
                       ("foo.txt",False),]
        for test_fname, exp_result in test_fnames:
            obs_result = is_fastq(test_fname)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected result ({:s}) for file name {:s}".format(str(exp_result),test_fname))
def _create_test_files(self, root):
to_copy = []
for n in xrange(10):
fd, sfile = tempfile.mkstemp(suffix=".tmp", prefix="rsync_test_", dir=root)
os.close(fd)
ddir = root
for l in xrange(random.randint(1,5)):
ddir = os.path.join(ddir,str(uuid.uuid4()))
to_copy.append([sfile,ddir,"{:s}.tmp".format(str(uuid.uuid4()))])
return to_copy
    def test_rsync_files(self):
        """Test the rsync functionality
        """
        # Requires the rsync binary on PATH; sources are real temp files.
        root = tempfile.mkdtemp(prefix="rsync_test_")
        to_copy = self._create_test_files(root)
        # Silence the prints emitted by rsync_files during the copy.
        with open(os.devnull, 'w') as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy,sys.stdout,None,False)
            sys.stdout = old_stdout
        for src, ddir, dname in to_copy:
            # Sources must survive, destinations must exist with the
            # expected rw-rw---- permission bits.
            self.assertTrue(os.path.exists(src),
                            "The rsync process have removed source file")
            self.assertTrue(os.path.exists(ddir) and os.path.isdir(ddir),
                            "The expected destination directory was not created")
            dfile = os.path.join(ddir,dname)
            self.assertTrue(os.path.exists(dfile) and os.path.isfile(dfile),
                            "The expected destination file was not created")
            exp_stat = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
            obs_stat = stat.S_IMODE(os.stat(dfile).st_mode)
            self.assertEqual(obs_stat,
                             exp_stat,
                             "The mode of the created file is not as expected")
        shutil.rmtree(root)
    def test_rsync_set_group(self):
        """Test setting the group membership on rsync'd files
        """
        root = tempfile.mkdtemp(prefix="rsync_test_set_group_")
        # Pick a random group the current user is actually a member of.
        avail_groups = os.getgroups()
        exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]
        # Create some files to move
        to_copy = self._create_test_files(root)
        # Run rsync
        with open(os.devnull, 'w') as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy,sys.stdout,exp_group,False)
            sys.stdout = old_stdout
        # Verify the copy process set the correct group on created directories
        for ddir in set([d[1] for d in to_copy]):
            gid = os.stat(ddir).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(obs_group,
                             exp_group,
                             "Failed to set group '{}' on directory. Group is {}".format(exp_group,
                                                                                        obs_group))
        # Verify the copy process set the correct group
        for src, ddir, dname in to_copy:
            dfile = os.path.join(ddir,dname)
            gid = os.stat(dfile).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(obs_group,
                             exp_group,
                             "Failed to set group '{}' on file. Group is {}".format(exp_group,
                                                                                   obs_group))
    def test_create_final_name(self):
        """Create the destination file name
        """
        date = "111111"
        fcid = "A11A22BCXX"
        sample_name = "P101_150B_index5"
        # (input file name, expected delivered name) pairs covering the
        # pre-Casava and Casava patterns, with and without compression.
        test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("1_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
                       "1_{}_{}_{}_1.fastq".format(date,fcid,sample_name)),
                      ("1_{}_{}_1_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq..gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
                      ("{}_CGATGT_L001_R1_001.fastq".format(sample_name),
                       "1_{}_{}_{}_1.fastq".format(date,fcid,sample_name))]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
        # Try without the _index part of file name
        sample_name_noindex = "P101_150"
        test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
                      ("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name_noindex),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
                      ("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name_noindex),
                       "1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name_noindex))]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
        # Try some illegal file names and assert that they raise exceptions
        test_names = ["1_{}_{}_1_nophix_1_fastq.gz".format(date,fcid),
                      "a_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
                      "{}_CGATRGT_L1_R1_001.fastq.gz".format(sample_name)]
        for test_name in test_names:
            with self.assertRaises(ValueError):
                create_final_name(test_name,date,fcid,sample_name)
        # Try a file with undetermined reads
        sample_name = "lane1"
        test_names = [("{}_Undetermined_L001_R1_001.fastq.gz".format(sample_name),
                       "1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),]
        for test_fname, exp_result in test_names:
            obs_result = create_final_name(test_fname,date,fcid,sample_name)
            self.assertEqual(obs_result,
                             exp_result,
                             "Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
def test_get_file_copy_list(self):
"""Get list of files to copy and the destinations
"""
so = sys.stdout
dn = open(os.devnull,"w")
# Create a file hierarchy to search for files
root = tempfile.mkdtemp(prefix="test_casava_data_delivery_")
date = "111111"
fcs = ["{}_{}".format(date,fcid) for fcid in ["FCA","FCB"]]
# Create some sample files
exp_files = []
samples = []
for n in xrange(2):
sample = tempfile.mkdtemp(dir=root)
samples.append(os.path.basename(sample))
for fcid in fcs:
fcdir = os.path.join(sample,fcid)
nophixdir = os.path.join(fcdir,"nophix")
for d in [fcdir,nophixdir]:
os.makedirs(d)
test_names = ["{:d}_{:s}_1_1_fastq.txt.gz".format(random.randint(1,8),
fcid),
"{}_CGATGT_L001_R1_001.fastq.gz".format(samples[-1]),
"{}_CGATGT_L001_R1_001.fastq..gz".format(samples[-1]),]
for test_name in test_names:
test_file = os.path.join(d,test_name)
open(test_file,"w").close()
exp_files.append([samples[-1],
fcid,
os.path.basename(d) == "nophix",
test_file,
os.path.join(samples[-1],fcid),
create_final_name(os.path.basename(test_name),date,fcid.split("_")[-1],samples[-1])])
# Get the list of files to copy under various conditions
for deliver_all_fcs in [False, True]:
for fcid in fcs:
for deliver_nophix in [False, True]:
for skip_sample_list in [[],[samples[0]],[samples[1]],samples]:
sys.stdout = dn
obs_to_copy = sorted(get_file_copy_list(root,"",fcid,deliver_all_fcs,deliver_nophix,skip_sample_list))
sys.stdout = so
exp_to_copy = sorted([ef[3:6] for ef in exp_files if (deliver_all_fcs or ef[1] == fcid) and \
deliver_nophix == ef[2] and \
ef[0] not in skip_sample_list])
#import pdb; pdb.set_trace()
self.assertListEqual(obs_to_copy,
exp_to_copy,
"The files to copy result did not match the expected for " \
"{:s}".format(", ".join(["{:s}: {:s}".format(k,v) for k, v in \
dict(zip(["deliver_all_fcs",
"fcid",
"deliver_nophix",
"skip_samples"],
[str(deliver_all_fcs),
fcid,
str(deliver_nophix),
" ".join(skip_sample_list)])).items()])))
| false | true |
f7316298bcfb66abd224d102bd05d708bceccc59 | 647 | py | Python | SAMparser.py | camaclean/bella | c80c012cda05bc15b69db7fd54424823f75b5a21 | [
"BSD-3-Clause-LBNL"
] | 36 | 2018-11-07T14:21:20.000Z | 2020-07-21T03:52:20.000Z | SAMparser.py | camaclean/bella | c80c012cda05bc15b69db7fd54424823f75b5a21 | [
"BSD-3-Clause-LBNL"
] | 5 | 2018-11-09T11:03:36.000Z | 2019-09-10T18:39:39.000Z | SAMparser.py | camaclean/bella | c80c012cda05bc15b69db7fd54424823f75b5a21 | [
"BSD-3-Clause-LBNL"
] | 6 | 2019-05-21T01:15:02.000Z | 2020-06-17T16:34:36.000Z | from simplesam import Reader, Writer
import inspect
import sys, os, fileinput, string

# Convert a SAM alignment stream (path given as argv[1], parsed with
# simplesam's Reader, imported above) into a ground-truth listing of
# "<reference> <read name> <start> <end>" per record.
in_file = open(sys.argv[1], 'r')
in_sam = Reader(in_file)
out_file = open('full_ecoli_mapped_q10_truth.txt', 'w')
x = next(in_sam)
try:
    while (x.qname != ''):
        out_file.write(x.rname + " ")
        out_file.write(x.qname + " ")
        out_file.write(str(x.pos) + " ")
        # End coordinate approximated as start + read length.
        out_file.write(str(x.pos + len(x.seq)) + "\n")
        x = next(in_sam)
except StopIteration:
    # Only exhausting the reader ends the loop; any other error (I/O,
    # malformed record) now propagates instead of being swallowed by the
    # previous bare `except`, which printed success unconditionally.
    print("Long read alignment ground truth generated")
in_file.close()
out_file.close() | 23.107143 | 55 | 0.650696 | from simplesam import Reader, Writer
import inspect
import sys, os, fileinput, string
in_file = open(sys.argv[1], 'r')
in_sam = Reader(in_file)
out_file = open('full_ecoli_mapped_q10_truth.txt', 'w')
x = next(in_sam)
try:
while(x.qname != ''):
out_file.write(x.rname + " ")
out_file.write(x.qname + " ")
out_file.write(str(x.pos) + " ")
out_file.write(str(x.pos + len(x.seq)) + "\n")
x = next(in_sam)
except:
print("Long read alignment ground truth generated")
in_file.close()
out_file.close() | true | true |
f73162a75addcf3c375f9aa1bcc038bdb5b9598e | 16,255 | py | Python | XBNet/main.py | tusharsarkar3/XBNet | 01e385f1c0a446eb38f4dd59ee9c510170bf096b | [
"MIT"
] | 167 | 2021-06-03T18:45:12.000Z | 2022-03-30T10:50:35.000Z | XBNet/main.py | tusharsarkar3/XBNet | 01e385f1c0a446eb38f4dd59ee9c510170bf096b | [
"MIT"
] | 13 | 2021-06-12T04:11:16.000Z | 2022-03-18T15:56:36.000Z | XBNet/main.py | tusharsarkar3/XBNet | 01e385f1c0a446eb38f4dd59ee9c510170bf096b | [
"MIT"
] | 27 | 2021-06-11T08:44:05.000Z | 2022-02-26T11:54:43.000Z | from kivymd.app import MDApp
from kivy.uix.widget import Widget
from kivy.uix.actionbar import ActionBar
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivymd.theming import ThemableBehavior
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import MDList
from kivymd.uix.textfield import MDTextField
from kivy.uix.button import Button
from kivy.lang import Builder
from kivymd.toast import toast
from kivy.uix.screenmanager import Screen, ScreenManager
import time
from kivy.core.window import Window
from kivymd.uix.label import MDLabel
from kivy.uix.modalview import ModalView
from kivymd.uix.filemanager import MDFileManager
from kivymd.theming import ThemeManager
import requests
from kivy.uix.popup import Popup
import os
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training,predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
from os import environ
import pickle
def suppress_qt_warnings():
    """Pin Qt's HiDPI scale-factor environment variables to silence its
    auto-scaling warnings."""
    for var, value in (("QT_DEVICE_PIXEL_RATIO", "0"),
                       ("QT_AUTO_SCREEN_SCALE_FACTOR", "1"),
                       ("QT_SCREEN_SCALE_FACTORS", "1"),
                       ("QT_SCALE_FACTOR", "1")):
        environ[var] = value
Login_Page = """
ScreenManager:
LoginPage
ModelDetails
FileManage
<LoginPage>:
name:"Login"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRoundFlatIconButton:
id: filemanage
text: "Select Dataset"
icon: "folder"
pos_hint: {'center_x': .77, 'center_y': .85}
on_release: root.manager.current = "File"
MDTextField:
id: modelname
hint_text:"Enter the model name: "
pos_hint:{"center_x":0.77,"center_y":0.7}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDTextField:
id: layers
hint_text:"Enter number of layers(For XBNet or NN): "
pos_hint:{"center_x":0.77,"center_y":0.55}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
MDTextField:
id: target
hint_text:"Enter name of target feature: "
pos_hint:{"center_x":0.77,"center_y":0.40}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDRaisedButton:
text:"Build model"
pos_hint:{"center_x":0.77,"center_y":0.25}
size_hint_x:0.3
on_release: root.manager.current = "Model"
on_press: app.get_model(modelname.text,target.text,layers.text)
theme_text_color:"Custom"
text_color:0,0,0,1
<ModelDetails>:
name:"Model"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRaisedButton:
text:"Train"
pos_hint:{"center_x":0.63,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.get_layers()
theme_text_color:"Custom"
text_color:0,0,0,1
MDRaisedButton:
text:"Predict"
pos_hint:{"center_x":0.88,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.predict()
theme_text_color:"Custom"
text_color:0,0,0,1
<FileManage>:
name:"File"
BoxLayout:
FileChooserListView:
canvas.before:
Color:
rgb: 0.1, 0.2, 0.5
Rectangle:
pos: self.pos
size: self.size
on_selection: app.get_path(*args)
"""
class LoginPage(Screen):
    """Dataset-selection / model-configuration screen; layout comes from
    the ``<LoginPage>`` rule in the kv string."""
    pass
class ModelDetails(Screen):
    """Screen holding the per-model parameter fields plus the
    Train/Predict buttons (``<ModelDetails>`` rule in the kv string)."""
    pass
class CustomDropDown(BoxLayout):
    """Placeholder widget; not referenced by the kv layout above —
    presumably reserved for future use (TODO confirm before removing)."""
    pass
class FileManage(Screen):
    """File-chooser screen (``<FileManage>`` rule in the kv string)."""
    pass
# Module-level ScreenManager wired with one instance of each screen.
# NOTE(review): XBNetGUI.build() returns the Builder-loaded kv root
# instead of this manager, so `sm` appears unused — confirm before removal.
sm = ScreenManager()
sm.add_widget(LoginPage(name="Login"))
sm.add_widget(ModelDetails(name="Model"))
sm.add_widget(FileManage(name="File"))
class XBNetGUI(MDApp):
    """KivyMD application that lets the user pick a CSV dataset, configure
    one of several model types (XBNet, plain neural network, XGBoost,
    random forest, decision tree, LightGBM), train it, persist it and run
    predictions on a second CSV file.
    """
    def __init__(self):
        super(XBNetGUI, self).__init__()
        # True once the user pressed "Predict"; get_path() uses it to
        # decide what a file selection means.
        self.predict_phase = False
    class ContentNavigationDrawer(BoxLayout):
        """Unused navigation-drawer placeholder (not referenced in the kv)."""
        pass
    class DrawerList(ThemableBehavior, MDList):
        """Unused drawer-list placeholder (not referenced in the kv)."""
        pass
    def build(self):
        """Build the widget tree from the module-level kv string."""
        self.theme_cls.primary_palette = "Blue"
        login_page = Builder.load_string(Login_Page)
        return login_page
    def get_layers(self):
        """Read the dynamically created parameter fields into
        ``self.layers_dims`` and start training."""
        self.layers_dims = []
        if self.model == "xbnet" or self.model == "neural network":
            # Network models: every field is an integer layer dimension.
            for i,j in self.fields.items():
                self.layers_dims.append(int(j.text))
                print(j.text)
        elif (self.model == "xgboost" or self.model == "randomforest"
                or self.model == "decision tree" or self.model == "lightgbm"):
            # Tree models: fields may be ints (estimators, depth) or
            # floats (learning rate, subsample ratios).
            for i,j in self.fields.items():
                try:
                    self.layers_dims.append(int(j.text))
                except:
                    self.layers_dims.append(float(j.text))
        self.train()
    def process_input(self):
        """Load the selected CSV and prepare ``self.x_data``/``self.y_data``:
        drop/impute sparse columns, label-encode low-cardinality object
        columns, and label-encode the target when it is non-numeric."""
        suppress_qt_warnings()
        column_to_predict = self.target
        data = pd.read_csv(self.file_selected)
        n_df = len(data)
        label_encoded = {}
        imputations = {}
        for i in data.columns:
            imputations[i] = data[i].mode()
            # Columns with >=15% missing values are dropped outright.
            if data[i].isnull().sum() / n_df >= 0.15:
                data.drop(i, axis=1, inplace=True)
            elif data[i].isnull().sum() / n_df < 0.15 and data[i].isnull().sum() / n_df > 0:
                # NOTE(review): fillna is given the full mode() Series here;
                # the scalar data[i].mode()[0] looks intended — confirm.
                data[i].fillna(data[i].mode(), inplace=True)
                imputations[i] = data[i].mode()
        columns_object = list(data.dtypes[data.dtypes == object].index)
        for i in columns_object:
            if i != column_to_predict:
                # Low-cardinality text columns get label-encoded; high-
                # cardinality ones (e.g. free text, ids) are dropped.
                if data[i].nunique() / n_df < 0.4:
                    le = LabelEncoder()
                    data[i] = le.fit_transform(data[i])
                    label_encoded[i] = le
                else:
                    data.drop(i, axis=1, inplace=True)
        x_data = data.drop(column_to_predict, axis=1).to_numpy()
        # Remember the feature columns so predict_results() can reuse them.
        self.columns_finally_used = data.drop(column_to_predict, axis=1).columns
        y_data = data[column_to_predict].to_numpy()
        self.label_y = False
        if y_data.dtype == object:
            self.label_y = True
            self.y_label_encoder = LabelEncoder()
            y_data = self.y_label_encoder.fit_transform(y_data)
        self.label_encoded = label_encoded
        self.imputations = imputations
        toast("Number of features are: " + str(x_data.shape[1]) +
              " classes are: "+ str(len(np.unique(y_data))),duration=5)
        self.x_data = x_data
        self.y_data = y_data
    def train(self):
        """Train the configured model on a 70/30 split, report the
        accuracies via toast, and save the model to disk (.pt for
        networks, .pkl for tree models)."""
        X_train, X_test, y_train, y_test = train_test_split(self.x_data, self.y_data,
                                                            test_size=0.3, random_state=0)
        if self.model == "xbnet" or self.model =="neural network":
            print(self.layers_dims)
            m = self.model
            model = XBNETClassifier( X_train, y_train, self.layers,
                                     input_through_cmd=True, inputs_for_gui=self.layers_dims,
                                     num_layers_boosted=self.n_layers_boosted
                                     )
            criterion = torch.nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
            # NOTE(review): self.model (the model-name string) is overwritten
            # here with the trained network returned by run_XBNET; later
            # string comparisons in predict_results() rely on this — confirm.
            self.model, self.acc, self.lo, self.val_ac, self.val_lo = run_XBNET(X_train, X_test, y_train, y_test, model, criterion, optimizer, 32, 10)
            model.save(m+"_testAccuracy_" +str(max(self.val_ac))[:4] +"_trainAccuracy_" +
                       str(max(self.acc))[:4]+ ".pt",)
            toast("Test Accuracy is: " +str(max(self.val_ac))[:4] +" and Training Accuracy is: " +
                  str(max(self.acc))[:4] + " and model is saved.",duration= 10)
        elif (self.model == "xgboost" or self.model == "randomforest"
                or self.model == "decision tree" or self.model == "lightgbm"):
            # layers_dims order for tree models: [n_estimators, max_depth,
            # learning_rate, subsample, colsample] as built by tree_model().
            if self.model == "xgboost":
                self.model_tree = XGBClassifier(n_estimators=self.layers_dims[0],
                                                max_depth=self.layers_dims[1],
                                                learning_rate= self.layers_dims[2],
                                                subsample= self.layers_dims[3],
                                                colsample_bylevel = self.layers_dims[4],
                                                random_state=0,n_jobs=-1,
                                                )
                self.model_tree.fit(X_train, y_train,eval_metric="mlogloss")
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test,y_test)
            elif self.model == "randomforest":
                self.model_tree = RandomForestClassifier(n_estimators=self.layers_dims[0],
                                                         max_depth=self.layers_dims[1],
                                                         random_state=0,n_jobs=-1)
                self.model_tree.fit(X_train, y_train)
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test,y_test)
            elif self.model == "decision tree":
                self.model_tree = DecisionTreeClassifier(max_depth=self.layers_dims[1],random_state=0)
                self.model_tree.fit(X_train, y_train)
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test,y_test)
            elif self.model == "lightgbm":
                self.model_tree = LGBMClassifier(n_estimators=self.layers_dims[0],
                                                 max_depth=self.layers_dims[1],
                                                 learning_rate= self.layers_dims[2],
                                                 subsample= self.layers_dims[3],
                                                 colsample_bylevel = self.layers_dims[4],
                                                 random_state=0,n_jobs=-1,)
                self.model_tree.fit(X_train, y_train,eval_metric="mlogloss")
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test,y_test)
            toast(text="Training and Testing accuracies are "+str(training_acc*100)
                       +" "+str(testing_acc*100) + " respectively and model is stored",duration=7)
            with open(self.model+"_testAccuracy_" +str(testing_acc)[:4] +"_trainAccuracy_" +
                      str(training_acc)[:4]+ ".pkl", 'wb') as outfile:
                pickle.dump(self.model_tree,outfile)
    def predict(self):
        """Switch to the file chooser so the user can pick a CSV to score."""
        self.predict_phase = True
        self.root.current = "File"
    def predict_results(self):
        """Score the selected CSV with the trained model and write the
        predictions (decoded back to original labels if the target was
        label-encoded) to Predicted_Results.csv."""
        df = pd.read_csv(self.file_selected)
        # Reuse exactly the feature columns and encoders from training.
        data = df[self.columns_finally_used]
        for i in data.columns:
            if data[i].isnull().sum() > 0:
                data[i].fillna(self.imputations[i], inplace=True)
            if i in self.label_encoded.keys():
                data[i] = self.label_encoded[i].transform(data[i])
        if (self.model == "xgboost" or self.model == "randomforest"
            or self.model == "decision tree" or self.model == "lightgbm"):
            predictions = self.model_tree.predict(data.to_numpy())
        else:
            # Network path: `predict` is XBNet.training_utils.predict.
            predictions = predict(self.model, data.to_numpy())
        if self.label_y == True:
            df[self.target] = self.y_label_encoder.inverse_transform(predictions)
        else:
            df[self.target] = predictions
        df.to_csv("Predicted_Results.csv",index=False)
        toast(text="Predicted_Results.csv in this directory has the results",
              duration = 10)
    def get_model(self,model,target,layers):
        """Record the chosen model/target/layer count from the login screen
        and build the matching parameter fields, then preprocess the data."""
        self.model = model.lower()
        if len(layers) > 0:
            self.layers = int(layers)
        self.target = target
        if self.model.lower() == "xbnet":
            self.n_layers_boosted = 1
            self.net_model()
        elif (self.model == "xgboost" or self.model == "randomforest"
              or self.model == "decision tree" or self.model == "lightgbm"):
            self.tree_model()
        elif self.model.lower() == "neural network":
            self.n_layers_boosted = 0
            self.net_model()
        self.process_input()
    def net_model(self):
        """Add one input- and one output-dimension text field per layer to
        the Model screen, spread evenly down the right-hand side."""
        layout = self.root.get_screen('Model')
        # Vertical gap so 2*layers fields fit between top and bottom.
        gap = 1/(2*self.layers+2)
        counter = 1
        self.fields = {}
        for i in range(self.layers):
            lab1 = MDTextField(hint_text="Enter input dimensions of layer "+ str(i+1) +":",
                               pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
                               size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
            counter+=1
            lab2 = MDTextField(hint_text="Enter output dimensions of layer "+ str(i+1) +":",
                               pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
                               size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
            counter +=1
            layout.add_widget(lab1)
            layout.add_widget(lab2)
            self.fields["input_"+str(i+1)] = lab1
            self.fields["output_" + str(i+1)] = lab2
    def tree_model(self):
        """Add the five hyper-parameter text fields for the tree-based
        models to the Model screen (order matters: train() reads them as
        [n_estimators, depth, learning rate, subsample, colsample])."""
        layout = self.root.get_screen('Model')
        self.fields = {}
        lab1 = MDTextField(hint_text="Enter number of estimators: ",
                           pos_hint={"center_x":0.77,"center_y":0.85},
                           size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
        lab2 = MDTextField(hint_text="Enter depth of trees[default:6](Typical 3-10): ",
                           pos_hint={"center_x":0.77,"center_y":0.7},
                           size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
        # NOTE(review): "forr" typo in the hint below is user-visible text;
        # left untouched here since runtime strings must not change.
        lab3 = MDTextField(hint_text="Enter learning rate forr XGBoost(eta)[default:0.3]: ",
                           pos_hint={"center_x":0.77,"center_y":0.55},
                           size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
        lab4 = MDTextField(hint_text="Enter size of subsample[default:1](Typical 0.5-1): ",
                           pos_hint={"center_x":0.77,"center_y":0.4},
                           size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
        lab5 = MDTextField(hint_text="Enter size of colsample_bytree[default:1](Typical 0.5-1): ",
                           pos_hint={"center_x":0.77,"center_y":0.25},
                           size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
        layout.add_widget(lab1)
        layout.add_widget(lab2)
        layout.add_widget(lab3)
        layout.add_widget(lab4)
        layout.add_widget(lab5)
        self.fields["no_trees"] = lab1
        self.fields["depth"] = lab2
        self.fields["learning_rate"] = lab3
        self.fields["subsample"] = lab4
        self.fields["colsample_bytree"] = lab5
    def get_path(self,*args):
        """FileChooser selection callback: remember the chosen file and
        route to prediction or to the login flow depending on phase."""
        print(args)
        self.file_selected = args[1][0]
        print(self.file_selected)
        if self.predict_phase:
            self.root.current = "Model"
            print("hellooo")
            self.predict_results()
        else:
            self.root.current = "Login"
# Launch the Kivy application when executed as a script.
if __name__ == "__main__":
    XBNetGUI().run()
from kivy.uix.widget import Widget
from kivy.uix.actionbar import ActionBar
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivymd.theming import ThemableBehavior
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import MDList
from kivymd.uix.textfield import MDTextField
from kivy.uix.button import Button
from kivy.lang import Builder
from kivymd.toast import toast
from kivy.uix.screenmanager import Screen, ScreenManager
import time
from kivy.core.window import Window
from kivymd.uix.label import MDLabel
from kivy.uix.modalview import ModalView
from kivymd.uix.filemanager import MDFileManager
from kivymd.theming import ThemeManager
import requests
from kivy.uix.popup import Popup
import os
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training,predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
from os import environ
import pickle
def suppress_qt_warnings():
environ["QT_DEVICE_PIXEL_RATIO"] = "0"
environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
environ["QT_SCREEN_SCALE_FACTORS"] = "1"
environ["QT_SCALE_FACTOR"] = "1"
Login_Page = """
ScreenManager:
LoginPage
ModelDetails
FileManage
<LoginPage>:
name:"Login"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRoundFlatIconButton:
id: filemanage
text: "Select Dataset"
icon: "folder"
pos_hint: {'center_x': .77, 'center_y': .85}
on_release: root.manager.current = "File"
MDTextField:
id: modelname
hint_text:"Enter the model name: "
pos_hint:{"center_x":0.77,"center_y":0.7}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDTextField:
id: layers
hint_text:"Enter number of layers(For XBNet or NN): "
pos_hint:{"center_x":0.77,"center_y":0.55}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
MDTextField:
id: target
hint_text:"Enter name of target feature: "
pos_hint:{"center_x":0.77,"center_y":0.40}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDRaisedButton:
text:"Build model"
pos_hint:{"center_x":0.77,"center_y":0.25}
size_hint_x:0.3
on_release: root.manager.current = "Model"
on_press: app.get_model(modelname.text,target.text,layers.text)
theme_text_color:"Custom"
text_color:0,0,0,1
<ModelDetails>:
name:"Model"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRaisedButton:
text:"Train"
pos_hint:{"center_x":0.63,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.get_layers()
theme_text_color:"Custom"
text_color:0,0,0,1
MDRaisedButton:
text:"Predict"
pos_hint:{"center_x":0.88,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.predict()
theme_text_color:"Custom"
text_color:0,0,0,1
<FileManage>:
name:"File"
BoxLayout:
FileChooserListView:
canvas.before:
Color:
rgb: 0.1, 0.2, 0.5
Rectangle:
pos: self.pos
size: self.size
on_selection: app.get_path(*args)
"""
class LoginPage(Screen):
    """Dataset-selection / model-configuration screen (``<LoginPage>``
    rule in the kv string)."""
    pass
class ModelDetails(Screen):
    """Screen with the parameter fields and Train/Predict buttons
    (``<ModelDetails>`` rule in the kv string)."""
    pass
class CustomDropDown(BoxLayout):
    """Placeholder widget; not referenced by the kv layout above —
    presumably reserved for future use (TODO confirm before removing)."""
    pass
class FileManage(Screen):
    """File-chooser screen (``<FileManage>`` rule in the kv string)."""
    pass
# Module-level ScreenManager wired with one instance of each screen.
# NOTE(review): XBNetGUI.build() returns the Builder-loaded kv root
# instead of this manager, so `sm` appears unused — confirm before removal.
sm = ScreenManager()
sm.add_widget(LoginPage(name="Login"))
sm.add_widget(ModelDetails(name="Model"))
sm.add_widget(FileManage(name="File"))
class XBNetGUI(MDApp):
def __init__(self):
super(XBNetGUI, self).__init__()
self.predict_phase = False
class ContentNavigationDrawer(BoxLayout):
pass
class DrawerList(ThemableBehavior, MDList):
pass
def build(self):
self.theme_cls.primary_palette = "Blue"
login_page = Builder.load_string(Login_Page)
return login_page
def get_layers(self):
self.layers_dims = []
if self.model == "xbnet" or self.model == "neural network":
for i,j in self.fields.items():
self.layers_dims.append(int(j.text))
print(j.text)
elif (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
for i,j in self.fields.items():
try:
self.layers_dims.append(int(j.text))
except:
self.layers_dims.append(float(j.text))
self.train()
def process_input(self):
suppress_qt_warnings()
column_to_predict = self.target
data = pd.read_csv(self.file_selected)
n_df = len(data)
label_encoded = {}
imputations = {}
for i in data.columns:
imputations[i] = data[i].mode()
if data[i].isnull().sum() / n_df >= 0.15:
data.drop(i, axis=1, inplace=True)
elif data[i].isnull().sum() / n_df < 0.15 and data[i].isnull().sum() / n_df > 0:
data[i].fillna(data[i].mode(), inplace=True)
imputations[i] = data[i].mode()
columns_object = list(data.dtypes[data.dtypes == object].index)
for i in columns_object:
if i != column_to_predict:
if data[i].nunique() / n_df < 0.4:
le = LabelEncoder()
data[i] = le.fit_transform(data[i])
label_encoded[i] = le
else:
data.drop(i, axis=1, inplace=True)
x_data = data.drop(column_to_predict, axis=1).to_numpy()
self.columns_finally_used = data.drop(column_to_predict, axis=1).columns
y_data = data[column_to_predict].to_numpy()
self.label_y = False
if y_data.dtype == object:
self.label_y = True
self.y_label_encoder = LabelEncoder()
y_data = self.y_label_encoder.fit_transform(y_data)
self.label_encoded = label_encoded
self.imputations = imputations
toast("Number of features are: " + str(x_data.shape[1]) +
" classes are: "+ str(len(np.unique(y_data))),duration=5)
self.x_data = x_data
self.y_data = y_data
    def train(self):
        """Train the selected model on the data prepared by process_input().

        Uses a fixed 70/30 train/test split, saves the trained model to the
        working directory (accuracies embedded in the file name) and reports
        the accuracies via a toast message.
        """
        X_train, X_test, y_train, y_test = train_test_split(self.x_data, self.y_data,
                                                            test_size=0.3, random_state=0)
        if self.model == "xbnet" or self.model == "neural network":
            print(self.layers_dims)
            m = self.model
            model = XBNETClassifier(X_train, y_train, self.layers,
                                    input_through_cmd=True, inputs_for_gui=self.layers_dims,
                                    num_layers_boosted=self.n_layers_boosted
                                    )
            criterion = torch.nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
            # NOTE(review): this overwrites self.model (the model-name
            # string) with the trained network object; predict_results()'s
            # string comparisons then fall through to the network branch,
            # which appears intentional -- confirm.
            self.model, self.acc, self.lo, self.val_ac, self.val_lo = run_XBNET(X_train, X_test, y_train, y_test,
                                                                                model, criterion, optimizer, 32, 10)
            model.save(m + "_testAccuracy_" + str(max(self.val_ac))[:4] + "_trainAccuracy_" +
                       str(max(self.acc))[:4] + ".pt",)
            toast("Test Accuracy is: " + str(max(self.val_ac))[:4] + " and Training Accuracy is: " +
                  str(max(self.acc))[:4] + " and model is saved.", duration=10)
        elif (self.model == "xgboost" or self.model == "randomforest"
              or self.model == "decision tree" or self.model == "lightgbm"):
            # layers_dims layout (from tree_model()):
            # [n_estimators, max_depth, learning_rate, subsample, colsample].
            if self.model == "xgboost":
                self.model_tree = XGBClassifier(n_estimators=self.layers_dims[0],
                                                max_depth=self.layers_dims[1],
                                                learning_rate=self.layers_dims[2],
                                                subsample=self.layers_dims[3],
                                                colsample_bylevel=self.layers_dims[4],
                                                random_state=0, n_jobs=-1,
                                                )
                self.model_tree.fit(X_train, y_train, eval_metric="mlogloss")
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test, y_test)
            elif self.model == "randomforest":
                self.model_tree = RandomForestClassifier(n_estimators=self.layers_dims[0],
                                                         max_depth=self.layers_dims[1],
                                                         random_state=0, n_jobs=-1)
                self.model_tree.fit(X_train, y_train)
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test, y_test)
            elif self.model == "decision tree":
                # Only the depth field (index 1) applies to a single tree.
                self.model_tree = DecisionTreeClassifier(max_depth=self.layers_dims[1], random_state=0)
                self.model_tree.fit(X_train, y_train)
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test, y_test)
            elif self.model == "lightgbm":
                self.model_tree = LGBMClassifier(n_estimators=self.layers_dims[0],
                                                 max_depth=self.layers_dims[1],
                                                 learning_rate=self.layers_dims[2],
                                                 subsample=self.layers_dims[3],
                                                 colsample_bylevel=self.layers_dims[4],
                                                 random_state=0, n_jobs=-1,)
                self.model_tree.fit(X_train, y_train, eval_metric="mlogloss")
                training_acc = self.model_tree.score(X_train, y_train)
                testing_acc = self.model_tree.score(X_test, y_test)
            toast(text="Training and Testing accuracies are " + str(training_acc * 100)
                  + " " + str(testing_acc * 100) + " respectively and model is stored", duration=7)
            # Persist the fitted sklearn/xgboost/lightgbm model with pickle.
            with open(self.model + "_testAccuracy_" + str(testing_acc)[:4] + "_trainAccuracy_" +
                      str(training_acc)[:4] + ".pkl", 'wb') as outfile:
                pickle.dump(self.model_tree, outfile)
    def predict(self):
        """Switch to the file chooser so the user can pick data to predict on."""
        # get_path() checks this flag to route the chosen file to
        # predict_results() instead of the training flow.
        self.predict_phase = True
        self.root.current = "File"
def predict_results(self):
df = pd.read_csv(self.file_selected)
data = df[self.columns_finally_used]
for i in data.columns:
if data[i].isnull().sum() > 0:
data[i].fillna(self.imputations[i], inplace=True)
if i in self.label_encoded.keys():
data[i] = self.label_encoded[i].transform(data[i])
if (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
predictions = self.model_tree.predict(data.to_numpy())
else:
predictions = predict(self.model, data.to_numpy())
if self.label_y == True:
df[self.target] = self.y_label_encoder.inverse_transform(predictions)
else:
df[self.target] = predictions
df.to_csv("Predicted_Results.csv",index=False)
toast(text="Predicted_Results.csv in this directory has the results",
duration = 10)
def get_model(self,model,target,layers):
self.model = model.lower()
if len(layers) > 0:
self.layers = int(layers)
self.target = target
if self.model.lower() == "xbnet":
self.n_layers_boosted = 1
self.net_model()
elif (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
self.tree_model()
elif self.model.lower() == "neural network":
self.n_layers_boosted = 0
self.net_model()
self.process_input()
def net_model(self):
layout = self.root.get_screen('Model')
gap = 1/(2*self.layers+2)
counter = 1
self.fields = {}
for i in range(self.layers):
lab1 = MDTextField(hint_text="Enter input dimensions of layer "+ str(i+1) +":",
pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
counter+=1
lab2 = MDTextField(hint_text="Enter output dimensions of layer "+ str(i+1) +":",
pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
counter +=1
layout.add_widget(lab1)
layout.add_widget(lab2)
self.fields["input_"+str(i+1)] = lab1
self.fields["output_" + str(i+1)] = lab2
def tree_model(self):
layout = self.root.get_screen('Model')
self.fields = {}
lab1 = MDTextField(hint_text="Enter number of estimators: ",
pos_hint={"center_x":0.77,"center_y":0.85},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab2 = MDTextField(hint_text="Enter depth of trees[default:6](Typical 3-10): ",
pos_hint={"center_x":0.77,"center_y":0.7},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab3 = MDTextField(hint_text="Enter learning rate forr XGBoost(eta)[default:0.3]: ",
pos_hint={"center_x":0.77,"center_y":0.55},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab4 = MDTextField(hint_text="Enter size of subsample[default:1](Typical 0.5-1): ",
pos_hint={"center_x":0.77,"center_y":0.4},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab5 = MDTextField(hint_text="Enter size of colsample_bytree[default:1](Typical 0.5-1): ",
pos_hint={"center_x":0.77,"center_y":0.25},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
layout.add_widget(lab1)
layout.add_widget(lab2)
layout.add_widget(lab3)
layout.add_widget(lab4)
layout.add_widget(lab5)
self.fields["no_trees"] = lab1
self.fields["depth"] = lab2
self.fields["learning_rate"] = lab3
self.fields["subsample"] = lab4
self.fields["colsample_bytree"] = lab5
    def get_path(self, *args):
        """FileChooser ``on_selection`` callback: record the chosen file.

        ``args[1]`` is the selection list from the FileChooserListView; only
        the first selected entry is used.
        """
        print(args)  # leftover debug output
        self.file_selected = args[1][0]
        print(self.file_selected)  # leftover debug output
        if self.predict_phase:
            # A model is already trained: return to the Model screen and run
            # predictions on the newly selected file.
            self.root.current = "Model"
            print("hellooo")  # leftover debug output
            self.predict_results()
        else:
            # Still configuring: go back to the login/configuration screen.
            self.root.current = "Login"
# Launch the GUI when executed as a script.
if __name__ == "__main__":
    XBNetGUI().run()
f7316351492d58d868c0577a2c53428d4e7bd48c | 815 | py | Python | apiapp/views.py | cansati/api-project | 9760025d84e91997ee9d3e141263e903ec95d6df | [
"MIT"
] | null | null | null | apiapp/views.py | cansati/api-project | 9760025d84e91997ee9d3e141263e903ec95d6df | [
"MIT"
] | null | null | null | apiapp/views.py | cansati/api-project | 9760025d84e91997ee9d3e141263e903ec95d6df | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from . import serializers, models, permissions
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class UserModelViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user profiles.

    Token-authenticated; a user must be logged in and may only modify their
    own profile (enforced by ``permissions.UpdateOwnProfile``).  Supports
    ``?search=`` over id, name, surname and email.
    """
    serializer_class = serializers.ModelSerializer
    queryset = models.UserProfile.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile, IsAuthenticated)
    filter_backends = (filters.SearchFilter,)
    search_fields = ['id', 'name', 'surname', 'email']
class LoginViewSet(ObtainAuthToken):
    """Exchange username/password for an auth token.

    Uses the default renderer classes so the endpoint also appears in the
    browsable API.
    """
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
| 42.894737 | 72 | 0.825767 | from rest_framework import viewsets
from . import serializers, models, permissions
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class UserModelViewSet(viewsets.ModelViewSet):
serializer_class = serializers.ModelSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile, IsAuthenticated)
filter_backends = (filters.SearchFilter,)
search_fields = ['id', 'name', 'surname', 'email']
class LoginViewSet(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
| true | true |
f73163bebf2ce9fdff591feac06da38b26c56b96 | 851 | py | Python | ooobuild/dyn/sdb/definition_content.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/sdb/definition_content.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/sdb/definition_content.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sdb
from ...lo.sdb.definition_content import DefinitionContent as DefinitionContent
__all__ = ['DefinitionContent']
| 32.730769 | 79 | 0.760282 |
from ...lo.sdb.definition_content import DefinitionContent as DefinitionContent
__all__ = ['DefinitionContent']
| true | true |
f731655548ca300269d3b7f542881d9a8eb93c2a | 4,830 | py | Python | src/pykeen/models/unimodal/trans_e.py | DJRavinszkha/pykeen | d79fe39f83bc2831137f22be6421b37568694cf4 | [
"MIT"
] | null | null | null | src/pykeen/models/unimodal/trans_e.py | DJRavinszkha/pykeen | d79fe39f83bc2831137f22be6421b37568694cf4 | [
"MIT"
] | null | null | null | src/pykeen/models/unimodal/trans_e.py | DJRavinszkha/pykeen | d79fe39f83bc2831137f22be6421b37568694cf4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""TransE."""
from typing import Any, ClassVar, Mapping, Optional
import torch
import torch.autograd
from torch.nn import functional
from ..base import EntityRelationEmbeddingModel
from ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE
from ...losses import Loss
from ...nn.emb import EmbeddingSpecification
from ...nn.init import xavier_uniform_, xavier_uniform_norm_
from ...regularizers import Regularizer
from ...triples import TriplesFactory
from ...typing import Constrainer, DeviceHint, Hint, Initializer
__all__ = [
'TransE',
]
class TransE(EntityRelationEmbeddingModel):
    r"""An implementation of TransE [bordes2013]_.

    TransE models relations as a translation from head to tail entities in :math:`\textbf{e}`:

    .. math::

        \textbf{e}_h + \textbf{e}_r \approx \textbf{e}_t

    This equation is rearranged and the :math:`l_p` norm is applied to create the TransE interaction function.

    .. math::

        f(h, r, t) = - \|\textbf{e}_h + \textbf{e}_r - \textbf{e}_t\|_{p}

    While this formulation is computationally efficient, it inherently cannot model one-to-many, many-to-one, and
    many-to-many relationships. For triples :math:`(h,r,t_1), (h,r,t_2) \in \mathcal{K}` where :math:`t_1 \neq t_2`,
    the model adapts the embeddings in order to ensure :math:`\textbf{e}_h + \textbf{e}_r \approx \textbf{e}_{t_1}`
    and :math:`\textbf{e}_h + \textbf{e}_r \approx \textbf{e}_{t_2}` which results in
    :math:`\textbf{e}_{t_1} \approx \textbf{e}_{t_2}`.

    ---
    citation:
        author: Bordes
        year: 2013
        link: http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf
    """

    #: The default strategy for optimizing the model's hyper-parameters
    hpo_default: ClassVar[Mapping[str, Any]] = dict(
        embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,
        scoring_fct_norm=dict(type=int, low=1, high=2),
    )

    def __init__(
        self,
        triples_factory: TriplesFactory,
        embedding_dim: int = 50,
        scoring_fct_norm: int = 1,
        loss: Optional[Loss] = None,
        preferred_device: DeviceHint = None,
        random_seed: Optional[int] = None,
        regularizer: Optional[Regularizer] = None,
        entity_initializer: Hint[Initializer] = xavier_uniform_,
        entity_constrainer: Hint[Constrainer] = functional.normalize,
        relation_initializer: Hint[Initializer] = xavier_uniform_norm_,
    ) -> None:
        r"""Initialize TransE.

        :param triples_factory: The triples factory providing the training triples.
        :param embedding_dim: The entity embedding dimension $d$. Is usually $d \in [50, 300]$.
        :param scoring_fct_norm: The :math:`l_p` norm applied in the interaction function. Is usually ``1`` or ``2.``.
        :param loss: The loss to use. Defaults to the model's default loss.
        :param preferred_device: The preferred device for model training and inference.
        :param random_seed: An optional random seed for reproducibility.
        :param regularizer: An optional regularizer applied to the embeddings.
        :param entity_initializer: Initializer for the entity embeddings.
        :param entity_constrainer: Constrainer applied to entity embeddings (unit-norm by default,
            as in the original paper).
        :param relation_initializer: Initializer for the relation embeddings.

        .. seealso::

           - OpenKE `implementation of TransE <https://github.com/thunlp/OpenKE/blob/OpenKE-PyTorch/models/TransE.py>`_
        """
        super().__init__(
            triples_factory=triples_factory,
            loss=loss,
            preferred_device=preferred_device,
            random_seed=random_seed,
            regularizer=regularizer,
            entity_representations=EmbeddingSpecification(
                embedding_dim=embedding_dim,
                initializer=entity_initializer,
                constrainer=entity_constrainer,
            ),
            relation_representations=EmbeddingSpecification(
                embedding_dim=embedding_dim,
                initializer=relation_initializer,
            ),
        )
        self.scoring_fct_norm = scoring_fct_norm

    def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        # One (h, r, t) triple per row -> one score per row, kept as a column.
        h = self.entity_embeddings(indices=hrt_batch[:, 0])
        r = self.relation_embeddings(indices=hrt_batch[:, 1])
        t = self.entity_embeddings(indices=hrt_batch[:, 2])
        return -torch.norm(h + r - t, dim=-1, p=self.scoring_fct_norm, keepdim=True)

    def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        h = self.entity_embeddings(indices=hr_batch[:, 0])
        r = self.relation_embeddings(indices=hr_batch[:, 1])
        t = self.entity_embeddings(indices=None)
        # ||(h + r) - t_j||_p for every entity j is exactly a pairwise distance
        # matrix; torch.cdist computes it without materializing the
        # (batch, num_entities, dim) broadcast difference tensor.
        return -torch.cdist(h + r, t, p=self.scoring_fct_norm)

    def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        h = self.entity_embeddings(indices=None)
        r = self.relation_embeddings(indices=rt_batch[:, 0])
        t = self.entity_embeddings(indices=rt_batch[:, 1])
        # ||h_j + r - t||_p == ||(t - r) - h_j||_p, again a pairwise distance.
        return -torch.cdist(t - r, h, p=self.scoring_fct_norm)
from typing import Any, ClassVar, Mapping, Optional
import torch
import torch.autograd
from torch.nn import functional
from ..base import EntityRelationEmbeddingModel
from ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE
from ...losses import Loss
from ...nn.emb import EmbeddingSpecification
from ...nn.init import xavier_uniform_, xavier_uniform_norm_
from ...regularizers import Regularizer
from ...triples import TriplesFactory
from ...typing import Constrainer, DeviceHint, Hint, Initializer
__all__ = [
'TransE',
]
class TransE(EntityRelationEmbeddingModel):
hpo_default: ClassVar[Mapping[str, Any]] = dict(
embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,
scoring_fct_norm=dict(type=int, low=1, high=2),
)
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
scoring_fct_norm: int = 1,
loss: Optional[Loss] = None,
preferred_device: DeviceHint = None,
random_seed: Optional[int] = None,
regularizer: Optional[Regularizer] = None,
entity_initializer: Hint[Initializer] = xavier_uniform_,
entity_constrainer: Hint[Constrainer] = functional.normalize,
relation_initializer: Hint[Initializer] = xavier_uniform_norm_,
) -> None:
super().__init__(
triples_factory=triples_factory,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
entity_representations=EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=entity_initializer,
constrainer=entity_constrainer,
),
relation_representations=EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
),
)
self.scoring_fct_norm = scoring_fct_norm
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hrt_batch[:, 0])
r = self.relation_embeddings(indices=hrt_batch[:, 1])
t = self.entity_embeddings(indices=hrt_batch[:, 2])
# TODO: Use torch.dist
return -torch.norm(h + r - t, dim=-1, p=self.scoring_fct_norm, keepdim=True)
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hr_batch[:, 0])
r = self.relation_embeddings(indices=hr_batch[:, 1])
t = self.entity_embeddings(indices=None)
# TODO: Use torch.cdist
return -torch.norm(h[:, None, :] + r[:, None, :] - t[None, :, :], dim=-1, p=self.scoring_fct_norm)
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=None)
r = self.relation_embeddings(indices=rt_batch[:, 0])
t = self.entity_embeddings(indices=rt_batch[:, 1])
# TODO: Use torch.cdist
return -torch.norm(h[None, :, :] + r[:, None, :] - t[:, None, :], dim=-1, p=self.scoring_fct_norm)
| true | true |
f73165f0c8a4ee6043789cf0356dafa3edf3bfa8 | 1,392 | py | Python | main.py | tejasvicsr1/Jumble-Solver | 12980394f6f0b7a5a580a56389559266f3825d6a | [
"MIT"
] | null | null | null | main.py | tejasvicsr1/Jumble-Solver | 12980394f6f0b7a5a580a56389559266f3825d6a | [
"MIT"
] | null | null | null | main.py | tejasvicsr1/Jumble-Solver | 12980394f6f0b7a5a580a56389559266f3825d6a | [
"MIT"
] | null | null | null | # Python code to unscramble a jumbled word using the Enchant dictionary(US) and itertools in Python.
from itertools import permutations
import enchant
word_list = enchant.Dict("en_US")
# Taking the input word and converting it into lowercase words.
word = input("Enter the letters: ")
word = word.lower()
word_length = len(word)
# A function to print the unjumbled words according to the length of the word.
def output_fnc(length):
temp = []
for word in ans:
if len(word) == length:
temp.append(word)
if len(temp) != 0:
print("Words of length " + str(length) + " are:")
for temps in temp:
print(temps)
else:
print("No words of length " + str(length))
# Variables to store the final correct words and to store all the possible permutations.
ans = []
perms = []
# Finding and adding all the permutations to the list.
for i in range(1, word_length + 1):
for p in permutations(word, i):
striing = ''
len_p = len(p)
for letter in range(0, len_p):
striing += p[letter]
perms.append(striing)
# Removing duplicates.
perms = list(set(perms))
# Checking if the permutation created is an actual English(US) word.
for perm in perms:
if word_list.check(perm):
ans.append(perm)
#Printing the final results.
for j in range(2, word_length + 1):
output_fnc(j)
| 28.408163 | 101 | 0.655891 |
from itertools import permutations
import enchant
word_list = enchant.Dict("en_US")
word = input("Enter the letters: ")
word = word.lower()
word_length = len(word)
def output_fnc(length):
temp = []
for word in ans:
if len(word) == length:
temp.append(word)
if len(temp) != 0:
print("Words of length " + str(length) + " are:")
for temps in temp:
print(temps)
else:
print("No words of length " + str(length))
ans = []
perms = []
for i in range(1, word_length + 1):
for p in permutations(word, i):
striing = ''
len_p = len(p)
for letter in range(0, len_p):
striing += p[letter]
perms.append(striing)
perms = list(set(perms))
for perm in perms:
if word_list.check(perm):
ans.append(perm)
for j in range(2, word_length + 1):
output_fnc(j)
| true | true |
f731664d01602fc4bc98d2d3a1dca73625c63b84 | 14,176 | py | Python | file-access/static/usr/bin/setup-users-and-groups.py | aisbergg/dockerfiles | 3cf24d2667a75d6eda8b8fb7df835b97c6a13348 | [
"MIT"
] | 1 | 2019-10-23T06:54:06.000Z | 2019-10-23T06:54:06.000Z | file-access/static/usr/bin/setup-users-and-groups.py | aisbergg/dockerfiles | 3cf24d2667a75d6eda8b8fb7df835b97c6a13348 | [
"MIT"
] | null | null | null | file-access/static/usr/bin/setup-users-and-groups.py | aisbergg/dockerfiles | 3cf24d2667a75d6eda8b8fb7df835b97c6a13348 | [
"MIT"
] | null | null | null | import argparse
import crypt
import json
import os
import pwd
import random
import re
import string
import subprocess
import sys
import traceback
from itertools import product
import yaml
class ACL:
    """Thin wrapper around the ``getfacl``/``setfacl`` command line tools."""

    @staticmethod
    def get_file_acl(path):
        """Read the ACL of *path* into a dict.

        Returns ``{'users': [...], 'groups': [...], 'other': 'rwx'|None,
        'owner': {...}, 'group': {...}}`` where each user/group entry is a
        ``{'name': ..., 'permissions': 'rwx'}`` mapping (an empty name is the
        owning user/group entry).

        :raises IOError: when *path* does not exist.
        :raises Exception: when ``getfacl`` fails.
        """
        if not os.path.exists(path):
            raise IOError("The directory or file '{0}' does not exist".format(path))
        cmd_result = execute_command(['getfacl', '-p', path])
        if cmd_result['returncode'] != 0:
            raise Exception("Failed to get ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
        raw_acl = cmd_result['output'].splitlines()
        # getfacl header: line 0 is the file name, 1 the owner, 2 the group.
        owner = re.match(r'# owner: (.+)', raw_acl[1]).group(1)
        group = re.match(r'# group: (.+)', raw_acl[2]).group(1)
        acl = {'users': [], 'groups': [], 'other': None}
        for a in raw_acl[3:]:
            match_acl = re.match(r'user::([rwx-]+)', a)
            if match_acl:
                acl['users'].append({'name': '', 'permissions': match_acl.group(1)})
                # explicitly add owner (e.g. webserver), so sub directories created
                # by different user will still be readable by the original owner
                acl['owner'] = {'name': owner, 'permissions': match_acl.group(1)}
                continue
            match_acl = re.match(r'user:([^:]+):([rwx-]+)', a)
            if match_acl:
                acl['users'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
                continue
            match_acl = re.match(r'group::([rwx-]+)', a)
            if match_acl:
                acl['groups'].append({'name': '', 'permissions': match_acl.group(1)})
                acl['group'] = {'name': group, 'permissions': match_acl.group(1)}
                continue
            match_acl = re.match(r'group:([^:]+):([rwx-]+)', a)
            if match_acl:
                acl['groups'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
                continue
            match_acl = re.match(r'other::([rwx-]+)', a)
            if match_acl:
                acl['other'] = match_acl.group(1)
                continue
        return acl

    @staticmethod
    def file_acl_differs(path, new_acl):
        """Return True when the on-disk ACL of *path* differs from *new_acl*."""
        old_acl = ACL.get_file_acl(path)
        # Compare canonical JSON renderings so dict key order does not matter.
        return json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True)

    @staticmethod
    def set_file_acl(path, new_acl, force=False):
        """Recursively apply *new_acl* to *path* when it differs (or *force*).

        Entries present on disk but absent from *new_acl* are removed.
        """
        def format_acl_spec(prefix, name, permissions):
            # Build "u:name:perms"; directories additionally get the same
            # entry as a default ("d:") ACL so newly created children
            # inherit it.
            acl_spec = list()
            acl_spec.append("{0}:{1}:{2}".format(prefix, name, permissions))
            if os.path.isdir(path):
                acl_spec.append("d:{0}:{1}:{2}".format(prefix, name, permissions))
            return ','.join(acl_spec)

        old_acl = ACL.get_file_acl(path)
        if force or json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True):
            print("Setting ACLs of '{0}...".format(path))
            # modify ACLs
            setfacl_cmd = ['setfacl', '-R', '-m']
            acl_spec = list()
            for uacl in new_acl['users']:
                acl_spec.append(format_acl_spec('u', uacl['name'], uacl['permissions']))
            # explicitly add owner (e.g. webserver), so sub directories created
            # by different user will still be readable by the original owner
            acl_spec.append(format_acl_spec('u', new_acl['owner']['name'], new_acl['owner']['permissions']))
            for gacl in new_acl['groups']:
                acl_spec.append(format_acl_spec('g', gacl['name'], gacl['permissions']))
            acl_spec.append(format_acl_spec('g', new_acl['group']['name'], new_acl['group']['permissions']))
            acl_spec.append(format_acl_spec('o', '', new_acl['other']))
            setfacl_cmd.append(','.join(acl_spec))
            setfacl_cmd.append(path)
            cmd_result = execute_command(setfacl_cmd)
            if cmd_result['returncode'] != 0:
                raise Exception("Failed to set ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
            # remove ACLs that exist on disk but are no longer wanted
            setfacl_cmd = ['setfacl', '-R', '-x']
            acl_spec = list()
            users_to_remove = list(
                set([x['name'] for x in old_acl['users']]) - set([x['name'] for x in new_acl['users']]))
            groups_to_remove = list(
                set([x['name'] for x in old_acl['groups']]) - set([x['name'] for x in new_acl['groups']]))
            for u in users_to_remove:
                # NOTE(review): this yields specs like "u:name:" with a
                # trailing colon; setfacl appears to tolerate that -- confirm.
                acl_spec.append(format_acl_spec('u', u, ''))
            for g in groups_to_remove:
                acl_spec.append(format_acl_spec('g', g, ''))
            if acl_spec:
                setfacl_cmd.append(','.join(acl_spec))
                setfacl_cmd.append(path)
                cmd_result = execute_command(setfacl_cmd)
                if cmd_result['returncode'] != 0:
                    raise Exception(
                        "Failed to remove ACL from file or directory '{0}': {1}".format(path, cmd_result['output']))
def get_arg(config, arg, dtype, default=None, required=False):
    """Fetch *arg* from the *config* mapping, enforcing an exact type.

    Returns *default* when the key is absent (unless *required* is set), and
    raises ValueError for a missing required key or for a value whose type is
    not exactly *dtype* (subclasses, e.g. bool for int, are rejected).
    """
    if arg not in config:
        if required:
            raise ValueError("Missing key '{0}'".format(arg))
        return default
    value = config[arg]
    if type(value) is not dtype:
        raise ValueError("'{0}' must be of type '{1}', got '{2}'".format(arg, str(dtype), str(value)))
    return value
def execute_command(cmd):
    """Run *cmd* (an argv list) and capture combined stdout/stderr as text.

    Returns ``{'returncode': int, 'output': str}``; a non-zero exit status is
    reported in the dict rather than raised.
    """
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as err:
        return {'returncode': err.returncode, 'output': err.output}
    return {'returncode': 0, 'output': output}
def recursive_chown(path, uid, gid):
    """Recursively change ownership of *path* and everything below it.

    Unlike the previous implementation this walks the tree iteratively
    (no recursion depth limit, no duplicate chown of sub directories) and
    does not follow symbolic links, so a link inside the tree can neither
    redirect the chown outside of it nor cause an infinite traversal loop.
    """
    os.chown(path, uid, gid)
    # os.walk(followlinks=False) never descends into symlinked directories.
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full_path = os.path.join(root, name)
            if not os.path.islink(full_path):
                os.chown(full_path, uid, gid)
def main():
    """Entry point: create users/groups and apply ACLs from a YAML config.

    The configuration is read from a file (or stdin when the path is '-') and
    may contain three top-level keys: ``groups``, ``users`` and ``defaults``.
    Exits with status 1 on any error.
    """
    # parse arguments
    parser = argparse.ArgumentParser(
        prog='setup-users-and-groups',
        description='According to a configuration file this script creates Linux users/groups and grants permissions on resources.',
        add_help=True)
    parser.add_argument('-f', '--force', dest='force',
                        action='store_true', default=False, help="Force the setting the ACLs.")
    parser.add_argument('-c', '--create-dir', dest='create_dir',
                        action='store_true', default=False, help="Create a directory for a path that does not exists.")
    parser.add_argument('configuration_file', help="File that defines what to do.")
    args = parser.parse_args(sys.argv[1:])
    try:
        # load configuration either from file or from stdin
        if args.configuration_file == '-':
            inp = sys.stdin.read()
            # NOTE(review): yaml.load() without an explicit Loader is unsafe
            # on untrusted input; the config is admin-provided here, but
            # yaml.safe_load() would be the safer choice -- confirm.
            config = yaml.load(inp) or dict()
        else:
            if not os.path.exists(args.configuration_file):
                raise IOError("The configuration file '{0}' does not exist".format(args.configuration_file))
            with open(file=args.configuration_file, mode='r', encoding='utf8') as f:
                config = yaml.load(f.read())

        # parse arguments
        groups = get_arg(config, "groups", dict, dict())
        users = get_arg(config, "users", dict, dict())
        defaults = get_arg(config, "defaults", dict, None) or dict()
        # Owner/owner-group defaults of None mean "keep whatever permissions
        # are already present on the file system".
        defaults = {
            'owner_permissions': get_arg(defaults, "owner_permissions", str, None),
            'owner_group_permissions': get_arg(defaults, "owner_group_permissions", str, None),
            'user_permissions': get_arg(defaults, "user_permissions", str, 'rwx'),
            'group_permissions': get_arg(defaults, "group_permissions", str, 'rwx'),
        }
        # Desired ACL per path, collected while iterating groups and users
        # and applied in a single pass at the end.
        acls = dict()

        # create groups
        for group, gdef in groups.items():
            if type(gdef) != dict:
                raise ValueError("The group definition of '{0}' must be of type dict".format(group))
            gid = get_arg(gdef, 'gid', int, None)
            permissions = get_arg(gdef, 'permissions', list, list())

            # add group if it doesn't already exists
            if execute_command(['getent', 'group', group])['returncode'] == 0:
                print("Group '{0}' already exists, skipping...".format(group))
            else:
                print("Creating group '{0}'...".format(group))
                groupadd_cmd = ['groupadd']
                if gid:
                    groupadd_cmd += ['-g', str(gid)]
                groupadd_cmd.append(group)
                cmd_result = execute_command(groupadd_cmd)
                if cmd_result['returncode'] != 0:
                    raise Exception("Failed to create group '{0}': {1}".format(group, cmd_result['output']))

            # parse permissions
            for perm in permissions:
                path = get_arg(perm, "path", str, None, required=True)
                if not os.path.exists(path):
                    if args.create_dir:
                        os.makedirs(path, 0o750)
                    else:
                        raise IOError("The directory or file '{0}' does not exist".format(path))
                path_permissions = get_arg(perm, 'permissions', str, defaults['group_permissions'])
                new_acl = {'name': group, 'permissions': path_permissions}
                if path in acls:
                    acls[path]['groups'].append(new_acl)
                else:
                    # First entry for this path: seed the owning user/group
                    # entries with the default permissions.
                    user_group_default = {'name': '', 'permissions': defaults['group_permissions']}
                    acls[path] = {'users': [user_group_default], 'groups': [user_group_default, new_acl],
                                  'other': '---'}

        # create users
        for user, udef in users.items():
            if type(udef) != dict:
                raise ValueError("The user definition of '{0}' must be of type dict".format(user))
            uid = get_arg(udef, 'uid', int, None)
            # NOTE: shadows the outer `groups` config dict, which is no
            # longer needed at this point.
            groups = get_arg(udef, 'groups', list, None)
            home = get_arg(udef, 'home', str, None)
            # When no password is configured, hash a random 64-char string so
            # the account effectively has no usable password.
            random_string = ''.join(
                random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(64))
            hashed_password = crypt.crypt(get_arg(udef, 'password', str, random_string),
                                          crypt.mksalt(crypt.METHOD_SHA512))
            ssh_public_key = get_arg(udef, 'ssh_public_key', str, '')
            permissions = get_arg(udef, 'permissions', list, list())

            # add user if it doesn't already exists
            if execute_command(['getent', 'passwd', user])['returncode'] == 0:
                print("User '{0}' already exists, skipping...".format(user))
            else:
                print("Creating user '{0}'...".format(user))
                useradd_cmd = ['useradd', '-m', '-p', hashed_password, '-U', '-s', '/bin/bash']
                if uid:
                    useradd_cmd += ['-u', str(uid)]
                if groups:
                    useradd_cmd += ['-G', ','.join(groups)]
                if home:
                    useradd_cmd += ['-d', home]
                useradd_cmd.append(user)
                cmd_result = execute_command(useradd_cmd)
                if cmd_result['returncode'] != 0:
                    raise Exception("Failed to create user '{0}': {1}".format(user, cmd_result['output']))

            # set SSH public key
            # NOTE(review): the authorized_keys file is (re)generated on every
            # run -- matching its "DO NOT EDIT" banner -- so keys can be
            # rotated via the configuration; confirm against original intent.
            user_info = pwd.getpwnam(user)
            ak_file = os.path.join(user_info.pw_dir, '.ssh/authorized_keys')
            authorized_key_string = "## !!! DO NOT EDIT THIS FILE !!!\n## This file is generated automatically. Any changes will eventually be lost.\n## If you like to add a SSH Public Key contact your administrator.\n" + ssh_public_key
            os.makedirs(os.path.dirname(ak_file), 0o750, True)
            with open(file=ak_file, mode='w', encoding='utf8') as f:
                f.write(authorized_key_string)
            os.chmod(ak_file, 0o400)
            recursive_chown(user_info.pw_dir, user_info.pw_uid, user_info.pw_gid)

            # parse permissions
            for perm in permissions:
                path = get_arg(perm, "path", str, None, required=True)
                if not os.path.exists(path):
                    if args.create_dir:
                        os.makedirs(path, 0o750)
                    else:
                        raise IOError("The directory or file '{0}' does not exist".format(path))
                path_permissions = get_arg(perm, 'permissions', str, defaults['user_permissions'])
                new_acl = {'name': user, 'permissions': path_permissions}
                if path in acls:
                    acls[path]['users'].append(new_acl)
                else:
                    user_group_default = {'name': '', 'permissions': defaults['user_permissions']}
                    acls[path] = {'users': [user_group_default, new_acl], 'groups': [user_group_default],
                                  'other': '---'}

        # set ACLs
        paths = list(acls.keys())
        paths.sort()
        # find prefix paths and append permissions, otherwise longer paths will overwrite the shorter paths permissions
        for p1, p2 in product(paths, paths):
            if p1 != p2 and p2.startswith(p1):
                acls[p2]['users'] += acls[p1]['users']
                acls[p2]['groups'] += acls[p1]['groups']
        for path in paths:
            # Keep the existing owner/owning-group names; override their
            # permissions only when defaults were configured.
            old_acl = ACL.get_file_acl(path)
            acls[path]['owner'] = {'name': old_acl['owner']['name'], 'permissions': defaults['owner_permissions'] or old_acl['owner']['permissions']}
            acls[path]['group'] = {'name': old_acl['group']['name'], 'permissions': defaults['owner_group_permissions'] or old_acl['group']['permissions']}
            ACL.set_file_acl(path, acls[path], args.force)

    except Exception as e:
        # Top-level boundary: report and exit non-zero for the container init.
        sys.stderr.write(str(e) + '\n\n')
        traceback.print_exc(5)
        exit(1)
# Run the setup when executed as a script.
if __name__ == '__main__':
    main()
| 46.175896 | 236 | 0.55862 | import argparse
import crypt
import json
import os
import pwd
import random
import re
import string
import subprocess
import sys
import traceback
from itertools import product
import yaml
class ACL:
@staticmethod
def get_file_acl(path):
if not os.path.exists(path):
raise IOError("The directory or file '{0}' does not exist".format(path))
cmd_result = execute_command(['getfacl', '-p', path])
if cmd_result['returncode'] != 0:
raise Exception("Failed to get ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
raw_acl = cmd_result['output'].splitlines()
owner = re.match(r'# owner: (.+)', raw_acl[1]).group(1)
group = re.match(r'# group: (.+)', raw_acl[2]).group(1)
acl = {'users': [], 'groups': [], 'other': None}
for a in raw_acl[3:]:
match_acl = re.match(r'user::([rwx-]+)', a)
if match_acl:
acl['users'].append({'name': '', 'permissions': match_acl.group(1)})
acl['owner'] = {'name': owner, 'permissions': match_acl.group(1)}
continue
match_acl = re.match(r'user:([^:]+):([rwx-]+)', a)
if match_acl:
acl['users'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
continue
match_acl = re.match(r'group::([rwx-]+)', a)
if match_acl:
acl['groups'].append({'name': '', 'permissions': match_acl.group(1)})
acl['group'] = {'name': group, 'permissions': match_acl.group(1)}
continue
match_acl = re.match(r'group:([^:]+):([rwx-]+)', a)
if match_acl:
acl['groups'].append({'name': match_acl.group(1), 'permissions': match_acl.group(2)})
continue
match_acl = re.match(r'other::([rwx-]+)', a)
if match_acl:
acl['other'] = match_acl.group(1)
continue
return acl
@staticmethod
def file_acl_differs(path, new_acl):
old_acl = ACL.get_file_acl(path)
return json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True)
@staticmethod
def set_file_acl(path, new_acl, force=False):
def format_acl_spec(prefix, name, permissions):
acl_spec = list()
acl_spec.append("{0}:{1}:{2}".format(prefix, name, permissions))
if os.path.isdir(path):
acl_spec.append("d:{0}:{1}:{2}".format(prefix, name, permissions))
return ','.join(acl_spec)
old_acl = ACL.get_file_acl(path)
if force or json.dumps(old_acl, sort_keys=True) != json.dumps(new_acl, sort_keys=True):
print("Setting ACLs of '{0}...".format(path))
# modify ACLs
setfacl_cmd = ['setfacl', '-R', '-m']
acl_spec = list()
for uacl in new_acl['users']:
acl_spec.append(format_acl_spec('u', uacl['name'], uacl['permissions']))
# explicitly add owner (e.g. webserver), so sub directories created
# by different user will still be readable by the original owner
acl_spec.append(format_acl_spec('u', new_acl['owner']['name'], new_acl['owner']['permissions']))
for gacl in new_acl['groups']:
acl_spec.append(format_acl_spec('g', gacl['name'], gacl['permissions']))
acl_spec.append(format_acl_spec('g', new_acl['group']['name'], new_acl['group']['permissions']))
acl_spec.append(format_acl_spec('o', '', new_acl['other']))
setfacl_cmd.append(','.join(acl_spec))
setfacl_cmd.append(path)
cmd_result = execute_command(setfacl_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to set ACL of file or directory '{0}': {1}".format(path, cmd_result['output']))
# remove ACLs
setfacl_cmd = ['setfacl', '-R', '-x']
acl_spec = list()
users_to_remove = list(
set([x['name'] for x in old_acl['users']]) - set([x['name'] for x in new_acl['users']]))
groups_to_remove = list(
set([x['name'] for x in old_acl['groups']]) - set([x['name'] for x in new_acl['groups']]))
for u in users_to_remove:
acl_spec.append(format_acl_spec('u', u, ''))
for g in groups_to_remove:
acl_spec.append(format_acl_spec('g', g, ''))
if acl_spec:
setfacl_cmd.append(','.join(acl_spec))
setfacl_cmd.append(path)
cmd_result = execute_command(setfacl_cmd)
if cmd_result['returncode'] != 0:
raise Exception(
"Failed to remove ACL from file or directory '{0}': {1}".format(path, cmd_result['output']))
def get_arg(config, arg, dtype, default=None, required=False):
if required and not arg in config:
raise ValueError("Missing key '{0}'".format(arg))
if not arg in config:
return default
if type(config[arg]) is not dtype:
raise ValueError("'{0}' must be of type '{1}', got '{2}'".format(arg, str(dtype), str(config[arg])))
return config[arg]
def execute_command(cmd):
try:
return {'returncode': 0,
'output': subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)}
except subprocess.CalledProcessError as e:
return {'returncode': e.returncode, 'output': e.output}
def recursive_chown(path, uid, gid):
os.chown(path, uid, gid)
for item in os.listdir(path):
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
os.chown(itempath, uid, gid)
elif os.path.isdir(itempath):
os.chown(itempath, uid, gid)
recursive_chown(itempath, uid, gid)
def main():
# parse arguments
parser = argparse.ArgumentParser(
prog='setup-users-and-groups',
description='According to a configuration file this script creates Linux users/groups and grants permissions on resources.',
add_help=True)
parser.add_argument('-f', '--force', dest='force',
action='store_true', default=False, help="Force the setting the ACLs.")
parser.add_argument('-c', '--create-dir', dest='create_dir',
action='store_true', default=False, help="Create a directory for a path that does not exists.")
parser.add_argument('configuration_file', help="File that defines what to do.")
args = parser.parse_args(sys.argv[1:])
try:
# load configuration either from file or from stdin
if args.configuration_file == '-':
inp = sys.stdin.read()
config = yaml.load(inp) or dict()
else:
if not os.path.exists(args.configuration_file):
raise IOError("The configuration file '{0}' does not exist".format(args.configuration_file))
with open(file=args.configuration_file, mode='r', encoding='utf8') as f:
config = yaml.load(f.read())
# parse arguments
groups = get_arg(config, "groups", dict, dict())
users = get_arg(config, "users", dict, dict())
defaults = get_arg(config, "defaults", dict, None) or dict()
defaults = {
'owner_permissions': get_arg(defaults, "owner_permissions", str, None),
'owner_group_permissions': get_arg(defaults, "owner_group_permissions", str, None),
'user_permissions': get_arg(defaults, "user_permissions", str, 'rwx'),
'group_permissions': get_arg(defaults, "group_permissions", str, 'rwx'),
}
acls = dict()
# create groups
for group, gdef in groups.items():
if type(gdef) != dict:
raise ValueError("The group definition of '{0}' must be of type dict".format(group))
gid = get_arg(gdef, 'gid', int, None)
permissions = get_arg(gdef, 'permissions', list, list())
# add group if it doesn't already exists
if execute_command(['getent', 'group', group])['returncode'] == 0:
print("Group '{0}' already exists, skipping...".format(group))
else:
print("Creating group '{0}'...".format(group))
groupadd_cmd = ['groupadd']
if gid:
groupadd_cmd += ['-g', str(gid)]
groupadd_cmd.append(group)
cmd_result = execute_command(groupadd_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to create group '{0}': {1}".format(group, cmd_result['output']))
for perm in permissions:
path = get_arg(perm, "path", str, None, required=True)
if not os.path.exists(path):
if args.create_dir:
os.makedirs(path, 0o750);
else:
raise IOError("The directory or file '{0}' does not exist".format(path))
path_permissions = get_arg(perm, 'permissions', str, defaults['group_permissions'])
new_acl = {'name': group, 'permissions': path_permissions}
if path in acls:
acls[path]['groups'].append(new_acl)
else:
user_group_default = {'name': '', 'permissions': defaults['group_permissions']}
acls[path] = {'users': [user_group_default], 'groups': [user_group_default, new_acl],
'other': '---'}
for user, udef in users.items():
if type(udef) != dict:
raise ValueError("The user definition of '{0}' must be of type dict".format(user))
uid = get_arg(udef, 'uid', int, None)
groups = get_arg(udef, 'groups', list, None)
home = get_arg(udef, 'home', str, None)
random_string = ''.join(
random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(64))
hashed_password = crypt.crypt(get_arg(udef, 'password', str, random_string),
crypt.mksalt(crypt.METHOD_SHA512))
ssh_public_key = get_arg(udef, 'ssh_public_key', str, '')
permissions = get_arg(udef, 'permissions', list, list())
if execute_command(['getent', 'passwd', user])['returncode'] == 0:
print("User '{0}' already exists, skipping...".format(user))
else:
print("Creating user '{0}'...".format(user))
useradd_cmd = ['useradd', '-m', '-p', hashed_password, '-U', '-s', '/bin/bash']
if uid:
useradd_cmd += ['-u', str(uid)]
if groups:
useradd_cmd += ['-G', ','.join(groups)]
if home:
useradd_cmd += ['-d', home]
useradd_cmd.append(user)
cmd_result = execute_command(useradd_cmd)
if cmd_result['returncode'] != 0:
raise Exception("Failed to create user '{0}': {1}".format(user, cmd_result['output']))
# set SSH public key
user_info = pwd.getpwnam(user)
ak_file = os.path.join(user_info.pw_dir, '.ssh/authorized_keys')
authorized_key_string = "## !!! DO NOT EDIT THIS FILE !!!\n## This file is generated automatically. Any changes will eventually be lost.\n## If you like to add a SSH Public Key contact your administrator.\n" + ssh_public_key
os.makedirs(os.path.dirname(ak_file), 0o750, True)
with open(file=ak_file, mode='w', encoding='utf8') as f:
f.write(authorized_key_string)
os.chmod(ak_file, 0o400)
recursive_chown(user_info.pw_dir, user_info.pw_uid, user_info.pw_gid)
# parse permissions
for perm in permissions:
path = get_arg(perm, "path", str, None, required=True)
if not os.path.exists(path):
if args.create_dir:
os.makedirs(path, 0o750)
else:
raise IOError("The directory or file '{0}' does not exist".format(path))
path_permissions = get_arg(perm, 'permissions', str, defaults['user_permissions'])
new_acl = {'name': user, 'permissions': path_permissions}
if path in acls:
acls[path]['users'].append(new_acl)
else:
user_group_default = {'name': '', 'permissions': defaults['user_permissions']}
acls[path] = {'users': [user_group_default, new_acl], 'groups': [user_group_default],
'other': '---'}
# set ACLs
paths = list(acls.keys())
paths.sort()
# find prefix paths and append permissions, otherwise longer paths will overwrite the shorter paths permissions
for p1, p2 in product(paths, paths):
if p1 != p2 and p2.startswith(p1):
acls[p2]['users'] += acls[p1]['users']
acls[p2]['groups'] += acls[p1]['groups']
for path in paths:
old_acl = ACL.get_file_acl(path)
acls[path]['owner'] = {'name': old_acl['owner']['name'], 'permissions': defaults['owner_permissions'] or old_acl['owner']['permissions']}
acls[path]['group'] = {'name': old_acl['group']['name'], 'permissions': defaults['owner_group_permissions'] or old_acl['group']['permissions']}
ACL.set_file_acl(path, acls[path], args.force)
except Exception as e:
sys.stderr.write(str(e) + '\n\n')
traceback.print_exc(5)
exit(1)
if __name__ == '__main__':
main()
| true | true |
f73167cc38566b53bef2f4ba63993e95af6bb2c0 | 3,587 | py | Python | brian2/only.py | awillats/brian2 | e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc | [
"BSD-2-Clause"
] | 1 | 2021-06-10T15:28:51.000Z | 2021-06-10T15:28:51.000Z | brian2/only.py | awillats/brian2 | e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc | [
"BSD-2-Clause"
] | null | null | null | brian2/only.py | awillats/brian2 | e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc | [
"BSD-2-Clause"
] | null | null | null | '''
A dummy package to allow wildcard import from brian2 without also importing
the pylab (numpy + matplotlib) namespace.
Usage: ``from brian2.only import *``
'''
# To minimize the problems with imports, import the packages in a sensible
# order
# The units and utils package does not depend on any other Brian package and
# should be imported first
from brian2.units import *
from brian2.utils import *
from brian2.core.tracking import *
from brian2.core.names import *
from brian2.core.spikesource import *
# The following packages only depend on something in the above set
from brian2.core.variables import linked_var
from brian2.core.functions import *
from brian2.core.preferences import *
from brian2.core.clocks import *
from brian2.equations import *
# The base class only depends on the above sets
from brian2.core.base import *
# The rest...
from brian2.core.network import *
from brian2.core.magic import *
from brian2.core.operations import *
from brian2.stateupdaters import *
from brian2.codegen import *
from brian2.core.namespace import *
from brian2.groups import *
from brian2.groups.subgroup import *
from brian2.synapses import *
from brian2.monitors import *
from brian2.importexport import *
from brian2.input import *
from brian2.spatialneuron import *
from brian2.devices import set_device, get_device, device, all_devices, seed
import brian2.devices.cpp_standalone as _cpp_standalone
# preferences
import brian2.core.core_preferences as _core_preferences
prefs.load_preferences()
prefs.do_validation()
prefs._backup()
set_device(all_devices['runtime'])
def restore_initial_state():
'''
Restores internal Brian variables to the state they are in when Brian is imported
Resets ``defaultclock.dt = 0.1*ms``,
`BrianGlobalPreferences._restore` preferences, and set
`BrianObject._scope_current_key` back to 0.
'''
import gc
prefs._restore()
BrianObject._scope_current_key = 0
defaultclock.dt = 0.1*ms
gc.collect()
# make the test suite available via brian2.test()
from brian2.tests import run as test
from brian2.units import __all__ as _all_units
__all__ = [
'get_logger', 'BrianLogger', 'std_silent',
'Trackable',
'Nameable',
'SpikeSource',
'linked_var',
'DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types',
'PreferenceError', 'BrianPreference', 'prefs', 'brian_prefs',
'Clock', 'defaultclock',
'Equations', 'Expression', 'Statements',
'BrianObject',
'BrianObjectException',
'Network', 'profiling_summary', 'scheduling_summary',
'MagicNetwork', 'magic_network',
'MagicError',
'run', 'stop', 'collect', 'store', 'restore',
'start_scope',
'NetworkOperation', 'network_operation',
'StateUpdateMethod',
'linear', 'exact', 'independent',
'milstein', 'heun', 'euler', 'rk2', 'rk4', 'ExplicitStateUpdater',
'exponential_euler',
'gsl_rk2', 'gsl_rk4', 'gsl_rkf45', 'gsl_rkck', 'gsl_rk8pd',
'NumpyCodeObject', 'CythonCodeObject',
'get_local_namespace', 'DEFAULT_FUNCTIONS', 'DEFAULT_UNITS',
'DEFAULT_CONSTANTS',
'CodeRunner', 'Group', 'VariableOwner', 'NeuronGroup',
'Subgroup',
'Synapses',
'SpikeMonitor', 'EventMonitor', 'StateMonitor',
'PopulationRateMonitor',
'ImportExport',
'BinomialFunction', 'PoissonGroup', 'PoissonInput',
'SpikeGeneratorGroup', 'TimedArray',
'Morphology', 'Soma', 'Cylinder', 'Section', 'SpatialNeuron',
'set_device', 'get_device', 'device', 'all_devices', 'seed',
'restore_initial_state',
'test'
]
__all__.extend(_all_units)
| 31.191304 | 85 | 0.726234 |
from brian2.units import *
from brian2.utils import *
from brian2.core.tracking import *
from brian2.core.names import *
from brian2.core.spikesource import *
from brian2.core.variables import linked_var
from brian2.core.functions import *
from brian2.core.preferences import *
from brian2.core.clocks import *
from brian2.equations import *
from brian2.core.base import *
from brian2.core.network import *
from brian2.core.magic import *
from brian2.core.operations import *
from brian2.stateupdaters import *
from brian2.codegen import *
from brian2.core.namespace import *
from brian2.groups import *
from brian2.groups.subgroup import *
from brian2.synapses import *
from brian2.monitors import *
from brian2.importexport import *
from brian2.input import *
from brian2.spatialneuron import *
from brian2.devices import set_device, get_device, device, all_devices, seed
import brian2.devices.cpp_standalone as _cpp_standalone
import brian2.core.core_preferences as _core_preferences
prefs.load_preferences()
prefs.do_validation()
prefs._backup()
set_device(all_devices['runtime'])
def restore_initial_state():
import gc
prefs._restore()
BrianObject._scope_current_key = 0
defaultclock.dt = 0.1*ms
gc.collect()
from brian2.tests import run as test
from brian2.units import __all__ as _all_units
__all__ = [
'get_logger', 'BrianLogger', 'std_silent',
'Trackable',
'Nameable',
'SpikeSource',
'linked_var',
'DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types',
'PreferenceError', 'BrianPreference', 'prefs', 'brian_prefs',
'Clock', 'defaultclock',
'Equations', 'Expression', 'Statements',
'BrianObject',
'BrianObjectException',
'Network', 'profiling_summary', 'scheduling_summary',
'MagicNetwork', 'magic_network',
'MagicError',
'run', 'stop', 'collect', 'store', 'restore',
'start_scope',
'NetworkOperation', 'network_operation',
'StateUpdateMethod',
'linear', 'exact', 'independent',
'milstein', 'heun', 'euler', 'rk2', 'rk4', 'ExplicitStateUpdater',
'exponential_euler',
'gsl_rk2', 'gsl_rk4', 'gsl_rkf45', 'gsl_rkck', 'gsl_rk8pd',
'NumpyCodeObject', 'CythonCodeObject',
'get_local_namespace', 'DEFAULT_FUNCTIONS', 'DEFAULT_UNITS',
'DEFAULT_CONSTANTS',
'CodeRunner', 'Group', 'VariableOwner', 'NeuronGroup',
'Subgroup',
'Synapses',
'SpikeMonitor', 'EventMonitor', 'StateMonitor',
'PopulationRateMonitor',
'ImportExport',
'BinomialFunction', 'PoissonGroup', 'PoissonInput',
'SpikeGeneratorGroup', 'TimedArray',
'Morphology', 'Soma', 'Cylinder', 'Section', 'SpatialNeuron',
'set_device', 'get_device', 'device', 'all_devices', 'seed',
'restore_initial_state',
'test'
]
__all__.extend(_all_units)
| true | true |
f73168e6d9bed2cc8b08603295293be0c8f914d9 | 5,134 | py | Python | test/test_keytool_parse.py | cccs-rs/assemblyline-v4-service | ed53dedaa6f3c4e3850defd9f68b0d57407153bf | [
"MIT"
] | 6 | 2020-06-30T13:54:44.000Z | 2021-05-28T19:36:32.000Z | test/test_keytool_parse.py | cccs-rs/assemblyline-v4-service | ed53dedaa6f3c4e3850defd9f68b0d57407153bf | [
"MIT"
] | 17 | 2020-06-19T03:02:21.000Z | 2022-03-01T18:19:07.000Z | test/test_keytool_parse.py | cccs-rs/assemblyline-v4-service | ed53dedaa6f3c4e3850defd9f68b0d57407153bf | [
"MIT"
] | 8 | 2020-04-30T16:11:52.000Z | 2021-07-16T12:11:40.000Z | import pytest
class TestKeytoolParse:
@staticmethod
@pytest.mark.parametrize("printcert, correct_certs",
[
('Owner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]),
('Certificate[1]:\nOwner: CN=server, OU=server, O=server, L=server, ST=server, C=CA\nIssuer: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nSerial number: 4e2d045a\nValid from: Wed Apr 14 13:42:22 EDT 2021 until: Tue Jul 13 13:42:22 EDT 2021\nCertificate fingerprints:\n SHA1: 0B:BE:A7:40:20:F4:F0:DE:D1:C8:99:26:32:A8:33:7A:EB:E8:87:70\n SHA256: 83:C1:8D:49:A4:98:3F:73:66:97:63:78:4C:E5:70:BF:0C:A2:71:4A:58:CE:B0:4E:65:87:39:F0:06:1F:7F:2C\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n\n#2: ObjectId: 2.5.29.15 Criticality=true\nKeyUsage [\n DigitalSignature\n Key_Encipherment\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: 9B 06 D8 13 2E 6F 2F 62 85 66 42 A9 AC 86 2E A8 .....o/b.fB.....\n0010: 25 89 AB FC %...\n]\n]\n\n\nCertificate[2]:\nOwner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 
.....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=server, OU=server, O=server, L=server, ST=server, C=CA', 'Issuer': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:42:22 EDT 2021', 'ValidTo': 'Tue Jul 13 13:42:22 EDT 2021'}, {'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]),
]
)
def test_certificate_chain_from_printcert(printcert, correct_certs):
"""
This function tests that a printcert output is properly parsed by certificate_chain_from_printcert.
The certificates used come from running the commands in section 'Generate Certificates for an SSL Server'
in the keytool docs: https://docs.oracle.com/javase/8/docs/technotes/tools/windows/keytool.html
"""
from assemblyline_v4_service.common.keytool_parse import certificate_chain_from_printcert
certs = certificate_chain_from_printcert(printcert)
assert len(certs) == len(correct_certs)
for cert, correct in zip(certs, correct_certs):
assert cert.country == correct['Country']
assert cert.issuer == correct['Issuer']
assert cert.owner == correct['Owner']
assert cert.valid_from == correct['ValidFrom']
assert cert.valid_to == correct['ValidTo'] | 190.148148 | 2,706 | 0.639462 | import pytest
class TestKeytoolParse:
@staticmethod
@pytest.mark.parametrize("printcert, correct_certs",
[
('Owner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]),
('Certificate[1]:\nOwner: CN=server, OU=server, O=server, L=server, ST=server, C=CA\nIssuer: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nSerial number: 4e2d045a\nValid from: Wed Apr 14 13:42:22 EDT 2021 until: Tue Jul 13 13:42:22 EDT 2021\nCertificate fingerprints:\n SHA1: 0B:BE:A7:40:20:F4:F0:DE:D1:C8:99:26:32:A8:33:7A:EB:E8:87:70\n SHA256: 83:C1:8D:49:A4:98:3F:73:66:97:63:78:4C:E5:70:BF:0C:A2:71:4A:58:CE:B0:4E:65:87:39:F0:06:1F:7F:2C\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n\n#2: ObjectId: 2.5.29.15 Criticality=true\nKeyUsage [\n DigitalSignature\n Key_Encipherment\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: 9B 06 D8 13 2E 6F 2F 62 85 66 42 A9 AC 86 2E A8 .....o/b.fB.....\n0010: 25 89 AB FC %...\n]\n]\n\n\nCertificate[2]:\nOwner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 
.....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=server, OU=server, O=server, L=server, ST=server, C=CA', 'Issuer': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:42:22 EDT 2021', 'ValidTo': 'Tue Jul 13 13:42:22 EDT 2021'}, {'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]),
]
)
def test_certificate_chain_from_printcert(printcert, correct_certs):
from assemblyline_v4_service.common.keytool_parse import certificate_chain_from_printcert
certs = certificate_chain_from_printcert(printcert)
assert len(certs) == len(correct_certs)
for cert, correct in zip(certs, correct_certs):
assert cert.country == correct['Country']
assert cert.issuer == correct['Issuer']
assert cert.owner == correct['Owner']
assert cert.valid_from == correct['ValidFrom']
assert cert.valid_to == correct['ValidTo'] | true | true |
f7316970b0437dcd982b168d5fcd16d064c8cb16 | 8,044 | py | Python | tempest/api/identity/admin/v3/test_domain_configuration.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | null | null | null | tempest/api/identity/admin/v3/test_domain_configuration.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | null | null | null | tempest/api/identity/admin/v3/test_domain_configuration.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 5 | 2016-06-24T20:03:52.000Z | 2020-02-05T10:14:54.000Z | # Copyright 2017 AT&T Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
custom_config = {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://myldap.com:389/",
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
}
}
@classmethod
def setup_clients(cls):
super(DomainConfigurationTestJSON, cls).setup_clients()
cls.client = cls.domain_config_client
@classmethod
def resource_setup(cls):
super(DomainConfigurationTestJSON, cls).resource_setup()
cls.group = cls.groups_client.create_group(
name=data_utils.rand_name('group'),
description=data_utils.rand_name('group-desc'))['group']
@classmethod
def resource_cleanup(cls):
cls.groups_client.delete_group(cls.group['id'])
super(DomainConfigurationTestJSON, cls).resource_cleanup()
def _create_domain_and_config(self, config):
domain = self.setup_test_domain()
config = self.client.create_domain_config(domain['id'], **config)[
'config']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_domain_config, domain['id'])
return domain, config
@decorators.idempotent_id('11a02bf0-6f94-4380-b3b0-c8dc18fc0d22')
def test_show_default_group_config_and_options(self):
# The API supports only the identity and ldap groups. For the ldap
# group, a valid value is url or user_tree_dn. For the identity group,
# a valid value is driver.
# Check that the default config has the identity and ldap groups.
config = self.client.show_default_config_settings()['config']
self.assertIsInstance(config, dict)
self.assertIn('identity', config)
self.assertIn('ldap', config)
# Check that the identity group is correct.
identity_config = self.client.show_default_group_config('identity')[
'config']
self.assertIsInstance(identity_config, dict)
self.assertIn('identity', identity_config)
self.assertIn('driver', identity_config['identity'])
self.assertIn('list_limit', identity_config['identity'])
# Show each option for the default domain and identity group.
for config_opt_name in ['driver', 'list_limit']:
retrieved_config_opt = self.client.show_default_group_option(
'identity', config_opt_name)['config']
self.assertIn(config_opt_name, retrieved_config_opt)
# Check that the ldap group is correct.
ldap_config = self.client.show_default_group_config('ldap')['config']
self.assertIsInstance(ldap_config, dict)
self.assertIn('ldap', ldap_config)
# Several valid options exist for ldap group.
valid_options = ldap_config['ldap'].keys()
# Show each option for the default domain and ldap group.
for config_opt_name in valid_options:
retrieved_config_opt = self.client.show_default_group_option(
'ldap', config_opt_name)['config']
self.assertIn(config_opt_name, retrieved_config_opt)
@decorators.idempotent_id('9e3ff13c-f597-4f01-9377-d6c06c2a1477')
def test_create_domain_config_and_show_config_groups_and_options(self):
domain, created_config = self._create_domain_and_config(
self.custom_config)
# Check that the entire configuration is correct.
self.assertEqual(self.custom_config, created_config)
# Check that each configuration group is correct.
for group_name in self.custom_config.keys():
group_cfg = self.client.show_domain_group_config(
domain['id'], group_name)['config']
self.assertIn(group_name, group_cfg)
self.assertEqual(self.custom_config[group_name],
group_cfg[group_name])
# Check that each configuration option is correct.
for opt_name in self.custom_config[group_name].keys():
group_opt = self.client.show_domain_group_option_config(
domain['id'], group_name, opt_name)['config']
self.assertIn(opt_name, group_opt)
self.assertEqual(self.custom_config[group_name][opt_name],
group_opt[opt_name])
@decorators.idempotent_id('7161023e-5dd0-4612-9da0-1bac6ac30b63')
def test_create_update_and_delete_domain_config(self):
domain, created_config = self._create_domain_and_config(
self.custom_config)
new_config = created_config
new_config['ldap']['url'] = data_utils.rand_url()
# Check that the altered configuration is reflected in updated_config.
updated_config = self.client.update_domain_config(
domain['id'], **new_config)['config']
self.assertEqual(new_config, updated_config)
# Check that showing the domain config shows the altered configuration.
retrieved_config = self.client.show_domain_config(domain['id'])[
'config']
self.assertEqual(new_config, retrieved_config)
# Check that deleting a configuration works.
self.client.delete_domain_config(domain['id'])
self.assertRaises(lib_exc.NotFound, self.client.show_domain_config,
domain['id'])
    @decorators.idempotent_id('c7510fa2-6661-4170-9c6b-4783a80651e9')
    def test_create_update_and_delete_domain_config_groups_and_opts(self):
        """Update and delete domain config at group and option granularity."""
        domain, _ = self._create_domain_and_config(self.custom_config)
        # Check that updating configuration groups work.
        new_driver = data_utils.rand_name('driver')
        new_limit = data_utils.rand_int_id(0, 100)
        new_group_config = {'identity': {'driver': new_driver,
                                         'list_limit': new_limit}}
        updated_config = self.client.update_domain_group_config(
            domain['id'], 'identity', **new_group_config)['config']
        self.assertEqual(new_driver, updated_config['identity']['driver'])
        self.assertEqual(new_limit, updated_config['identity']['list_limit'])
        # Check that updating individual configuration group options work.
        new_driver = data_utils.rand_name('driver')
        updated_config = self.client.update_domain_group_option_config(
            domain['id'], 'identity', 'driver', driver=new_driver)['config']
        self.assertEqual(new_driver, updated_config['identity']['driver'])
        # Check that deleting individual configuration group options work.
        self.client.delete_domain_group_option_config(
            domain['id'], 'identity', 'driver')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_option_config,
                          domain['id'], 'identity', 'driver')
        # Check that deleting configuration groups work.
        self.client.delete_domain_group_config(domain['id'], 'identity')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_config,
                          domain['id'], 'identity')
| 43.481081 | 79 | 0.669443 |
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
custom_config = {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://myldap.com:389/",
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
}
}
@classmethod
def setup_clients(cls):
super(DomainConfigurationTestJSON, cls).setup_clients()
cls.client = cls.domain_config_client
@classmethod
def resource_setup(cls):
super(DomainConfigurationTestJSON, cls).resource_setup()
cls.group = cls.groups_client.create_group(
name=data_utils.rand_name('group'),
description=data_utils.rand_name('group-desc'))['group']
@classmethod
def resource_cleanup(cls):
cls.groups_client.delete_group(cls.group['id'])
super(DomainConfigurationTestJSON, cls).resource_cleanup()
def _create_domain_and_config(self, config):
domain = self.setup_test_domain()
config = self.client.create_domain_config(domain['id'], **config)[
'config']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_domain_config, domain['id'])
return domain, config
@decorators.idempotent_id('11a02bf0-6f94-4380-b3b0-c8dc18fc0d22')
def test_show_default_group_config_and_options(self):
config = self.client.show_default_config_settings()['config']
self.assertIsInstance(config, dict)
self.assertIn('identity', config)
self.assertIn('ldap', config)
identity_config = self.client.show_default_group_config('identity')[
'config']
self.assertIsInstance(identity_config, dict)
self.assertIn('identity', identity_config)
self.assertIn('driver', identity_config['identity'])
self.assertIn('list_limit', identity_config['identity'])
for config_opt_name in ['driver', 'list_limit']:
retrieved_config_opt = self.client.show_default_group_option(
'identity', config_opt_name)['config']
self.assertIn(config_opt_name, retrieved_config_opt)
ldap_config = self.client.show_default_group_config('ldap')['config']
self.assertIsInstance(ldap_config, dict)
self.assertIn('ldap', ldap_config)
valid_options = ldap_config['ldap'].keys()
for config_opt_name in valid_options:
retrieved_config_opt = self.client.show_default_group_option(
'ldap', config_opt_name)['config']
self.assertIn(config_opt_name, retrieved_config_opt)
@decorators.idempotent_id('9e3ff13c-f597-4f01-9377-d6c06c2a1477')
def test_create_domain_config_and_show_config_groups_and_options(self):
domain, created_config = self._create_domain_and_config(
self.custom_config)
self.assertEqual(self.custom_config, created_config)
for group_name in self.custom_config.keys():
group_cfg = self.client.show_domain_group_config(
domain['id'], group_name)['config']
self.assertIn(group_name, group_cfg)
self.assertEqual(self.custom_config[group_name],
group_cfg[group_name])
for opt_name in self.custom_config[group_name].keys():
group_opt = self.client.show_domain_group_option_config(
domain['id'], group_name, opt_name)['config']
self.assertIn(opt_name, group_opt)
self.assertEqual(self.custom_config[group_name][opt_name],
group_opt[opt_name])
@decorators.idempotent_id('7161023e-5dd0-4612-9da0-1bac6ac30b63')
def test_create_update_and_delete_domain_config(self):
domain, created_config = self._create_domain_and_config(
self.custom_config)
new_config = created_config
new_config['ldap']['url'] = data_utils.rand_url()
updated_config = self.client.update_domain_config(
domain['id'], **new_config)['config']
self.assertEqual(new_config, updated_config)
retrieved_config = self.client.show_domain_config(domain['id'])[
'config']
self.assertEqual(new_config, retrieved_config)
self.client.delete_domain_config(domain['id'])
self.assertRaises(lib_exc.NotFound, self.client.show_domain_config,
domain['id'])
@decorators.idempotent_id('c7510fa2-6661-4170-9c6b-4783a80651e9')
def test_create_update_and_delete_domain_config_groups_and_opts(self):
domain, _ = self._create_domain_and_config(self.custom_config)
new_driver = data_utils.rand_name('driver')
new_limit = data_utils.rand_int_id(0, 100)
new_group_config = {'identity': {'driver': new_driver,
'list_limit': new_limit}}
updated_config = self.client.update_domain_group_config(
domain['id'], 'identity', **new_group_config)['config']
self.assertEqual(new_driver, updated_config['identity']['driver'])
self.assertEqual(new_limit, updated_config['identity']['list_limit'])
new_driver = data_utils.rand_name('driver')
updated_config = self.client.update_domain_group_option_config(
domain['id'], 'identity', 'driver', driver=new_driver)['config']
self.assertEqual(new_driver, updated_config['identity']['driver'])
self.client.delete_domain_group_option_config(
domain['id'], 'identity', 'driver')
self.assertRaises(lib_exc.NotFound,
self.client.show_domain_group_option_config,
domain['id'], 'identity', 'driver')
self.client.delete_domain_group_config(domain['id'], 'identity')
self.assertRaises(lib_exc.NotFound,
self.client.show_domain_group_config,
domain['id'], 'identity')
| true | true |
f731698f7c8a52b2ad20a1732001853d221b636d | 9,161 | py | Python | metalibm_core/code_generation/code_function.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | metalibm_core/code_generation/code_function.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | metalibm_core/code_generation/code_function.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Feb 1st, 2016
# last-modified: Mar 7th, 2018
#
# author(s): Nicolas Brunie (nicolas.brunie@kalray.eu)
###############################################################################
from ..core.ml_operations import Variable, FunctionObject, FunctionType
from .code_object import NestedCode
from .generator_utility import FunctionOperator, FO_Arg
from .code_constant import *
class CodeFunction(object):
    """A function being generated: name, argument list, return format and,
    eventually, an implementation scheme (ML_Operation DAG)."""

    def __init__(self, name, arg_list=None, output_format=None, code_object=None, language=C_Code, attributes=None):
        """code function initialization

        :param name: function name
        :param arg_list: list of input Variables (defaults to empty)
        :param output_format: ML_Format of the return value(s)
        :param code_object: code object this function is generated into
        :param language: target language (defaults to C_Code)
        :param attributes: list of function attribute strings
        """
        self.arg_list = arg_list if arg_list else []
        arg_list_precision = [arg.get_precision() for arg in self.arg_list]
        self.function_type = FunctionType(name, arg_list_precision, output_format, attributes)
        self.code_object = code_object
        # Both are built lazily (see get_function_object / get_function_operator).
        self.function_object = None
        self.function_operator = None
        self.language = language

    @property
    def name(self):
        """Function name (delegated to the underlying FunctionType)."""
        return self.function_type.name

    @property
    def output_format(self):
        """Return format (delegated to the underlying FunctionType)."""
        return self.function_type.output_format

    @property
    def attributes(self):
        """Attribute list (delegated to the underlying FunctionType)."""
        return self.function_type.attributes

    def get_name(self):
        """Legacy accessor, equivalent to the name property."""
        return self.name

    def add_input_variable(self, name, vartype, **kw):
        """ declares a new Variable with name @p name and format @p vartype
            and registers it as an input variable """
        input_var = Variable(name, precision=vartype, **kw)
        self.arg_list.append(input_var)
        # WARNING: self.function_type.arg_list_precision is not updated here;
        # update_arg_list_precisions() re-synchronizes it before declaration.
        return input_var

    def register_new_input_variable(self, new_input):
        """Registers an already-built Variable as an input."""
        self.arg_list.append(new_input)
        # WARNING: self.function_type.arg_list_precision is not updated

    def get_arg_list(self):
        """Return the list of input Variables."""
        return self.arg_list

    def clear_arg_list(self):
        """Drop all registered input Variables."""
        self.arg_list = []

    def get_function_object(self):
        """Return the FunctionObject for this function, building it lazily."""
        if self.function_object is None:
            self.function_object = self.build_function_object()
        return self.function_object

    def build_function_object(self):
        """Build a FunctionObject from the current argument list."""
        arg_list_precision = [arg.get_precision() for arg in self.arg_list]
        return FunctionObject(self.name, arg_list_precision, self.output_format, self.get_function_operator(), self.attributes)

    def get_function_operator(self):
        """Return a FunctionOperator usable to generate calls to this function."""
        return self.build_function_operator()

    def build_function_operator(self):
        """Build a FunctionOperator mapping each argument position to itself."""
        # Dict comprehension replaces the manual index loop of the original.
        function_arg_map = {i: FO_Arg(i) for i in range(len(self.arg_list))}
        return FunctionOperator(self.name, arg_map=function_arg_map)

    def get_output_format(self):
        """Retrieve format of the result(s) returned by the function.

        :return: ML_Format object
        """
        return self.output_format

    def set_output_format(self, new_output_format):
        """Define a new format for the function return value(s).

        :param new_output_format: ML_Format object returned by the function
        """
        self.function_type.output_format = new_output_format

    def add_attribute(self, attribute):
        """Append a new attribute string (must not already be present)."""
        assert attribute not in self.attributes
        self.attributes.append(attribute)

    def get_attributes_dec(self, language=C_Code):
        """ generate function attribute string
        (the language parameter is currently unused, kept for interface symmetry) """
        if self.attributes:
            return " ".join(self.attributes)
        return ""

    def get_LLVM_definition(self, final=True, language=LLVM_IR_Code):
        """Generate the LLVM-IR "define" line for this function."""
        # TODO: support attributes and metadata
        arg_format_list = ", ".join("%s %s" % (inp.get_precision().get_name(language=language), inp.get_tag()) for inp in self.arg_list)
        return "define %s @%s(%s)" % (self.output_format.get_name(language=language), self.name, arg_format_list)

    def update_arg_list_precisions(self):
        """Re-synchronize the FunctionType's precisions with the argument list."""
        self.function_type.arg_list_precision = [arg.precision for arg in self.arg_list]

    def get_declaration(self, code_generator, final=True, language=None, named_arg_list=False, is_definition=False):
        """Generate this function's declaration (or definition prolog).

        :param code_generator: backend used to emit the declaration
        :param final: whether to close/terminate the statement
        :param language: target language (defaults to self.language)
        :param named_arg_list: emit argument names alongside their types
        :param is_definition: True for a definition prolog, False for a pure
                              declaration (the original docstring documented a
                              nonexistent ``for_definition`` parameter)
        """
        self.update_arg_list_precisions()
        language = self.language if language is None else language
        arg_list = self.arg_list if named_arg_list else None
        if is_definition:
            return code_generator.get_function_definition(self.function_type, final, language, arg_list=arg_list)
        # pure declaration
        return code_generator.get_function_declaration(self.function_type, final, language, arg_list=arg_list)

    def set_scheme(self, scheme):
        """Define the function implementation.

        :param scheme: ML_Operation object to be defined as the implementation
        """
        self.scheme = scheme

    def get_scheme(self):
        """:return: function implementation (ML_Operation DAG)"""
        return self.scheme

    def get_definition(self, code_generator, language, folded=True, static_cst=False):
        """Generate a full definition into a fresh NestedCode object."""
        code_object = NestedCode(code_generator, static_cst=static_cst)
        code_object << self.get_declaration(code_generator, final=False, language=language, named_arg_list=True, is_definition=True)
        code_object.open_level()
        code_generator.generate_expr(code_object, self.scheme, folded=folded, initial=True, language=language)
        code_object.close_level()
        return code_object

    def add_definition(self, code_generator, language, code_object, folded=True, static_cst=False):
        """Append this function's full definition to an existing code object."""
        code_object << self.get_declaration(code_generator, final=False, language=language, named_arg_list=True, is_definition=True)
        code_object.open_level()
        code_generator.generate_expr(code_object, self.scheme, folded=folded, initial=True, language=language)
        code_object.close_level()
        return code_object

    def add_declaration(self, code_generator, language, code_object):
        """Append this function's terminated declaration to a code object."""
        code_object << self.get_declaration(code_generator, final=True, language=language) + "\n"
        return code_object
class FunctionGroup(object):
    """A collection of functions split into core functions (public entry
    points) and sub-functions (internal helpers)."""

    def __init__(self, core_function_list=None, sub_function_list=None):
        # Fall back to fresh lists so instances never share default state.
        self.core_function_list = core_function_list or []
        self.sub_function_list = sub_function_list or []

    def add_sub_function(self, sub_function):
        """Append a helper function to the sub-function list."""
        self.sub_function_list.append(sub_function)

    def add_core_function(self, sub_function):
        """Append an entry-point function to the core-function list."""
        self.core_function_list.append(sub_function)

    def apply_to_core_functions(self, routine):
        """Call routine(self, fct) on every core function."""
        for member in self.core_function_list:
            routine(self, member)

    def apply_to_sub_functions(self, routine):
        """Call routine(self, fct) on every sub-function."""
        for member in self.sub_function_list:
            routine(self, member)

    def apply_to_all_functions(self, routine):
        """Apply routine to every function, sub-functions first.

        Returns self so calls can be chained."""
        self.apply_to_sub_functions(routine)
        self.apply_to_core_functions(routine)
        return self

    def merge_with_group(self, subgroup, demote_sub_core=True):
        """Absorb another FunctionGroup into this one.

        The argument's sub-functions always join this group's sub-function
        list. Its core functions are demoted to sub-functions when
        demote_sub_core is set; otherwise they stay core. Returns self."""
        for fct in subgroup.sub_function_list:
            self.add_sub_function(fct)
        # Pick the destination once; it does not change per element.
        adder = self.add_sub_function if demote_sub_core else self.add_core_function
        for fct in subgroup.core_function_list:
            adder(fct)
        return self

    def get_code_function_by_name(self, function_name):
        """Return the first function whose name matches, or None.

        Core functions are searched before sub-functions."""
        for fct in self.core_function_list:
            if fct.name == function_name:
                return fct
        for fct in self.sub_function_list:
            if fct.name == function_name:
                return fct
        return None
| 41.640909 | 145 | 0.709966 | true | true | |
f7316a0076d2b4bf5804f2d9d837fa670e1f56a2 | 5,487 | py | Python | classic_tetris_project_django/settings.py | professor-l/classic-tetris-project | d171ab40c06b87ee945dce058babf2ed23dd3b88 | [
"MIT"
] | 17 | 2019-11-23T12:56:06.000Z | 2022-02-05T21:48:00.000Z | classic_tetris_project_django/settings.py | professor-l/classic-tetris-project | d171ab40c06b87ee945dce058babf2ed23dd3b88 | [
"MIT"
] | 43 | 2019-10-03T20:16:11.000Z | 2022-03-12T00:24:52.000Z | classic_tetris_project_django/settings.py | professor-l/classic-tetris-project | d171ab40c06b87ee945dce058babf2ed23dd3b88 | [
"MIT"
] | 17 | 2020-02-09T01:55:01.000Z | 2021-11-12T21:16:50.000Z | """
Django settings for classic_tetris_project_django project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import environ
import django
import os

# Environment-backed settings: each entry is NAME=(cast, default); the
# default applies only when the variable is absent from the environment.
ENV = environ.Env(
    SECRET_KEY=(str, 'd0$j=wune9kn70srt1lt!g3a8fim7ug#j@x8+zmy0gi_mv7&dk'),
    DEBUG=(bool, True),
    DATABASE_URL=(str, 'sqlite:///db.sqlite3'),
    CACHE_URL=(str, 'rediscache://'),
    BASE_URL=(str, 'http://dev.monthlytetris.info:8000'),
    DISCORD_USER_ID_WHITELIST=(list, []),
    DISCORD_CHANNEL_MESSAGES=(bool, False),
    ROLLBAR_ENABLED=(bool, False),
    ROLLBAR_TOKEN=(str, ''),
)
# Load overrides from a local .env file, if present.
environ.Env.read_env('.env')

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_URL = ENV('BASE_URL')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded fallback key is baked into the ENV defaults
# above — production deployments must set SECRET_KEY explicitly.
SECRET_KEY = ENV('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG defaults to True when the variable is unset — confirm
# production environments set DEBUG=False.
DEBUG = ENV('DEBUG')

ALLOWED_HOSTS = [
    'ctm.gg',
    'monthlytetris.info',
    'monthlytetris.com',
]
# In debug mode accept any Host header (convenient for local development).
if DEBUG:
    ALLOWED_HOSTS.append('*')
# Application definition

INSTALLED_APPS = [
    # Project app listed first so its templates/static files win lookups.
    'classic_tetris_project.apps.ClassicTetrisProjectConfig',
    'dal',
    'dal_select2',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.redirects',
    'django_extensions',
    'django_object_actions',
    'markdownx',
    'adminsortable2',
    'colorfield',
    'webpack_loader',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
    # Error reporting to Rollbar (configured via the ROLLBAR dict below).
    'rollbar.contrib.django.middleware.RollbarNotifierMiddleware',
]

ROOT_URLCONF = 'classic_tetris_project_django.urls'
LOGIN_URL = '/oauth/login/'
# Render form widgets through the template engines configured in TEMPLATES,
# so widget templates can be overridden by the project.
FORM_RENDERER = 'django.forms.renderers.TemplatesSetting'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, "classic_tetris_project", "templates"),
            os.path.join(BASE_DIR, "classic_tetris_project", "private", "templates"),
            # Presumably included so the engine can find Django's built-in
            # form widget templates (needed with TemplatesSetting) — verify.
            os.path.join(django.__path__[0], "forms", "templates"),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'builtins': [
                'classic_tetris_project.private.templatetags',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'classic_tetris_project.private.context_processors.session_processor',
            ],
        },
    },
]

WSGI_APPLICATION = 'classic_tetris_project_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        # Connection settings are parsed from the DATABASE_URL env variable.
        **ENV.db(),
        # Wrap each request in a database transaction.
        "ATOMIC_REQUESTS": True,
    }
}

# NOTE(review): this hard-codes a local Redis instance and never consults
# the CACHE_URL default declared in ENV above — confirm which is intended.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/1",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}

SITE_ID = 1
# Print SQL queries in shell_plus sessions (django_extensions setting).
SHELL_PLUS_PRINT_SQL = True

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Expires after 180 days
SESSION_COOKIE_AGE = 180 * 24 * 60 * 60

MARKDOWNX_MARKDOWN_EXTENSIONS = [
    'markdown.extensions.extra',
]

STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/tetris/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'bundles/',
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
    }
}

# Rollbar error-reporting configuration; consumed by rollbar.init() below
# and by the RollbarNotifierMiddleware listed in MIDDLEWARE.
ROLLBAR = {
    'access_token': ENV('ROLLBAR_TOKEN'),
    'environment': 'development' if DEBUG else 'production',
    'root': BASE_DIR,
    'enabled': ENV('ROLLBAR_ENABLED'),
}
# Rollbar is initialised at import time so errors are reported as soon as
# Django loads this settings module.
import rollbar
rollbar.init(**ROLLBAR)

# Celery broker URL. BUG FIX: the original assignment ended with a stray
# trailing comma, which made CELERY_BROKER_URL a 1-tuple
# ('redis://127.0.0.1:6379/1',) instead of the plain string Celery expects
# for its broker URL setting.
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/1'
| 26.253589 | 91 | 0.67906 |
import environ
import django
import os
ENV = environ.Env(
SECRET_KEY=(str, 'd0$j=wune9kn70srt1lt!g3a8fim7ug#j@x8+zmy0gi_mv7&dk'),
DEBUG=(bool, True),
DATABASE_URL=(str, 'sqlite:///db.sqlite3'),
CACHE_URL=(str, 'rediscache://'),
BASE_URL=(str, 'http://dev.monthlytetris.info:8000'),
DISCORD_USER_ID_WHITELIST=(list, []),
DISCORD_CHANNEL_MESSAGES=(bool, False),
ROLLBAR_ENABLED=(bool, False),
ROLLBAR_TOKEN=(str, ''),
)
environ.Env.read_env('.env')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_URL = ENV('BASE_URL')
SECRET_KEY = ENV('SECRET_KEY')
DEBUG = ENV('DEBUG')
ALLOWED_HOSTS = [
'ctm.gg',
'monthlytetris.info',
'monthlytetris.com',
]
if DEBUG:
ALLOWED_HOSTS.append('*')
# Application definition
INSTALLED_APPS = [
'classic_tetris_project.apps.ClassicTetrisProjectConfig',
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.redirects',
'django_extensions',
'django_object_actions',
'markdownx',
'adminsortable2',
'colorfield',
'webpack_loader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
'rollbar.contrib.django.middleware.RollbarNotifierMiddleware',
]
ROOT_URLCONF = 'classic_tetris_project_django.urls'
LOGIN_URL = '/oauth/login/'
FORM_RENDERER = 'django.forms.renderers.TemplatesSetting'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "classic_tetris_project", "templates"),
os.path.join(BASE_DIR, "classic_tetris_project", "private", "templates"),
os.path.join(django.__path__[0], "forms", "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'builtins': [
'classic_tetris_project.private.templatetags',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'classic_tetris_project.private.context_processors.session_processor',
],
},
},
]
WSGI_APPLICATION = 'classic_tetris_project_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
**ENV.db(),
"ATOMIC_REQUESTS": True,
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SITE_ID = 1
SHELL_PLUS_PRINT_SQL = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Expires after 180 days
SESSION_COOKIE_AGE = 180 * 24 * 60 * 60
MARKDOWNX_MARKDOWN_EXTENSIONS = [
'markdown.extensions.extra',
]
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/tetris/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
ROLLBAR = {
'access_token': ENV('ROLLBAR_TOKEN'),
'environment': 'development' if DEBUG else 'production',
'root': BASE_DIR,
'enabled': ENV('ROLLBAR_ENABLED'),
}
import rollbar
rollbar.init(**ROLLBAR)
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/1',
| true | true |
f7316a594c1c8ddc04649d2c3866818979f193b5 | 12,132 | py | Python | python/modprop/core/modules_core.py | Humhu/modprop | 0cff8240d5e1522f620de8004c22a74491a0c9fb | [
"AFL-3.0"
] | 1 | 2017-11-10T00:54:53.000Z | 2017-11-10T00:54:53.000Z | python/modprop/core/modules_core.py | Humhu/modprop | 0cff8240d5e1522f620de8004c22a74491a0c9fb | [
"AFL-3.0"
] | null | null | null | python/modprop/core/modules_core.py | Humhu/modprop | 0cff8240d5e1522f620de8004c22a74491a0c9fb | [
"AFL-3.0"
] | null | null | null | """This module contains base classes and types for creating new Modules and using module trees.
"""
import abc
from collections import deque
import numpy as np
class ModuleBase(object):
    """The base interface for all modules. Modules must inherit from this interface.

    A module owns a set of InputPort/OutputPort objects and implements
    foreprop/backprop to move values (and Jacobian accumulators) through
    the module graph.
    """
    # Python 2 style metaclass declaration. NOTE: on Python 3 this attribute
    # is inert, so abstract-method enforcement only applies under Python 2.
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        self._inputs = []   # Registered InputPort objects
        self._outputs = []  # Registered OutputPort objects

    def register_inputs(self, *args):
        """Registers inputs to this module.

        Parameters
        ----------
        args : Variable number of InputPort objects to register.

        Raises
        ------
        ValueError : If any argument is not an InputPort.
        """
        for arg in args:
            if not isinstance(arg, InputPort):
                raise ValueError('All inputs must be InputPort type')
            self._inputs.append(arg)

    def register_outputs(self, *args):
        """Registers outputs to this module.

        Parameters
        ----------
        args : Variable number of OutputPort objects to register.

        Raises
        ------
        ValueError : If any argument is not an OutputPort.
        """
        for arg in args:
            if not isinstance(arg, OutputPort):
                raise ValueError('All outputs must be OutputPort type')
            self._outputs.append(arg)

    def foreprop_ready(self):
        """Returns if the module is ready to forward-propagate.

        Default implementation returns true when all inputs are ready and
        not all outputs are set. Ports are truthy once they hold a value.

        Returns
        -------
        ready : Boolean denoting if the module is ready to foreprop
        """
        return all(self._inputs) and not all(self._outputs)

    @abc.abstractmethod
    def foreprop(self):
        """Perform forward-propagation for this module.

        Returns
        -------
        ready : The aggregated return list from all forepropped output ports.
        """
        return []

    def backprop_ready(self):
        """Returns if the module is ready to backward-propagate.

        Typically this is when all outputs have received all backprops.
        Default implementation checks whether every output is ready.

        Returns
        -------
        ready : Boolean denoting if the module is ready to backprop
        """
        # Generator expression avoids building an intermediate list
        # (the original used all([...])).
        return all(o.backprop_ready() for o in self._outputs)

    @abc.abstractmethod
    def backprop(self):
        """Perform backward-propagation for this module.

        Returns
        -------
        ready : The aggregated return list from all backpropped input ports.
        """
        return []

    def is_invalid(self):
        """Returns if the module is fully invalidated.

        Typically this is when all ports are invalidated.

        Returns
        -------
        invalid : Boolean denoting if this module is fully invalid
        """
        return not any(self._inputs) and not any(self._outputs)

    def invalidate(self):
        """Invalidate this module's inputs and outputs.

        First checks whether the module is already invalid; if not, calls
        invalidate on all inputs and outputs and aggregates the results.

        Returns
        -------
        ready : List of modules to invalidate next.
        """
        if self.is_invalid():
            return []

        ready = []
        for i in self._inputs:
            ready += i.invalidate()
        for o in self._outputs:
            ready += o.invalidate()
        return ready
# TODO Ways to unregister port connections
class InputPort(object):
    """An input to a module. Ideally instantiated as a member of the module.

    Parameters
    ----------
    module : The owning module. Must implement the ModuleBase interface.
    """

    def __init__(self, module):
        if not isinstance(module, ModuleBase):
            raise ValueError('module must implement ModuleBase')
        self._module = module  # Owning module
        self._value = None     # Current forward value (None when invalid)
        self._source = None    # Upstream OutputPort feeding this port

    def __nonzero__(self):
        """Override of Python boolean test operator to return if the port has a value.

        Returns
        -------
        ready : Boolean denoting if the port has a valid value.
        """
        return self._value is not None

    # BUG FIX: Python 3 renamed __nonzero__ to __bool__. Without this alias
    # every InputPort is always truthy under Python 3, which defeats the
    # early-exit guard in invalidate() and can cause unbounded
    # re-invalidation cycles through the port graph.
    __bool__ = __nonzero__

    def invalidate(self):
        """Invalidate this input port and propagate to the module and source.

        Returns
        -------
        valid : List of modules to invalidate next.
        """
        # If we're already invalidated, there's nothing for us to do here
        if not self:
            return []

        self._value = None
        valid = []
        # If the owning module is not invalid, return it
        if not self._module.is_invalid():
            valid.append(self._module)
        # Propagate invalidation to source
        if self._source is not None:
            valid += self._source.invalidate()
        return valid

    def foreprop(self, v):
        """Set this port's value and forward-propagate.

        Typically only called by OutputPorts.

        Parameters
        ----------
        v : The value to set the port to.

        Returns
        -------
        ready : List of modules to foreprop next.
        """
        self._value = v
        if self._module.foreprop_ready():
            return [self._module]
        else:
            return []

    def backprop(self, do_dx):
        """Give this port a backpropagation accumulator to pass on.

        Typically called by the owning module.

        Parameters
        ----------
        do_dx : Numpy 2D array Jacobian[i,j] of tree outputs[i] w.r.t. this input port elements[j].

        Returns
        -------
        ready : List of modules to backprop next.
        """
        if self._source is not None:
            return self._source.backprop(do_dx)
        else:
            return []

    def register_source(self, src):
        """Register an OutputPort source for this port.

        Parameters
        ----------
        src : OutputPort to take as the source of this port.

        Raises
        ------
        ValueError : If src is not an OutputPort.
        """
        if not isinstance(src, OutputPort):
            raise ValueError('src must be OutputPort')
        self._source = src

    @property
    def value(self):
        """Current forward value, or None when the port is invalid."""
        return self._value
class OutputPort(object):
"""An output from a module. Typically instantiated as a module member.
Parameters
----------
module : The owning module. Must implement the ModuleBase interface.
"""
    def __init__(self, module):
        """Create an output port owned by the given module.

        Raises ValueError if module does not implement ModuleBase.
        """
        if not isinstance(module, ModuleBase):
            raise ValueError('module must implement ModuleBase')
        self._module = module        # Owning module
        self._backprop_acc = None    # Running sum of Jacobians received this pass
        self._num_backs = 0          # Number of consumer backprops received
        self._value = None           # Current forward value (None when invalid)
        self._consumers = []         # Downstream InputPorts fed by this port
def __nonzero__(self):
"""Override of Python boolean test operator to return whether this port has a value.
"""
return self.value is not None
    @property
    def num_consumers(self):
        """Return the number of registered consumers.
        """
        return len(self._consumers)

    @property
    def value(self):
        """Return the current forward value, or None when the port is invalid."""
        return self._value

    def register_consumer(self, con):
        """Register an InputPort consumer to this port.

        Parameters
        ----------
        con : InputPort to attach as a consumer of this port's value.

        Raises
        ------
        ValueError : If con is not an InputPort.
        """
        if not isinstance(con, InputPort):
            raise ValueError('Consumer must be InputPort')
        self._consumers.append(con)
def invalidate(self):
"""Invalidate this port and propagate.
Returns
-------
valid : List of modules to invalidate next
"""
# If we're already invalid, there's nothing to do
if not self:
return []
self._backprop_acc = None
self._num_backs = 0
self._value = None
valid = []
if not self._module.is_invalid():
valid.append(self._module)
for con in self._consumers:
valid += con.invalidate()
return valid
def foreprop(self, v):
"""Perform forward-propagation through this output.
Typically called by the owning module.
Parameters
----------
v : The value to set this port to.
Returns
-------
ready : List of modules to foreprop next.
"""
self._value = v
ready = []
for con in self._consumers:
ready += con.foreprop(self._value)
return ready
def backprop(self, do_dx):
"""Perform backward-propagation through this output.
Typically called by a connected InputPort.
Only propagates when data from all registered consumers is received.
Parameters
----------
do_dx : Numpy 2D array Jacobian[i,j] of tree outputs[i] w.r.t. this input port elements[j]
Returns
-------
ready : List of modules to backprop next
"""
if do_dx is None:
raise RuntimeError('OutputPort received None backprop value.')
do_dx.tick_descent()
if self._backprop_acc is None:
self._backprop_acc = do_dx
else:
self._backprop_acc += do_dx
self._num_backs += 1
# Check for backprop errors
if self._num_backs > len(self._consumers):
errstr = 'Received %d backprops for %d consumers!' % (self._num_backs, len(self._consumers))
raise RuntimeError(errstr)
# If we've heard from every consumer and our module is ready
if self.backprop_ready() and self._module.backprop_ready():
return [self._module]
else:
return []
def backprop_ready(self):
"""Returns if this port has heard from all its consumers.
"""
return self._num_backs == self.num_consumers
def chain_backprop(self, dy_dx=None):
"""Returns a copy of this port's backprop accumulator right-multiplied by the
given gradient. If the port has not received a backprop, returns None.
"""
if self._backprop_acc is None:
return None
#raise RuntimeError('Cannot chain backprop! Port has not received do_dx.')
out_acc = self._backprop_acc.copy()
if dy_dx is not None:
out_acc = out_acc * dy_dx
return out_acc
@property
def backprop_accumulator(self):
"""Returns the port's backprop accumulator.
"""
return self._backprop_acc
@property
def backprop_value(self):
if self._backprop_acc is None:
return 0
else:
return self._backprop_acc.retrieve()
def link_ports(in_port, out_port):
    """Bidirectionally connect an InputPort to an OutputPort.

    Parameters
    ----------
    in_port : InputPort to join
    out_port : OutputPort to join
    """
    checks = ((in_port, InputPort, 'in_port must be an InputPort.'),
              (out_port, OutputPort, 'out_port must be an OutputPort.'))
    for port, required_cls, message in checks:
        if not isinstance(port, required_cls):
            raise ValueError(message)
    in_port.register_source(out_port)
    out_port.register_consumer(in_port)
# @profile
def iterative_operation(init_module, op):
    # TODO Allow taking list of initial modules
    """Iteratively perform an operation on a module tree.

    This function should be used instead of recursive calls, which do not
    scale to deep trees very well.

    Parameters
    ----------
    init_module : Module to begin iteration on
    op : Function that takes a module and returns a list of modules to operate on next
    """
    pending = deque((init_module,))
    while pending:
        module = pending.popleft()
        pending.extend(op(module))
def iterative_foreprop(init_module):
    """Run forward propagation over a module tree without recursion."""
    iterative_operation(init_module, lambda module: module.foreprop())
def iterative_backprop(init_module):
    """Run backward propagation over a module tree without recursion."""
    iterative_operation(init_module, lambda module: module.backprop())
def iterative_invalidate(init_module):
    """Invalidate an entire module tree without recursion."""
    iterative_operation(init_module, lambda module: module.invalidate())
| 28.817102 | 104 | 0.60089 | import abc
from collections import deque
import numpy as np
class ModuleBase(object):
    """Abstract base class for computation-graph modules.

    A module owns InputPort and OutputPort objects and participates in
    forward propagation, backward propagation, and invalidation sweeps.
    """
    # NOTE(review): Python 2 style metaclass declaration; under Python 3
    # this would need ``class ModuleBase(metaclass=abc.ABCMeta)`` -- confirm
    # target interpreter version.
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        # Ports are added by subclasses via register_inputs/register_outputs.
        self._inputs = []
        self._outputs = []
    def register_inputs(self, *args):
        """Register InputPort objects as inputs of this module."""
        for arg in args:
            if not isinstance(arg, InputPort):
                raise ValueError('All inputs must be InputPort type')
            self._inputs.append(arg)
    def register_outputs(self, *args):
        """Register OutputPort objects as outputs of this module."""
        for arg in args:
            if not isinstance(arg, OutputPort):
                raise ValueError('All outputs must be OutputPort type')
            self._outputs.append(arg)
    def foreprop_ready(self):
        """Return True when every input holds a value and at least one
        output does not (relies on the ports' boolean conversion)."""
        return all(self._inputs) and not all(self._outputs)
    @abc.abstractmethod
    def foreprop(self):
        """Produce output values; return modules ready to foreprop next."""
        return []
    def backprop_ready(self):
        """Return True when every output has received all its backprops."""
        return all([o.backprop_ready() for o in self._outputs])
    @abc.abstractmethod
    def backprop(self):
        """Propagate Jacobians; return modules ready to backprop next."""
        return []
    def is_invalid(self):
        """Return True when no port of this module holds a value."""
        return not any(self._inputs) and not any(self._outputs)
    def invalidate(self):
        """Clear all ports; return neighboring modules to invalidate next."""
        if self.is_invalid():
            return []
        ready = []
        for i in self._inputs:
            ready += i.invalidate()
        for o in self._outputs:
            ready += o.invalidate()
        return ready
class InputPort(object):
    """An input to a module. Typically instantiated as a module member.

    Parameters
    ----------
    module : The owning module. Must implement the ModuleBase interface.
    """
    def __init__(self, module):
        if not isinstance(module, ModuleBase):
            raise ValueError('module must implement ModuleBase')
        self._module = module
        self._value = None
        self._source = None

    def __nonzero__(self):
        """Return whether this port currently holds a value."""
        return self._value is not None

    # Python 3 ignores __nonzero__ and treats every instance as truthy,
    # which would break the ``if not self`` test in invalidate().  Alias
    # it so truthiness works on both Python 2 and Python 3.
    __bool__ = __nonzero__

    def invalidate(self):
        """Invalidate this port and propagate upstream.

        Returns
        -------
        valid : List of modules to invalidate next.
        """
        # Already invalidated -> nothing to do.
        if not self:
            return []
        self._value = None
        valid = []
        # Visit the owning module too unless it is already fully invalid.
        if not self._module.is_invalid():
            valid.append(self._module)
        # Propagate invalidation to the upstream source, if any.
        if self._source is not None:
            valid += self._source.invalidate()
        return valid

    def foreprop(self, v):
        """Set this port's value and forward-propagate.

        Parameters
        ----------
        v : The value to set the port to.

        Returns
        -------
        ready : List of modules to foreprop next.
        """
        self._value = v
        if self._module.foreprop_ready():
            return [self._module]
        return []

    def backprop(self, do_dx):
        """Pass a backpropagation accumulator on to this port's source.

        Parameters
        ----------
        do_dx : Numpy 2D array Jacobian[i,j] of tree outputs[i] w.r.t.
            this input port elements[j].

        Returns
        -------
        ready : List of modules to backprop next.
        """
        if self._source is not None:
            return self._source.backprop(do_dx)
        return []

    def register_source(self, src):
        """Register an OutputPort source for this port.

        Parameters
        ----------
        src : OutputPort to take as the source of this port.
        """
        if not isinstance(src, OutputPort):
            raise ValueError('src must be OutputPort')
        self._source = src

    @property
    def value(self):
        """The value currently held by this port (None when invalidated)."""
        return self._value
class OutputPort(object):
def __init__(self, module):
if not isinstance(module, ModuleBase):
raise ValueError('module must implement ModuleBase')
self._module = module
self._backprop_acc = None
self._num_backs = 0
self._value = None
self._consumers = []
def __nonzero__(self):
return self.value is not None
@property
def num_consumers(self):
return len(self._consumers)
@property
def value(self):
return self._value
def register_consumer(self, con):
if not isinstance(con, InputPort):
raise ValueError('Consumer must be InputPort')
self._consumers.append(con)
def invalidate(self):
if not self:
return []
self._backprop_acc = None
self._num_backs = 0
self._value = None
valid = []
if not self._module.is_invalid():
valid.append(self._module)
for con in self._consumers:
valid += con.invalidate()
return valid
def foreprop(self, v):
self._value = v
ready = []
for con in self._consumers:
ready += con.foreprop(self._value)
return ready
def backprop(self, do_dx):
if do_dx is None:
raise RuntimeError('OutputPort received None backprop value.')
do_dx.tick_descent()
if self._backprop_acc is None:
self._backprop_acc = do_dx
else:
self._backprop_acc += do_dx
self._num_backs += 1
if self._num_backs > len(self._consumers):
errstr = 'Received %d backprops for %d consumers!' % (self._num_backs, len(self._consumers))
raise RuntimeError(errstr)
if self.backprop_ready() and self._module.backprop_ready():
return [self._module]
else:
return []
def backprop_ready(self):
return self._num_backs == self.num_consumers
def chain_backprop(self, dy_dx=None):
if self._backprop_acc is None:
return None
#raise RuntimeError('Cannot chain backprop! Port has not received do_dx.')
out_acc = self._backprop_acc.copy()
if dy_dx is not None:
out_acc = out_acc * dy_dx
return out_acc
@property
def backprop_accumulator(self):
return self._backprop_acc
@property
def backprop_value(self):
if self._backprop_acc is None:
return 0
else:
return self._backprop_acc.retrieve()
def link_ports(in_port, out_port):
if not isinstance(in_port, InputPort):
raise ValueError('in_port must be an InputPort.')
if not isinstance(out_port, OutputPort):
raise ValueError('out_port must be an OutputPort.')
in_port.register_source(out_port)
out_port.register_consumer(in_port)
# @profile
def iterative_operation(init_module, op):
# TODO Allow taking list of initial modules
to_prop = deque()
to_prop.append(init_module)
while len(to_prop) > 0:
current = to_prop.popleft()
ready_children = op(current)
for c in ready_children:
to_prop.append(c)
def iterative_foreprop(init_module):
op = lambda x: x.foreprop()
iterative_operation(init_module, op)
def iterative_backprop(init_module):
op = lambda x: x.backprop()
iterative_operation(init_module, op)
def iterative_invalidate(init_module):
op = lambda x: x.invalidate()
iterative_operation(init_module, op)
| true | true |
f7316ace9299b1066e1b315ba165fdf6fd372280 | 1,042 | py | Python | recipes/mojo.py | azunite/chrome_build | fed8b4e9c12aa9a0e33680e223b6327a65b2c268 | [
"BSD-3-Clause"
] | 10 | 2016-06-15T06:27:53.000Z | 2019-08-29T05:44:28.000Z | recipes/mojo.py | azunite/chrome_build | fed8b4e9c12aa9a0e33680e223b6327a65b2c268 | [
"BSD-3-Clause"
] | null | null | null | recipes/mojo.py | azunite/chrome_build | fed8b4e9c12aa9a0e33680e223b6327a65b2c268 | [
"BSD-3-Clause"
] | 19 | 2016-03-25T08:12:35.000Z | 2022-02-14T06:05:26.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Mojo(recipe_util.Recipe):
    """Recipe describing how to fetch the Mojo source tree."""

    @staticmethod
    def fetch_spec(props):
        """Build the gclient_git fetch spec for the Mojo repository.

        Honors an optional comma-separated ``target_os`` property.
        """
        spec = {
            'solutions': [{
                'name': 'src',
                'url': 'https://github.com/domokit/mojo.git',
                'deps_file': 'DEPS',
                'managed': False,
                'custom_deps': {},
                'safesync_url': '',
            }],
        }
        target_os = props.get('target_os')
        if target_os:
            spec['target_os'] = target_os.split(',')
        return {
            'type': 'gclient_git',
            'gclient_git_spec': spec,
        }

    @staticmethod
    def expected_root(_props):
        """Directory the solution is checked out into."""
        return 'src'
def main(argv=None):
    """Entry point: delegate argument handling to the Mojo recipe."""
    return Mojo().handle_args(argv)
# Script entry: exit with the recipe's return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 22.170213 | 72 | 0.627639 |
import sys
import recipe_util
# pylint: disable=W0232
class Mojo(recipe_util.Recipe):
@staticmethod
def fetch_spec(props):
url = 'https://github.com/domokit/mojo.git'
solution = {
'name' :'src',
'url' : url,
'deps_file': 'DEPS',
'managed' : False,
'custom_deps': {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Mojo().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| true | true |
f7316bd69a4ba467eed53268684ee4aaaa448a25 | 2,259 | py | Python | mmaction/models/builder.py | jiaoml1996/mmaction2 | cff4a9e196dfc7b7b0e842ab44f2a7f2573a2c7c | [
"Apache-2.0"
] | 1 | 2021-01-07T05:03:16.000Z | 2021-01-07T05:03:16.000Z | mmaction/models/builder.py | xumingze0308/mmaction2 | 777546f27f8f5a3c83e10d966e2149be2fc9fa31 | [
"Apache-2.0"
] | null | null | null | mmaction/models/builder.py | xumingze0308/mmaction2 | 777546f27f8f5a3c83e10d966e2149be2fc9fa31 | [
"Apache-2.0"
] | null | null | null | import warnings
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS
# mmdet is an optional dependency; fall back to inert stand-ins so this
# module can still be imported (and non-detector builders used) without it.
try:
    from mmdet.models.builder import DETECTORS, build_detector
except (ImportError, ModuleNotFoundError):
    warnings.warn('Please install mmdet to use DETECTORS, build_detector')
    # Define an empty registry and building func, so that can import
    DETECTORS = Registry('detector')
    # Stub with the same signature; returns None when mmdet is absent.
    def build_detector(cfg, train_cfg, test_cfg):
        pass
def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules, it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    # A list of configs builds each entry and chains them sequentially.
    built = [build_from_cfg(item, registry, default_args) for item in cfg]
    return nn.Sequential(*built)
def build_backbone(cfg):
    """Build backbone.

    Args:
        cfg (dict): Config of the backbone to build.

    Returns:
        The module constructed from the BACKBONES registry.
    """
    return build(cfg, BACKBONES)
def build_head(cfg):
    """Build head.

    Args:
        cfg (dict): Config of the head to build.

    Returns:
        The module constructed from the HEADS registry.
    """
    return build(cfg, HEADS)
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
    """Build recognizer.

    Args:
        cfg (dict): Config of the recognizer to build.
        train_cfg (dict, optional): Training config, forwarded to the
            recognizer constructor. Defaults to None.
        test_cfg (dict, optional): Testing config, forwarded to the
            recognizer constructor. Defaults to None.

    Returns:
        The module constructed from the RECOGNIZERS registry.
    """
    return build(cfg, RECOGNIZERS,
                 dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_loss(cfg):
    """Build loss.

    Args:
        cfg (dict): Config of the loss to build.

    Returns:
        The module constructed from the LOSSES registry.
    """
    return build(cfg, LOSSES)
def build_localizer(cfg):
    """Build localizer.

    Args:
        cfg (dict): Config of the localizer to build.

    Returns:
        The module constructed from the LOCALIZERS registry.
    """
    return build(cfg, LOCALIZERS)
def build_model(cfg, train_cfg=None, test_cfg=None):
    """Build model.

    Dispatches on ``cfg['type']`` to the localizer, recognizer, or
    detector builder, raising ValueError for unknown types.
    """
    obj_type = cfg.copy().pop('type')
    if obj_type in LOCALIZERS:
        return build_localizer(cfg)
    elif obj_type in RECOGNIZERS:
        return build_recognizer(cfg, train_cfg, test_cfg)
    elif obj_type in DETECTORS:
        return build_detector(cfg, train_cfg, test_cfg)
    raise ValueError(f'{obj_type} is not registered in '
                     'LOCALIZERS, RECOGNIZERS or DETECTORS')
def build_neck(cfg):
    """Build neck.

    Args:
        cfg (dict): Config of the neck to build.

    Returns:
        The module constructed from the NECKS registry.
    """
    return build(cfg, NECKS)
| 26.267442 | 78 | 0.666224 | import warnings
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS
try:
from mmdet.models.builder import DETECTORS, build_detector
except (ImportError, ModuleNotFoundError):
warnings.warn('Please install mmdet to use DETECTORS, build_detector')
DETECTORS = Registry('detector')
def build_detector(cfg, train_cfg, test_cfg):
pass
def build(cfg, registry, default_args=None):
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
return build(cfg, BACKBONES)
def build_head(cfg):
return build(cfg, HEADS)
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
return build(cfg, RECOGNIZERS,
dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_loss(cfg):
return build(cfg, LOSSES)
def build_localizer(cfg):
return build(cfg, LOCALIZERS)
def build_model(cfg, train_cfg=None, test_cfg=None):
args = cfg.copy()
obj_type = args.pop('type')
if obj_type in LOCALIZERS:
return build_localizer(cfg)
if obj_type in RECOGNIZERS:
return build_recognizer(cfg, train_cfg, test_cfg)
if obj_type in DETECTORS:
return build_detector(cfg, train_cfg, test_cfg)
raise ValueError(f'{obj_type} is not registered in '
'LOCALIZERS, RECOGNIZERS or DETECTORS')
def build_neck(cfg):
return build(cfg, NECKS)
| true | true |
f7316c5bc4aa4105269362035d277cc55ecd7b85 | 3,975 | py | Python | MyResumes/MyResumes/MyResumes/settings.py | githubError/MessyRepository | 2380ed13c167c5c6174f0e71c8dfc634318cda4f | [
"MIT"
] | 2 | 2018-03-12T08:01:47.000Z | 2018-03-12T08:06:14.000Z | MyResumes/MyResumes/MyResumes/settings.py | githubError/MessyRepository | 2380ed13c167c5c6174f0e71c8dfc634318cda4f | [
"MIT"
] | null | null | null | MyResumes/MyResumes/MyResumes/settings.py | githubError/MessyRepository | 2380ed13c167c5c6174f0e71c8dfc634318cda4f | [
"MIT"
] | 1 | 2019-11-06T15:58:05.000Z | 2019-11-06T15:58:05.000Z | """
Django settings for MyResumes project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control; move it to an
# environment variable (e.g. os.environ['SECRET_KEY']) before deploying.
SECRET_KEY = 'lqpjra6xa@pm96&$y1xri(uau#vk2&)7b1hi6$k&v=zvne*o)%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header -- development only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'Resumes.apps.ResumesConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Hosts allowed for cross-origin requests (consumed by django-cors-headers).
CORS_ORIGIN_WHITELIST = (
    '127.0.0.1:8000',
)
# NOTE(review): CsrfViewMiddleware is commented out below, so CSRF
# protection is disabled site-wide; re-enable it before production.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyResumes.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'MyResumes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): database credentials and a public host IP are hardcoded
# here; load them from the environment or a secrets store instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME'     : 'myresumes',
        'USER'     : 'root',
        'PASSWORD' : 'root',
        'HOST'     : '140.143.249.103',
        'PORT'     : '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# email
# NOTE(review): SMTP account password is hardcoded; move it to an
# environment variable before publishing this repository.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = 'githuberror@163.com'
EMAIL_HOST_PASSWORD = 'cpf9401'
DEFAULT_CHARSET = 'utf-8'
EMAIL_USE_TLS = False
EMAIL_USE_SSL = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = (
('css',os.path.join(STATIC_ROOT,'css').replace('\\','/') ),
('js',os.path.join(STATIC_ROOT,'js').replace('\\','/') ),
('images',os.path.join(STATIC_ROOT,'images').replace('\\','/') ),
('upload',os.path.join(STATIC_ROOT,'upload').replace('\\','/') ),
) | 25 | 91 | 0.683522 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'lqpjra6xa@pm96&$y1xri(uau#vk2&)7b1hi6$k&v=zvne*o)%'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'Resumes.apps.ResumesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:8000',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyResumes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MyResumes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME' : 'myresumes',
'USER' : 'root',
'PASSWORD' : 'root',
'HOST' : '140.143.249.103',
'PORT' : '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = 'githuberror@163.com'
EMAIL_HOST_PASSWORD = 'cpf9401'
DEFAULT_CHARSET = 'utf-8'
EMAIL_USE_TLS = False
EMAIL_USE_SSL = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = (
('css',os.path.join(STATIC_ROOT,'css').replace('\\','/') ),
('js',os.path.join(STATIC_ROOT,'js').replace('\\','/') ),
('images',os.path.join(STATIC_ROOT,'images').replace('\\','/') ),
('upload',os.path.join(STATIC_ROOT,'upload').replace('\\','/') ),
) | true | true |
f7316c7ddaccd1a7c79dfb5e3ac9e15377fd1b26 | 4,228 | py | Python | test_tile/test_tile_magma.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T02:57:12.000Z | 2020-07-23T02:57:12.000Z | test_tile/test_tile_magma.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | null | null | null | test_tile/test_tile_magma.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T23:13:43.000Z | 2021-04-27T23:13:43.000Z | from common.dummy_core_magma import DummyCore
from bit_vector import BitVector
from tile.tile_magma import Tile
from common.testers import BasicTester
import tempfile
from fault.random import random_bv
def check_all_config(tester,
                     tile_circ,
                     tile,
                     data_written,
                     inputs_applied):
    """Read back every configured address and expect the written value.

    ``tile`` and ``inputs_applied`` are accepted for interface
    compatibility but are not used by the check itself.
    """
    for addr, expected_data in data_written.items():
        tester.config_read(addr)
        tester.expect(tile_circ.read_config_data, expected_data)
def test_tile():
    """Configure every register of a Tile wrapping a DummyCore, verify all
    values read back, then confirm that writes addressed to a different
    tile id leave the configuration untouched.
    """
    core = DummyCore()
    tile = Tile(core)
    tile_circ = tile.circuit()
    # No functional model for tile yet, so we have to use the
    # standard fault tester for now
    tester = BasicTester(tile_circ, tile_circ.clk, tile_circ.reset)
    # assign the tile a random ID for configuration
    tile_id = random_bv(16)
    tester.poke(tile_circ.tile_id, tile_id)
    tester.reset()
    # Connect random vals to all tile inputs
    inputs_applied = {}
    for side_in in (tile_circ.north.I, tile_circ.south.I,
                    tile_circ.east.I, tile_circ.west.I):
        # 1-bit routing layer
        for i in range(len(side_in.layer1)):
            port = side_in.layer1[i]
            rand_input = random_bv(1)
            inputs_applied[port] = rand_input
            tester.poke(port, rand_input)
        # 16-bit routing layer
        for j in range(len(side_in.layer16)):
            port = side_in.layer16[j]
            rand_input = random_bv(16)
            inputs_applied[port] = rand_input
            tester.poke(port, rand_input)
    # Write to all configuration registers in the tile
    # This test should be applicapable to any tile, regardless
    # of the core it's using
    data_written = {}
    for i, feat in enumerate(tile.features()):
        feat_addr = BitVector(i, 8)
        for reg in feat.registers.values():
            reg_addr = BitVector(reg.addr, 8)
            # Config address layout: [reg_addr | feat_addr | tile_id]
            upper_config_addr = BitVector.concat(reg_addr, feat_addr)
            config_addr = BitVector.concat(upper_config_addr, tile_id)
            # Ensure the register is wide enough to contain the random value
            rand_data = random_bv(reg.width)
            # Further restrict random config data values based on feature
            # Only 0-3 valid for SB config_data
            if (feat == tile.sb):
                if((reg_addr % 2) == 0):
                    rand_data = rand_data % 4
                # Only 0-1 valid for SB regs
                else:
                    rand_data = rand_data % 2
            # Only 0-9 valid for CB config_data
            elif (feat in tile.cbs):
                rand_data = rand_data % 10
            # Make sure we pass 32 bits of config data to configure
            config_data = BitVector(rand_data, 32)
            tester.configure(config_addr, config_data)
            # Keep track of data written so we know what to expect to read back
            data_written[config_addr] = config_data
    # Now, read back all the configuration we just wrote
    for addr in data_written:
        tester.config_read(addr)
        expected_data = data_written[addr]
        tester.expect(tile_circ.read_config_data, expected_data)
        # NOTE(review): feat_addr/reg_addr below are recomputed from the
        # address but never used in this loop.
        feat_addr = addr[16:24]
        reg_addr = addr[24:32]
    check_all_config(tester,
                     tile_circ,
                     tile,
                     data_written,
                     inputs_applied)
    # Try writing to tile with wrong tile id
    for config_addr in data_written:
        new_tile_id = config_addr[0:16] + 1
        upper_config_addr = config_addr[16:32]
        new_config_addr = BitVector.concat(upper_config_addr, new_tile_id)
        random_data = random_bv(32)
        tester.configure(new_config_addr, random_data)
    # Read all the config back again to make sure nothing changed
    check_all_config(tester,
                     tile_circ,
                     tile,
                     data_written,
                     inputs_applied)
    # Compile the design and run the recorded vectors under Verilator.
    with tempfile.TemporaryDirectory() as tempdir:
        tester.compile_and_run(target="verilator",
                               magma_output="coreir-verilog",
                               directory=tempdir,
                               flags=["-Wno-fatal"])
| 38.436364 | 79 | 0.602176 | from common.dummy_core_magma import DummyCore
from bit_vector import BitVector
from tile.tile_magma import Tile
from common.testers import BasicTester
import tempfile
from fault.random import random_bv
def check_all_config(tester,
tile_circ,
tile,
data_written,
inputs_applied):
for addr in data_written:
tester.config_read(addr)
expected_data = data_written[addr]
tester.expect(tile_circ.read_config_data, expected_data)
def test_tile():
core = DummyCore()
tile = Tile(core)
tile_circ = tile.circuit()
tester = BasicTester(tile_circ, tile_circ.clk, tile_circ.reset)
tile_id = random_bv(16)
tester.poke(tile_circ.tile_id, tile_id)
tester.reset()
inputs_applied = {}
for side_in in (tile_circ.north.I, tile_circ.south.I,
tile_circ.east.I, tile_circ.west.I):
for i in range(len(side_in.layer1)):
port = side_in.layer1[i]
rand_input = random_bv(1)
inputs_applied[port] = rand_input
tester.poke(port, rand_input)
for j in range(len(side_in.layer16)):
port = side_in.layer16[j]
rand_input = random_bv(16)
inputs_applied[port] = rand_input
tester.poke(port, rand_input)
data_written = {}
for i, feat in enumerate(tile.features()):
feat_addr = BitVector(i, 8)
for reg in feat.registers.values():
reg_addr = BitVector(reg.addr, 8)
upper_config_addr = BitVector.concat(reg_addr, feat_addr)
config_addr = BitVector.concat(upper_config_addr, tile_id)
# Ensure the register is wide enough to contain the random value
rand_data = random_bv(reg.width)
# Further restrict random config data values based on feature
# Only 0-3 valid for SB config_data
if (feat == tile.sb):
if((reg_addr % 2) == 0):
rand_data = rand_data % 4
# Only 0-1 valid for SB regs
else:
rand_data = rand_data % 2
# Only 0-9 valid for CB config_data
elif (feat in tile.cbs):
rand_data = rand_data % 10
# Make sure we pass 32 bits of config data to configure
config_data = BitVector(rand_data, 32)
tester.configure(config_addr, config_data)
# Keep track of data written so we know what to expect to read back
data_written[config_addr] = config_data
# Now, read back all the configuration we just wrote
for addr in data_written:
tester.config_read(addr)
expected_data = data_written[addr]
tester.expect(tile_circ.read_config_data, expected_data)
feat_addr = addr[16:24]
reg_addr = addr[24:32]
check_all_config(tester,
tile_circ,
tile,
data_written,
inputs_applied)
# Try writing to tile with wrong tile id
for config_addr in data_written:
new_tile_id = config_addr[0:16] + 1
upper_config_addr = config_addr[16:32]
new_config_addr = BitVector.concat(upper_config_addr, new_tile_id)
random_data = random_bv(32)
tester.configure(new_config_addr, random_data)
# Read all the config back again to make sure nothing changed
check_all_config(tester,
tile_circ,
tile,
data_written,
inputs_applied)
with tempfile.TemporaryDirectory() as tempdir:
tester.compile_and_run(target="verilator",
magma_output="coreir-verilog",
directory=tempdir,
flags=["-Wno-fatal"])
| true | true |
f7316cf7b3b3e6fbabab8a174cf2ecb75444019b | 4,820 | py | Python | docs/conf.py | aladinoster/prjits_01_v2i | b6b3f96899d56c583c87098ea53ef008a8cb4365 | [
"MIT"
] | null | null | null | docs/conf.py | aladinoster/prjits_01_v2i | b6b3f96899d56c583c87098ea53ef008a8cb4365 | [
"MIT"
] | null | null | null | docs/conf.py | aladinoster/prjits_01_v2i | b6b3f96899d56c583c87098ea53ef008a8cb4365 | [
"MIT"
] | 1 | 2020-10-20T09:37:48.000Z | 2020-10-20T09:37:48.000Z | #!/usr/bin/env python
#
# connectv2x documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
"""Sphinx configuration for building the connectv2x documentation."""
import os
import sys
# Make the package importable from the docs/ directory so that autodoc can
# resolve `connectv2x` and we can read its __version__ below.
sys.path.insert(0, os.path.abspath('..'))
import connectv2x
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'connectv2x'
copyright = "2019, Andres Ladino"
author = "Andres Ladino"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version (taken directly from the package, single source of truth).
version = connectv2x.__version__
# The full version, including alpha/beta/rc tags.
release = connectv2x.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'connectv2xdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'connectv2x.tex',
     'connectv2x Documentation',
     'Andres Ladino', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'connectv2x',
     'connectv2x Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'connectv2x',
     'connectv2x Documentation',
     author,
     'connectv2x',
     'One line description of project.',
     'Miscellaneous'),
]
| 29.570552 | 77 | 0.686722 |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import connectv2x
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'connectv2x'
copyright = "2019, Andres Ladino"
author = "Andres Ladino"
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = connectv2x.__version__
# The full version, including alpha/beta/rc tags.
release = connectv2x.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'connectv2xdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'connectv2x.tex',
'connectv2x Documentation',
'Andres Ladino', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'connectv2x',
'connectv2x Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'connectv2x',
'connectv2x Documentation',
author,
'connectv2x',
'One line description of project.',
'Miscellaneous'),
]
| true | true |
f7316dfaeb40184095a7f7a53a6f4baaf2fb85dd | 4,081 | py | Python | vcx/wrappers/python3/tests/test_wallet.py | absltkaos/indy-sdk | bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e | [
"Apache-2.0"
] | 5 | 2018-04-09T12:26:28.000Z | 2019-06-12T01:45:30.000Z | vcx/wrappers/python3/tests/test_wallet.py | absltkaos/indy-sdk | bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e | [
"Apache-2.0"
] | 9 | 2019-01-22T22:31:54.000Z | 2019-04-11T21:45:09.000Z | vcx/wrappers/python3/tests/test_wallet.py | absltkaos/indy-sdk | bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e | [
"Apache-2.0"
] | 19 | 2018-04-25T16:08:43.000Z | 2022-01-11T10:18:38.000Z | import pytest
from vcx.error import VcxError, ErrorCode
from vcx.api.wallet import *
import json
# --- Shared fixtures for the wallet-record tests below ---
# Record coordinates: every wallet record is addressed by (type, id).
TYPE = "record type"
EMPTY_TYPE = ""
ID = "123"
EMPTY_ID = ""
# Record payloads: initial value and the value used by the update test.
VALUE = "record value"
VALUE_NEW = "RecordValueNew"
EMPTY_VALUE = ""
# Tags are passed to the wallet API as a JSON-encoded string, not a dict.
TAGS = "{\"tagName1\":\"str1\",\"tagName2\":\"5\",\"tagName3\":\"12\"}"
# Retrieval options asking get_record to return type, value and tags.
OPTIONS = json.dumps({"retrieveType": True, "retrieveValue": True, "retrieveTags": True})
TAGS_EMPTY = ""
TAGS_EMPTY_JSON = "{}"
TAGS_MALFORMED_JSON = "{\"e\":}"
# WQL query used by the search test (matches records tagged tagName1=str1).
QUERY_JSON = {"tagName1": "str1"}
# Record the mocked search backend is expected to return.
SEARCHED_RECORD = {
    "id": "RecordId",
    "type": None,
    "value": "RecordValue",
    "tags": TAGS
}
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_get_token_info():
    """Token info for payment handle 0 should come back truthy."""
    token_info = await Wallet.get_token_info(0)
    assert token_info
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_send_tokens():
    """Sending one token from payment handle 0 should yield a receipt."""
    payment_receipt = await Wallet.send_tokens(0, 1, "address")
    assert payment_receipt
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address():
    """Creating a payment address without a seed should return an address."""
    new_address = await Wallet.create_payment_address()
    assert new_address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address_with_seed():
    """Creating a payment address from a 32-character seed should succeed."""
    seed = "0000000000000000000000WHATEVER00"
    new_address = await Wallet.create_payment_address(seed)
    assert new_address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_validate_payment_address():
    """A well-formed sovrin payment address should validate without raising."""
    address = 'sov:1:1234'
    await Wallet.validate_payment_address(address)
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_wallet_storage():
    """Exercise the full record CRUD surface of the wallet API in order."""
    await Wallet.add_record(TYPE, ID, VALUE, TAGS)
    await Wallet.update_record_value(TYPE, ID, VALUE_NEW)
    await Wallet.update_record_tags(TYPE, ID, TAGS_EMPTY_JSON)
    await Wallet.add_record_tags(TYPE, ID, TAGS)
    await Wallet.delete_record_tags(TYPE, ID, ['one', 'two'])
    await Wallet.delete_record(TYPE, ID)
    # The test-mode backend echoes a fixed record; tags come back as None.
    expected = {
        "id": ID,
        "type": TYPE,
        "value": VALUE,
        "tags": None,
    }
    fetched = await Wallet.get_record(TYPE, ID, OPTIONS)
    assert json.loads(fetched) == expected
@pytest.mark.asyncio
async def test_wallet_search():
    """Open a record search, pull one result, close it, then check export fails."""
    handle = await Wallet.open_search(TYPE, QUERY_JSON, None)
    assert handle == 1
    raw_result = await Wallet.search_next_records(handle, 1)
    assert json.loads(raw_result) == SEARCHED_RECORD
    await Wallet.close_search(handle)
    # Export is expected to raise in this (uninitialized wallet) context.
    with pytest.raises(VcxError) as e:
        await Wallet.export("/tmp/output.wallet", "backupKey")
@pytest.mark.asyncio
async def test_import_wallet_failures(vcx_init_test_mode, cleanup):
    """Each malformed import config must raise VcxError with the matching code.

    Runs the same raise/assert/cleanup cycle over every failure scenario:
    non-JSON input, an unusable path (IOError), and one missing required key
    per remaining scenario.
    """
    base = {'wallet_name': '', 'wallet_key': '', 'exported_wallet_path': '', 'backup_key': ''}

    def without(key):
        # Same key order as `base`, minus the one omitted field.
        return json.dumps({k: v for k, v in base.items() if k != key})

    scenarios = [
        ('Invalid Json', ErrorCode.InvalidJson),
        (json.dumps(base), ErrorCode.IOError),
        (without('wallet_name'), ErrorCode.MissingWalletName),
        (without('wallet_key'), ErrorCode.MissingWalletKey),
        (without('exported_wallet_path'), ErrorCode.MissingExportedWalletPath),
        (without('backup_key'), ErrorCode.MissingBackupKey),
    ]
    for config, expected_code in scenarios:
        with pytest.raises(VcxError) as e:
            await Wallet.import_wallet(config)
        assert expected_code == e.value.error_code
        cleanup(True)
| 31.392308 | 96 | 0.708895 | import pytest
from vcx.error import VcxError, ErrorCode
from vcx.api.wallet import *
import json
TYPE = "record type"
EMPTY_TYPE = ""
ID = "123"
EMPTY_ID = ""
VALUE = "record value"
VALUE_NEW = "RecordValueNew"
EMPTY_VALUE = ""
TAGS = "{\"tagName1\":\"str1\",\"tagName2\":\"5\",\"tagName3\":\"12\"}"
OPTIONS = json.dumps({"retrieveType": True, "retrieveValue": True, "retrieveTags": True})
TAGS_EMPTY = ""
TAGS_EMPTY_JSON = "{}"
TAGS_MALFORMED_JSON = "{\"e\":}"
QUERY_JSON = {"tagName1": "str1"}
SEARCHED_RECORD = {
"id": "RecordId",
"type": None,
"value": "RecordValue",
"tags": TAGS
}
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_get_token_info():
info = await Wallet.get_token_info(0)
assert info
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_send_tokens():
receipt = await Wallet.send_tokens(0,1,"address")
assert receipt
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address():
address = await Wallet.create_payment_address()
assert address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_payment_address_with_seed():
address = await Wallet.create_payment_address("0000000000000000000000WHATEVER00")
assert address
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_validate_payment_address():
await Wallet.validate_payment_address('sov:1:1234')
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_wallet_storage():
await Wallet.add_record(TYPE, ID, VALUE, TAGS)
await Wallet.update_record_value(TYPE, ID, VALUE_NEW)
await Wallet.update_record_tags(TYPE, ID, TAGS_EMPTY_JSON)
await Wallet.add_record_tags(TYPE, ID, TAGS)
await Wallet.delete_record_tags(TYPE, ID, ['one', 'two'])
await Wallet.delete_record(TYPE, ID)
record = {
"id": ID,
"type": TYPE,
"value": VALUE,
"tags": None,
}
assert (json.loads(await Wallet.get_record(TYPE, ID, OPTIONS)) == record)
@pytest.mark.asyncio
async def test_wallet_search():
search_handle = await Wallet.open_search(TYPE, QUERY_JSON, None)
assert (search_handle == 1)
searched_record = await Wallet.search_next_records(search_handle, 1)
assert (json.loads(searched_record) == SEARCHED_RECORD)
await Wallet.close_search(search_handle)
with pytest.raises(VcxError) as e:
await Wallet.export("/tmp/output.wallet", "backupKey")
@pytest.mark.asyncio
async def test_import_wallet_failures(vcx_init_test_mode, cleanup):
with pytest.raises(VcxError) as e:
await Wallet.import_wallet('Invalid Json')
assert ErrorCode.InvalidJson == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.IOError == e.value.error_code
cleanup(True)
config = {'wallet_key': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingWalletName == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'exported_wallet_path': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingWalletKey == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'backup_key': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingExportedWalletPath == e.value.error_code
cleanup(True)
config = {'wallet_name': '', 'wallet_key': '', 'exported_wallet_path': ''}
with pytest.raises(VcxError) as e:
await Wallet.import_wallet(json.dumps(config))
assert ErrorCode.MissingBackupKey == e.value.error_code
cleanup(True)
| true | true |
f73170c66a28ee836b140a20bd40414c0d6d7122 | 13,527 | py | Python | micropsi_server/usermanagement.py | joschabach/micropsi2 | 74a2642d20da9da1d64acc5e4c11aeabee192a27 | [
"MIT"
] | 119 | 2015-01-23T11:24:58.000Z | 2022-03-13T08:00:50.000Z | micropsi_server/usermanagement.py | Chediak/micropsi2 | 74a2642d20da9da1d64acc5e4c11aeabee192a27 | [
"MIT"
] | 9 | 2015-02-18T20:44:58.000Z | 2021-09-17T14:38:05.000Z | micropsi_server/usermanagement.py | Chediak/micropsi2 | 74a2642d20da9da1d64acc5e4c11aeabee192a27 | [
"MIT"
] | 34 | 2015-04-01T20:48:49.000Z | 2022-03-13T08:02:00.000Z | """
Very simple user management for the MicroPsi service
The user manager takes care of users, sessions and user roles.
Users without a password set can login with an arbitrary password, so make sure that users do not set empty passwords
if this concerns you.
When new users are created, they are given a role and stored along with their hashed password. Because we do not store
the password itself, it cannot be retrieved if it is lost. Instead, set a new password.
Users, including the admin user, must be logged in to receive a valid session token. The session token is valid until
the user logs off, or until it expires. To prevent expiration, it may be refreshed during each user interaction.
To check the permissions of a given user, you may use get_permissions_for_session_token. In return, the user manager
will return the rights matrix of the associated user, if the user is logged in, or the rights of a guest it the session
token does not correspond to an open session.
At the moment, persistence is achieved with a simple file, into which user and session data is dumped in json format.
Example usage:
>>> um = UserManager()
>>> um.create_user("eliza", "qwerty", "World Creator") # new user "eliza" with password "querty" as "World Creator"
>>> print um.list_users["eliza"]
{'is_active': False, 'role': 'World Creator'}
>>> elizas_token = um.start_session("eliza", "querty") # log in eliza (give this token to her)
>>> print um.list_users["eliza"]
{'is_active': True, 'role': 'World Creator'}
>>> print um.get_permissions(elizas_token)
set(['manage worlds', 'manage nodenets'])
>>> um.set_user_role('eliza', 'Administrator')
>>> print um.get_permissions(elizas_token)
Set(['manage users', 'manage worlds', 'manage nodenets'])
>>> um.end_session(elizas_token) # log off eliza
>>> print um.get_permissions(elizas_token)
{}
"""
__author__ = 'joscha'
__date__ = '11.05.12'
import json
import hashlib
import os
import datetime
import threading
import time
import uuid
import logging
import micropsi_core.tools
from configuration import config as cfg
ADMIN_USER = "admin"  # default name of the admin user
DEFAULT_ROLE = "Restricted"  # new users can create and edit nodenets, but not create worlds
IDLE_TIME_BEFORE_SESSION_EXPIRES = 360000  # seconds; after 100h idle time, expire the user session (but not the calculation)
TIME_INTERVAL_BETWEEN_EXPIRATION_CHECKS = 3600  # seconds; check every hour if we should log out users
# Role -> set of permission strings. An unknown role falls back to "Guest"
# in get_permissions_for_session_token, so a role missing from this table
# silently grants Guest permissions only.
USER_ROLES = {  # sets of strings; each represents a permission.
    "Administrator": {"manage users", "manage worlds", "manage nodenets", "manage server",
                      "create admin", "create restricted", "create full"},
    "Full": {"manage worlds", "manage nodenets", "manage server", "create full", "create restricted"},
    "Restricted": {"manage nodenets", "create restricted"},
    "Guest": {"create restricted"}
}
class UserManager(object):
    """The user manager creates, deletes and authenticates users.

    It should be used as a singleton, because all user managers would share the
    same resources (the JSON user file) for maintaining persistence.

    Attributes:
        users: dictionary of user_id -> user record (uid, hashed password,
            role, and a dict of open sessions)
        sessions: dictionary of session_token -> user_id, for fast lookup
        user_file_name: path of the JSON file used for persistence
    """

    def __init__(self, userfile_path=None):
        """Initialize user management.

        If no user data are found, a new resource file is created. Also starts
        a daemon thread that periodically expires idle sessions.

        Parameters:
            userfile_path (optional): a path to store user data permanently;
                defaults to cfg['paths']['usermanager_path'].
        """
        self.users = None
        self.sessions = {}
        # set up persistence
        if userfile_path is None:
            userfile_path = cfg['paths']['usermanager_path']
        os.makedirs(os.path.dirname(userfile_path), exist_ok=True)
        self.user_file_name = userfile_path  # todo: make this work without a file system
        try:
            with open(self.user_file_name) as file:
                self.users = json.load(file)
        except ValueError:
            # Logger.warn is a deprecated alias of Logger.warning (removed in
            # Python 3.13), so call warning() directly.
            logging.getLogger('system').warning("Invalid user data")
        except IOError:
            logging.getLogger('system').info("No readable userdata file, attempting to create one.")
        if not self.users:
            self.users = {}

        # set up sessions
        for name in self.users:
            # compatibility for files written before the multi-session feature:
            # migrate the single session_token/session_expires pair into the
            # "sessions" dict format.
            if "session_token" in self.users[name] and "sessions" not in self.users[name]:
                self.users[name]["sessions"] = {
                    self.users[name]["session_token"]: {"expires": self.users[name]["session_expires"]}
                }
            for token in self.users[name]["sessions"]:
                self.sessions[token] = name

        # set up periodic session cleanup in a background daemon thread
        def _session_expiration():
            while True:
                self.check_for_expired_user_sessions()
                time.sleep(TIME_INTERVAL_BETWEEN_EXPIRATION_CHECKS)

        session_expiration_daemon = threading.Thread(target=_session_expiration)
        session_expiration_daemon.daemon = True
        session_expiration_daemon.start()

    def __del__(self):
        """Shut down user management by persisting the current state.

        NOTE(review): relying on __del__ at interpreter shutdown is fragile
        (module globals may already be torn down); callers should prefer an
        explicit save_users() — kept for backward compatibility.
        """
        self.save_users()

    def create_user(self, user_id, password="", role=DEFAULT_ROLE, uid=None):
        """Create a new user.

        Returns False if the user id is empty or already taken, True otherwise.

        Arguments:
            user_id: a non-empty string which must be unique, used for display and urls
            password: an arbitrary string
            role: a string corresponding to a user role (such as "Administrator", or "Restricted")
            uid: a string that acts as a unique, immutable handle (so we can store
                resources for this user); defaults to user_id
        """
        if not user_id or user_id in self.users:
            return False
        # NOTE(review): md5 is not a secure password hash (fast, unsalted);
        # kept only for compatibility with already-stored credentials.
        self.users[user_id] = {
            "uid": uid or user_id,
            "hashed_password": hashlib.md5(password.encode('utf-8')).hexdigest(),
            "role": role,
            "sessions": {}
        }
        self.save_users()
        return True

    def save_users(self):
        """Store the user data to the persistence file as indented JSON."""
        with open(self.user_file_name, mode='w+') as file:
            json.dump(self.users, file, indent=4)

    def list_users(self):
        """Return a dict of all known users for display purposes.

        Each entry maps the user name to its role and whether the user
        currently has at least one open session.
        """
        return dict((name, {
            "role": self.users[name]["role"],
            "is_active": True if self.users[name]["sessions"] else False})
            for name in self.users)

    def set_user_id(self, user_id_old, user_id_new):
        """Rename a user.

        Returns the new username on success, the old username if the new one
        was already in use, and None if the old username did not exist.
        """
        if user_id_old in self.users:
            if user_id_new not in self.users:
                self.users[user_id_new] = self.users[user_id_old]
                del self.users[user_id_old]
                self.save_users()
                return user_id_new
            else:
                return user_id_old
        return None

    def set_user_role(self, user_id, role):
        """Set the role (and thereby the permissions) of a user.

        Returns False if the user does not exist.
        """
        if user_id in self.users:
            self.users[user_id]["role"] = role
            self.save_users()
            return True
        return False

    def set_user_password(self, user_id, password):
        """Set the password of a user; returns False if the user does not exist."""
        if user_id in self.users:
            self.users[user_id]["hashed_password"] = hashlib.md5(password.encode('utf-8')).hexdigest()
            self.save_users()
            return True
        return False

    def delete_user(self, user_id):
        """Delete the specified user (ending all their sessions first).

        Returns True if successful, False if the user does not exist.
        """
        if user_id in self.users:
            # if the user is still active, kill every open session
            for token in list(self.users[user_id]["sessions"].keys()):
                self.end_session(token)
            del self.users[user_id]
            self.save_users()
            return True
        return False

    def start_session(self, user_id, password=None, keep_logged_in_forever=True):
        """Authenticate the user and open a session.

        Returns a session token on success, or None if authentication fails.

        Arguments:
            user_id: a string that must be the id of an existing user
            password (optional): checked against the stored password; if None,
                authentication is skipped (caller is trusted)
            keep_logged_in_forever (optional): if True, the session will not
                expire unless the user logs off manually
        """
        if password is None or self.test_password(user_id, password):
            session_token = str(uuid.UUID(bytes=os.urandom(16)))
            # "expires" is either False (never expires) or an isoformat
            # timestamp, set below by refresh_session.
            self.users[user_id]["sessions"][session_token] = {
                "expires": not keep_logged_in_forever
            }
            self.sessions[session_token] = user_id
            if keep_logged_in_forever:
                self.save_users()
            else:
                self.refresh_session(session_token)
            return session_token
        return None

    def switch_user_for_session_token(self, user_id, session_token):
        """Re-associate an existing session token with a different user.

        Used for allowing admins to take on the identity of a user, so they
        can edit resources with the user credentials.
        Returns True if successful, False if not.

        Arguments:
            user_id: a string that must be the id of an existing user
            session_token: a valid session token
        """
        if session_token in self.sessions and user_id in self.users:
            current_user = self.sessions[session_token]
            if current_user in self.users:
                session = self.users[current_user]["sessions"][session_token]
                del self.users[current_user]["sessions"][session_token]
                self.users[user_id]["sessions"].update({
                    session_token: session
                })
                self.sessions[session_token] = user_id
                self.refresh_session(session_token)
                self.save_users()
                return True
        return False

    def test_password(self, user_id, password):
        """Return True if the user is known and the password matches, else False."""
        if user_id in self.users:
            if self.users[user_id]["hashed_password"] == hashlib.md5(password.encode('utf-8')).hexdigest():
                return True
        return False

    def end_session(self, session_token):
        """End the session associated with the given token (no-op if unknown)."""
        if session_token in self.sessions:
            user_id = self.sessions[session_token]
            del self.sessions[session_token]
            if user_id in self.users:
                del self.users[user_id]["sessions"][session_token]

    def end_all_sessions(self):
        """End every open session.

        Useful during a reset of the runtime, because all open user sessions
        persist during shutdown.
        """
        # iterate over a copy, because end_session mutates self.sessions
        sessions = self.sessions.copy()
        for session_token in sessions:
            self.end_session(session_token)

    def refresh_session(self, session_token):
        """Push the expiry of an expiring session into the future.

        Sessions whose "expires" field is False never expire and are left
        untouched.
        """
        if session_token in self.sessions:
            user_id = self.sessions[session_token]
            if self.users[user_id]["sessions"][session_token]["expires"]:
                self.users[user_id]["sessions"][session_token]["expires"] = (datetime.datetime.now() + datetime.timedelta(
                    seconds=IDLE_TIME_BEFORE_SESSION_EXPIRES)).isoformat()

    def check_for_expired_user_sessions(self):
        """Remove all user sessions that have been idle for too long.

        Timestamps are compared as isoformat strings, which sort
        chronologically for a fixed-width format.
        """
        change_flag = False
        now = datetime.datetime.now().isoformat()
        sessions = self.sessions.copy()
        for session_token in sessions:
            user_id = self.sessions[session_token]
            expires = self.users[user_id]["sessions"][session_token]["expires"]
            if expires and expires < now:
                self.end_session(session_token)
                change_flag = True
        if change_flag:
            self.save_users()

    def get_permissions_for_session_token(self, session_token):
        """Return the set of permissions for the session's user role.

        If the token is unknown, the user has been deleted, or the role is not
        in USER_ROLES, the Guest role permissions are returned.

        Example usage:
            if "manage nodenets" in usermanager.get_permissions_for_session_token(token): ...
        """
        if session_token in self.sessions:
            user_id = self.sessions[session_token]
            if user_id in self.users:
                role = self.users[user_id]["role"]
                if role in USER_ROLES:
                    return USER_ROLES[role]
        return USER_ROLES["Guest"]

    def get_user_id_for_session_token(self, session_token):
        """Return the user id for the session token, or 'Guest' if it is invalid."""
        if session_token in self.sessions:
            return self.sessions[session_token]
        else:
            return "Guest"
| 42.537736 | 122 | 0.645524 |
__author__ = 'joscha'
__date__ = '11.05.12'
import json
import hashlib
import os
import datetime
import threading
import time
import uuid
import logging
import micropsi_core.tools
from configuration import config as cfg
ADMIN_USER = "admin"
DEFAULT_ROLE = "Restricted"
IDLE_TIME_BEFORE_SESSION_EXPIRES = 360000
TIME_INTERVAL_BETWEEN_EXPIRATION_CHECKS = 3600
USER_ROLES = {
"Administrator": {"manage users","manage worlds","manage nodenets", "manage server",
"create admin", "create restricted", "create full"},
"Full": {"manage worlds","manage nodenets", "manage server", "create full", "create restricted"},
"Restricted": {"manage nodenets", "create restricted"},
"Guest": {"create restricted"}
}
class UserManager(object):
def __init__(self, userfile_path=None):
self.users = None
self.sessions = {}
if userfile_path is None:
userfile_path = cfg['paths']['usermanager_path']
os.makedirs(os.path.dirname(userfile_path), exist_ok=True)
self.user_file_name = userfile_path
try:
with open(self.user_file_name) as file:
self.users = json.load(file)
except ValueError:
logging.getLogger('system').warn("Invalid user data")
except IOError:
logging.getLogger('system').info("No readable userdata file, attempting to create one.")
if not self.users:
self.users = {}
for name in self.users:
if "session_token" in self.users[name] and "sessions" not in self.users[name]:
self.users[name]["sessions"] = {
self.users[name]["session_token"]: {"expires": self.users[name]["session_expires"]}
}
for token in self.users[name]["sessions"]:
self.sessions[token] = name
def _session_expiration():
while True:
self.check_for_expired_user_sessions()
time.sleep(TIME_INTERVAL_BETWEEN_EXPIRATION_CHECKS)
session_expiration_daemon = threading.Thread(target=_session_expiration)
session_expiration_daemon.daemon = True
session_expiration_daemon.start()
def __del__(self):
self.save_users()
def create_user(self, user_id, password="", role = DEFAULT_ROLE, uid = None):
if user_id and user_id not in self.users:
self.users[user_id] = {
"uid": uid or user_id,
"hashed_password": hashlib.md5(password.encode('utf-8')).hexdigest(),
"role": role,
"sessions": {}
}
self.save_users()
return True
else:
return False
def save_users(self):
with open(self.user_file_name, mode='w+') as file:
json.dump(self.users, file, indent=4)
def list_users(self):
return dict((name, {
"role": self.users[name]["role"],
"is_active": True if self.users[name]["sessions"] else False})
for name in self.users)
def set_user_id(self, user_id_old, user_id_new):
if user_id_old in self.users:
if user_id_new not in self.users:
self.users[user_id_new] = self.users[user_id_old]
del self.users[user_id_old]
self.save_users()
return user_id_new
else:
return user_id_old
return None
def set_user_role(self, user_id, role):
if user_id in self.users:
self.users[user_id]["role"] = role
self.save_users()
return True
return False
def set_user_password(self, user_id, password):
if user_id in self.users:
self.users[user_id]["hashed_password"] = hashlib.md5(password.encode('utf-8')).hexdigest()
self.save_users()
return True
return False
def delete_user(self, user_id):
if user_id in self.users:
for token in list(self.users[user_id]["sessions"].keys()):
self.end_session(token)
del self.users[user_id]
self.save_users()
return True
return False
def start_session(self, user_id, password=None, keep_logged_in_forever=True):
if password is None or self.test_password(user_id, password):
session_token = str(uuid.UUID(bytes=os.urandom(16)))
self.users[user_id]["sessions"][session_token] = {
"expires": not keep_logged_in_forever
}
self.sessions[session_token] = user_id
if keep_logged_in_forever:
self.save_users()
else:
self.refresh_session(session_token)
return session_token
return None
def switch_user_for_session_token(self, user_id, session_token):
if session_token in self.sessions and user_id in self.users:
current_user = self.sessions[session_token]
if current_user in self.users:
session = self.users[current_user]["sessions"][session_token]
del self.users[current_user]["sessions"][session_token]
self.users[user_id]["sessions"].update({
session_token: session
})
self.sessions[session_token] = user_id
self.refresh_session(session_token)
self.save_users()
return True
return False
def test_password(self, user_id, password):
if user_id in self.users:
if self.users[user_id]["hashed_password"] == hashlib.md5(password.encode('utf-8')).hexdigest():
return True
return False
def end_session(self, session_token):
if session_token in self.sessions:
user_id = self.sessions[session_token]
del self.sessions[session_token]
if user_id in self.users:
del self.users[user_id]["sessions"][session_token]
def end_all_sessions(self):
sessions = self.sessions.copy()
for session_token in sessions:
self.end_session(session_token)
def refresh_session(self, session_token):
if session_token in self.sessions:
user_id = self.sessions[session_token]
if self.users[user_id]["sessions"][session_token]["expires"]:
self.users[user_id]["sessions"][session_token]["expires"] = (datetime.datetime.now() + datetime.timedelta(
seconds=IDLE_TIME_BEFORE_SESSION_EXPIRES)).isoformat()
def check_for_expired_user_sessions(self):
change_flag = False
now = datetime.datetime.now().isoformat()
sessions = self.sessions.copy()
for session_token in sessions:
user_id = self.sessions[session_token]
expires = self.users[user_id]["sessions"][session_token]["expires"]
if expires and expires < now:
self.end_session(session_token)
change_flag = True
if change_flag:
self.save_users()
def get_permissions_for_session_token(self, session_token):
if session_token in self.sessions:
user_id = self.sessions[session_token]
if user_id in self.users:
role = self.users[user_id]["role"]
if role in USER_ROLES:
return USER_ROLES[role]
return USER_ROLES["Guest"]
def get_user_id_for_session_token(self, session_token):
if session_token in self.sessions:
return self.sessions[session_token]
else:
return "Guest"
| true | true |
f731716e66ce6ce28fd803a90df2d90dd154013a | 1,402 | py | Python | iop_data_flow/footfall/01_footfall_clip.py | IaaC/MACT21.22_Digital_tools_Big_Data_part_2 | f0c50a5f7ac147f6e9753545767d2d9998075ebb | [
"Apache-2.0"
] | 1 | 2022-02-18T14:35:34.000Z | 2022-02-18T14:35:34.000Z | iop_data_flow/footfall/01_footfall_clip.py | IaaC/MACT21.22_Digital_tools_Big_Data_part_2 | f0c50a5f7ac147f6e9753545767d2d9998075ebb | [
"Apache-2.0"
] | null | null | null | iop_data_flow/footfall/01_footfall_clip.py | IaaC/MACT21.22_Digital_tools_Big_Data_part_2 | f0c50a5f7ac147f6e9753545767d2d9998075ebb | [
"Apache-2.0"
] | 1 | 2022-02-18T14:35:40.000Z | 2022-02-18T14:35:40.000Z | import os
import pandas as pd
import geopandas as gpd
## Config
# Number of rows to read
nrows = 1000
#nrows = 1000000
#nrows = None
# Output file path
day_num = 1
input_csv_filepath = f'../../data/footfall/footfall_20210217/day{day_num}Bcntrakingotherdays.csv'
# Clip mask file path
#clip_mask_filepath = '../../data/studio/clip_area/clip_darea.shp'
clip_mask_filepath = '../../data/footfall/aoi_glories.geojson'
# Output file path
output_file = f'ff-day{day_num}-clipped.shp'
output_folder = '../../data/studio/footfall/01_clipped/'
## Run
# Load csv all spain footfall
print(f"Load csv footfall : {input_csv_filepath}")
df = pd.read_csv(input_csv_filepath,
delimiter='|',
nrows=nrows)
# Convert it to geopandas
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.LONGITUDE, df.LATITUDE), crs='epsg:4326')
print(f"Footfall all: {len(gdf)} points")
# Load clip mask
mask_gdf = gpd.read_file(clip_mask_filepath)
mask_gdf = mask_gdf[mask_gdf['geometry'].notnull()]
# Clip it to district
gdf = gpd.clip(gdf, mask_gdf)
print(f"Footfall clipped district: {len(gdf)} points")
# Create output directory if it doesn't exist
if not os.path.exists(output_folder):
os.mkdir(output_folder)
output_fullpath = os.path.join(output_folder, output_file)
# Save clipped points
gdf.to_file(output_fullpath)
print(f"Saved shp footfall district: {output_fullpath}") | 26.961538 | 99 | 0.736091 | import os
import pandas as pd
import geopandas as gpd
= 1000
day_num = 1
input_csv_filepath = f'../../data/footfall/footfall_20210217/day{day_num}Bcntrakingotherdays.csv'
clip_mask_filepath = '../../data/footfall/aoi_glories.geojson'
output_file = f'ff-day{day_num}-clipped.shp'
output_folder = '../../data/studio/footfall/01_clipped/'
int(f"Load csv footfall : {input_csv_filepath}")
df = pd.read_csv(input_csv_filepath,
delimiter='|',
nrows=nrows)
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.LONGITUDE, df.LATITUDE), crs='epsg:4326')
print(f"Footfall all: {len(gdf)} points")
mask_gdf = gpd.read_file(clip_mask_filepath)
mask_gdf = mask_gdf[mask_gdf['geometry'].notnull()]
gdf = gpd.clip(gdf, mask_gdf)
print(f"Footfall clipped district: {len(gdf)} points")
if not os.path.exists(output_folder):
os.mkdir(output_folder)
output_fullpath = os.path.join(output_folder, output_file)
# Save clipped points
gdf.to_file(output_fullpath)
print(f"Saved shp footfall district: {output_fullpath}") | true | true |
f731716eb335aa711c9fca62072e00fad94f8a35 | 4,633 | py | Python | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Address(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, number_or_name: str=None, street: str=None, town: str=None, county: str=None, postcode: str=None): # noqa: E501
"""Address - a model defined in Swagger
:param id: The id of this Address. # noqa: E501
:type id: int
:param number_or_name: The number_or_name of this Address. # noqa: E501
:type number_or_name: str
:param street: The street of this Address. # noqa: E501
:type street: str
:param town: The town of this Address. # noqa: E501
:type town: str
:param county: The county of this Address. # noqa: E501
:type county: str
:param postcode: The postcode of this Address. # noqa: E501
:type postcode: str
"""
self.swagger_types = {
'id': int,
'number_or_name': str,
'street': str,
'town': str,
'county': str,
'postcode': str
}
self.attribute_map = {
'id': 'id',
'number_or_name': 'number_or_name',
'street': 'street',
'town': 'town',
'county': 'county',
'postcode': 'postcode'
}
self._id = id
self._number_or_name = number_or_name
self._street = street
self._town = town
self._county = county
self._postcode = postcode
@classmethod
def from_dict(cls, dikt) -> 'Address':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Address of this Address. # noqa: E501
:rtype: Address
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""Gets the id of this Address.
:return: The id of this Address.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""Sets the id of this Address.
:param id: The id of this Address.
:type id: int
"""
self._id = id
@property
def number_or_name(self) -> str:
"""Gets the number_or_name of this Address.
:return: The number_or_name of this Address.
:rtype: str
"""
return self._number_or_name
@number_or_name.setter
def number_or_name(self, number_or_name: str):
"""Sets the number_or_name of this Address.
:param number_or_name: The number_or_name of this Address.
:type number_or_name: str
"""
self._number_or_name = number_or_name
@property
def street(self) -> str:
"""Gets the street of this Address.
:return: The street of this Address.
:rtype: str
"""
return self._street
@street.setter
def street(self, street: str):
"""Sets the street of this Address.
:param street: The street of this Address.
:type street: str
"""
self._street = street
@property
def town(self) -> str:
"""Gets the town of this Address.
:return: The town of this Address.
:rtype: str
"""
return self._town
@town.setter
def town(self, town: str):
"""Sets the town of this Address.
:param town: The town of this Address.
:type town: str
"""
self._town = town
@property
def county(self) -> str:
"""Gets the county of this Address.
:return: The county of this Address.
:rtype: str
"""
return self._county
@county.setter
def county(self, county: str):
"""Sets the county of this Address.
:param county: The county of this Address.
:type county: str
"""
self._county = county
@property
def postcode(self) -> str:
"""Gets the postcode of this Address.
:return: The postcode of this Address.
:rtype: str
"""
return self._postcode
@postcode.setter
def postcode(self, postcode: str):
"""Sets the postcode of this Address.
:param postcode: The postcode of this Address.
:type postcode: str
"""
self._postcode = postcode
| 24.005181 | 149 | 0.564429 |
from __future__ import absolute_import
from datetime import date, datetime
from typing import List, Dict
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Address(Model):
def __init__(self, id: int=None, number_or_name: str=None, street: str=None, town: str=None, county: str=None, postcode: str=None):
self.swagger_types = {
'id': int,
'number_or_name': str,
'street': str,
'town': str,
'county': str,
'postcode': str
}
self.attribute_map = {
'id': 'id',
'number_or_name': 'number_or_name',
'street': 'street',
'town': 'town',
'county': 'county',
'postcode': 'postcode'
}
self._id = id
self._number_or_name = number_or_name
self._street = street
self._town = town
self._county = county
self._postcode = postcode
@classmethod
def from_dict(cls, dikt) -> 'Address':
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id: int):
self._id = id
@property
def number_or_name(self) -> str:
return self._number_or_name
@number_or_name.setter
def number_or_name(self, number_or_name: str):
self._number_or_name = number_or_name
@property
def street(self) -> str:
return self._street
@street.setter
def street(self, street: str):
self._street = street
@property
def town(self) -> str:
return self._town
@town.setter
def town(self, town: str):
self._town = town
@property
def county(self) -> str:
return self._county
@county.setter
def county(self, county: str):
self._county = county
@property
def postcode(self) -> str:
return self._postcode
@postcode.setter
def postcode(self, postcode: str):
self._postcode = postcode
| true | true |
f73171a82a9e8efd02676c00bb3724b2c05b4702 | 673 | py | Python | atlasbrief/mainsite/migrations/0002_auto_20180124_1611.py | joshr2020/Atlas-Brief | 4471102a7a4b5bf549ef044d7d7de939438011dd | [
"MIT"
] | 1 | 2018-08-20T19:02:00.000Z | 2018-08-20T19:02:00.000Z | atlasbrief/mainsite/migrations/0002_auto_20180124_1611.py | joshr2020/Atlas-Brief | 4471102a7a4b5bf549ef044d7d7de939438011dd | [
"MIT"
] | null | null | null | atlasbrief/mainsite/migrations/0002_auto_20180124_1611.py | joshr2020/Atlas-Brief | 4471102a7a4b5bf549ef044d7d7de939438011dd | [
"MIT"
] | null | null | null | # Generated by Django 2.0.1 on 2018-01-24 21:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='brief',
name='sources',
),
migrations.RemoveField(
model_name='tag',
name='kind',
),
migrations.AddField(
model_name='country',
name='tag',
field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, to='mainsite.Tag'),
),
]
| 24.035714 | 117 | 0.576523 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='brief',
name='sources',
),
migrations.RemoveField(
model_name='tag',
name='kind',
),
migrations.AddField(
model_name='country',
name='tag',
field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, to='mainsite.Tag'),
),
]
| true | true |
f73172073a5b7aa03cf878398739be9ce4b17eb0 | 9,088 | py | Python | src/ploomber/io/_commander.py | aadityasinha-dotcom/ploomber | ddbdb63bf7e92d4c48073893b5f54a5f59383291 | [
"Apache-2.0"
] | null | null | null | src/ploomber/io/_commander.py | aadityasinha-dotcom/ploomber | ddbdb63bf7e92d4c48073893b5f54a5f59383291 | [
"Apache-2.0"
] | null | null | null | src/ploomber/io/_commander.py | aadityasinha-dotcom/ploomber | ddbdb63bf7e92d4c48073893b5f54a5f59383291 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import subprocess
import shutil
from pathlib import Path, PurePosixPath
from click import ClickException
from jinja2 import Environment, PackageLoader, StrictUndefined
from ploomber.io import TerminalWriter
def to_pascal_case(name):
return ''.join([w.capitalize() for w in name.split('_')])
def _delete(dst):
dst = Path(dst)
if dst.is_file():
dst.unlink()
if dst.is_dir():
shutil.rmtree(dst)
class CommanderException(ClickException):
"""
Exception raised when the workflow cannot proceed and require a fix
from the user. It is a subclass of ClickException, which signals the CLI
to hide the traceback
"""
pass
class CommanderStop(Exception):
"""
An exception that stops the execution of a commander without raising
an exception
"""
pass
class Commander:
"""Manage script workflows
"""
def __init__(self,
workspace=None,
templates_path=None,
environment_kwargs=None):
self.tw = TerminalWriter()
self.workspace = None if not workspace else Path(workspace).resolve()
self._to_delete = []
self._warnings = []
self._wd = Path('.').resolve()
if templates_path:
self._env = Environment(loader=PackageLoader(*templates_path),
undefined=StrictUndefined,
**(environment_kwargs or {}))
self._env.filters['to_pascal_case'] = to_pascal_case
else:
self._env = None
def run(self,
*cmd,
description=None,
capture_output=False,
expected_output=None,
error_message=None,
hint=None,
show_cmd=True):
"""Execute a command in a subprocess
Parameters
----------
*cmd
Command to execute
description: str, default=None
Label to display before executing the command
capture_output: bool, default=False
Captures output, otherwise prints to standard output and standard
error
expected_output: str, default=None
Raises a RuntimeError if the output is different than this value.
Only valid when capture_output=True
error_message: str, default=None
Error to display when expected_output does not match. If None,
a generic message is shown
hint: str, default=None
An optional string to show when at the end of the error when
the expected_output does not match. Used to hint the user how
to fix the problem
show_cmd : bool, default=True
Whether to display the command next to the description
(and error message if it fails) or not. Only valid when
description is not None
"""
cmd_str = ' '.join(cmd)
if expected_output is not None and not capture_output:
raise RuntimeError('capture_output must be True when '
'expected_output is not None')
if description:
header = f'{description}: {cmd_str}' if show_cmd else description
self.tw.sep('=', header, blue=True)
error = None
# py 3.6 compatibility: cannot use subprocess.run directly
# because the check_output arg was included until version 3.7
if not capture_output:
try:
result = subprocess.check_call(cmd)
except Exception as e:
error = e
# capture outpuut
else:
try:
result = subprocess.check_output(cmd)
except Exception as e:
error = e
else:
result = result.decode(sys.stdout.encoding)
if expected_output is not None:
error = result != expected_output
if error:
lines = []
if error_message:
line_first = error_message
else:
if show_cmd:
cmd_str = ' '.join(cmd)
line_first = ('An error occurred when executing '
f'command: {cmd_str}')
else:
line_first = 'An error occurred.'
lines.append(line_first)
if not capture_output:
lines.append(f'Original error message: {error}')
if hint:
lines.append(f'Hint: {hint}.')
raise CommanderException('\n'.join(lines))
else:
return result
def __enter__(self):
if self.workspace and not Path(self.workspace).exists():
Path(self.workspace).mkdir()
return self
def __exit__(self, exc_type, exc_value, traceback):
# move to the original working directory
os.chdir(self._wd)
self.rm(*self._to_delete)
supress = isinstance(exc_value, CommanderStop)
if supress:
self.info(str(exc_value))
self._warn_show()
return supress
def rm(self, *args):
"""Deletes all files/directories
Examples
--------
>>> cmdr.rm('file', 'directory') # doctest: +SKIP
"""
for f in args:
_delete(f)
def rm_on_exit(self, path):
"""Removes file upon exit
Examples
--------
>>> cmdr.rm_on_exit('some_temporary_file') # doctest: +SKIP
"""
self._to_delete.append(Path(path).resolve())
def copy_template(self, path, **render_kwargs):
"""Copy template to the workspace
Parameters
----------
path : str
Path to template (relative to templates path)
**render_kwargs
Keyword arguments passed to the template
Examples
--------
>>> # copies template in {templates-path}/directory/template.yaml
>>> # to {workspace}/template.yaml
>>> cmdr.copy_template('directory/template.yaml') # doctest: +SKIP
"""
dst = Path(self.workspace, PurePosixPath(path).name)
# This message is no longer valid since this is only called
# when there is no env yet
if dst.exists():
self.success(f'Using existing {path!s}...')
else:
self.info(f'Adding {dst!s}...')
dst.parent.mkdir(exist_ok=True, parents=True)
content = self._env.get_template(str(path)).render(**render_kwargs)
dst.write_text(content)
def cd(self, dir_):
"""Change current working directory
"""
os.chdir(dir_)
def cp(self, src):
"""
Copies a file or directory to the workspace, replacing it if necessary.
Deleted on exit.
Notes
-----
Used mainly for preparing Dockerfiles since they can only
copy from the current working directory
Examples
--------
>>> # copies dir/file to {workspace}/file
>>> cmdr.cp('dir/file') # doctest: +SKIP
"""
path = Path(src)
if not path.exists():
raise CommanderException(
f'Missing {src} file. Add it and try again.')
# convert to absolute to ensure we delete the right file on __exit__
dst = Path(self.workspace, path.name).resolve()
self._to_delete.append(dst)
_delete(dst)
if path.is_file():
shutil.copy(src, dst)
else:
shutil.copytree(src, dst)
def append_inline(self, line, dst):
"""Append line to a file
Parameters
----------
line : str
Line to append
dst : str
File to append (can be outside the workspace)
Examples
--------
>>> cmdr.append_inline('*.csv', '.gitignore') # doctest: +SKIP
"""
if not Path(dst).exists():
Path(dst).touch()
original = Path(dst).read_text()
Path(dst).write_text(original + '\n' + line + '\n')
def print(self, line):
"""Print message (no color)
"""
self.tw.write(f'{line}\n')
def success(self, line=None):
"""Print success message (green)
"""
self.tw.sep('=', line, green=True)
def info(self, line=None):
"""Print information message (blue)
"""
self.tw.sep('=', line, blue=True)
def warn(self, line=None):
"""Print warning (yellow)
"""
self.tw.sep('=', line, yellow=True)
def warn_on_exit(self, line):
"""Append a warning message to be displayed on exit
"""
self._warnings.append(line)
def _warn_show(self):
"""Display accumulated warning messages (added via .warn_on_exit)
"""
if self._warnings:
self.tw.sep('=', 'Warnings', yellow=True)
self.tw.write('\n\n'.join(self._warnings) + '\n')
self.tw.sep('=', yellow=True)
| 28.4 | 79 | 0.554027 | import os
import sys
import subprocess
import shutil
from pathlib import Path, PurePosixPath
from click import ClickException
from jinja2 import Environment, PackageLoader, StrictUndefined
from ploomber.io import TerminalWriter
def to_pascal_case(name):
return ''.join([w.capitalize() for w in name.split('_')])
def _delete(dst):
dst = Path(dst)
if dst.is_file():
dst.unlink()
if dst.is_dir():
shutil.rmtree(dst)
class CommanderException(ClickException):
pass
class CommanderStop(Exception):
pass
class Commander:
def __init__(self,
workspace=None,
templates_path=None,
environment_kwargs=None):
self.tw = TerminalWriter()
self.workspace = None if not workspace else Path(workspace).resolve()
self._to_delete = []
self._warnings = []
self._wd = Path('.').resolve()
if templates_path:
self._env = Environment(loader=PackageLoader(*templates_path),
undefined=StrictUndefined,
**(environment_kwargs or {}))
self._env.filters['to_pascal_case'] = to_pascal_case
else:
self._env = None
def run(self,
*cmd,
description=None,
capture_output=False,
expected_output=None,
error_message=None,
hint=None,
show_cmd=True):
cmd_str = ' '.join(cmd)
if expected_output is not None and not capture_output:
raise RuntimeError('capture_output must be True when '
'expected_output is not None')
if description:
header = f'{description}: {cmd_str}' if show_cmd else description
self.tw.sep('=', header, blue=True)
error = None
if not capture_output:
try:
result = subprocess.check_call(cmd)
except Exception as e:
error = e
else:
try:
result = subprocess.check_output(cmd)
except Exception as e:
error = e
else:
result = result.decode(sys.stdout.encoding)
if expected_output is not None:
error = result != expected_output
if error:
lines = []
if error_message:
line_first = error_message
else:
if show_cmd:
cmd_str = ' '.join(cmd)
line_first = ('An error occurred when executing '
f'command: {cmd_str}')
else:
line_first = 'An error occurred.'
lines.append(line_first)
if not capture_output:
lines.append(f'Original error message: {error}')
if hint:
lines.append(f'Hint: {hint}.')
raise CommanderException('\n'.join(lines))
else:
return result
def __enter__(self):
if self.workspace and not Path(self.workspace).exists():
Path(self.workspace).mkdir()
return self
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self._wd)
self.rm(*self._to_delete)
supress = isinstance(exc_value, CommanderStop)
if supress:
self.info(str(exc_value))
self._warn_show()
return supress
def rm(self, *args):
for f in args:
_delete(f)
def rm_on_exit(self, path):
self._to_delete.append(Path(path).resolve())
def copy_template(self, path, **render_kwargs):
dst = Path(self.workspace, PurePosixPath(path).name)
if dst.exists():
self.success(f'Using existing {path!s}...')
else:
self.info(f'Adding {dst!s}...')
dst.parent.mkdir(exist_ok=True, parents=True)
content = self._env.get_template(str(path)).render(**render_kwargs)
dst.write_text(content)
def cd(self, dir_):
os.chdir(dir_)
def cp(self, src):
path = Path(src)
if not path.exists():
raise CommanderException(
f'Missing {src} file. Add it and try again.')
dst = Path(self.workspace, path.name).resolve()
self._to_delete.append(dst)
_delete(dst)
if path.is_file():
shutil.copy(src, dst)
else:
shutil.copytree(src, dst)
def append_inline(self, line, dst):
if not Path(dst).exists():
Path(dst).touch()
original = Path(dst).read_text()
Path(dst).write_text(original + '\n' + line + '\n')
def print(self, line):
self.tw.write(f'{line}\n')
def success(self, line=None):
self.tw.sep('=', line, green=True)
def info(self, line=None):
self.tw.sep('=', line, blue=True)
def warn(self, line=None):
self.tw.sep('=', line, yellow=True)
def warn_on_exit(self, line):
self._warnings.append(line)
def _warn_show(self):
if self._warnings:
self.tw.sep('=', 'Warnings', yellow=True)
self.tw.write('\n\n'.join(self._warnings) + '\n')
self.tw.sep('=', yellow=True)
| true | true |
f73172862b0577c542d1d19db00a7e6b31dc2af6 | 101 | py | Python | python/scope/scope-3.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | python/scope/scope-3.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | python/scope/scope-3.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | #!/usr/bin/env python2.4
# Example 3
i = 1
def h():
print i
i = 2
print i
h()
print i
| 7.769231 | 24 | 0.514851 |
i = 1
def h():
print i
i = 2
print i
h()
print i
| false | true |
f73173b0bf6552c411247cc25c457802bd9b31e5 | 15,273 | py | Python | zerogercrnn/experiments/ast_level/metrics.py | zerogerc/rnn-autocomplete | 39dc8dd7c431cb8ac9e15016388ec823771388e4 | [
"Apache-2.0"
] | 7 | 2019-02-27T09:48:39.000Z | 2021-11-30T19:01:01.000Z | zerogercrnn/experiments/ast_level/metrics.py | ZeRoGerc/rnn-autocomplete | 39dc8dd7c431cb8ac9e15016388ec823771388e4 | [
"Apache-2.0"
] | null | null | null | zerogercrnn/experiments/ast_level/metrics.py | ZeRoGerc/rnn-autocomplete | 39dc8dd7c431cb8ac9e15016388ec823771388e4 | [
"Apache-2.0"
] | null | null | null | import json
import os
import numpy as np
import torch
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID
from zerogercrnn.experiments.ast_level.utils import read_non_terminals
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID, EOF_TOKEN
from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy
class NonTerminalsMetricsWrapper(Metrics):
"""Metrics that extract non-terminals from target and pass non-terminals tensor to base metrics."""
def __init__(self, base: Metrics):
super().__init__()
self.base = base
def drop_state(self):
self.base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
self.base.report((prediction, target.non_terminals))
def get_current_value(self, should_print=False):
return self.base.get_current_value(should_print)
def decrease_hits(self, number):
self.base.decrease_hits(number)
class SingleNonTerminalAccuracyMetrics(Metrics):
"""Metrics that show accuracies per non-terminal. It should not be used for plotting, but to
print results on console during model evaluation."""
def __init__(self, non_terminals_file, results_dir=None, group=False, dim=2):
"""
:param non_terminals_file: file with json of non-terminals
:param results_dir: where to save json with accuracies per non-terminal
:param dim: dimension to run max function on for predicted values
"""
super().__init__()
print('SingleNonTerminalAccuracyMetrics created!')
self.non_terminals = read_non_terminals(non_terminals_file)
self.non_terminals_number = len(self.non_terminals)
self.results_dir = results_dir
self.group = group
self.dim = dim
self.accuracies = [IndexedAccuracyMetrics(label='ERROR') for _ in self.non_terminals]
def drop_state(self):
for accuracy in self.accuracies:
accuracy.drop_state()
def report(self, data):
prediction, target = data
if self.dim is None:
predicted = prediction
else:
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.non_terminals.view(-1)
for cur in range(len(self.non_terminals)):
indices = (target == cur).nonzero().squeeze()
self.accuracies[cur].report(predicted, target, indices)
def get_current_value(self, should_print=False):
result = []
for cur in range(len(self.non_terminals)):
cur_accuracy = self.accuracies[cur].get_current_value(should_print=False)
result.append(cur_accuracy)
# if should_print:
# print('Accuracy on {} is {}'.format(self.non_terminals[cur], cur_accuracy))
self.save_to_file(result)
return 0 # this metrics if only for printing
def save_to_file(self, result):
if self.results_dir is not None:
if self.group:
nt, res = self.get_grouped_result()
else:
nt, res = self.non_terminals, result
with open(os.path.join(self.results_dir, 'nt_acc.txt'), mode='w') as f:
f.write(json.dumps(nt))
f.write('\n')
f.write(json.dumps(res))
def get_grouped_result(self):
"""Calc accuracies ignoring last two bits of information."""
nt = set()
hits = {}
misses = {}
for i in range(len(self.non_terminals)):
base = self.non_terminals[i]
if self.non_terminals[i] != EOF_TOKEN:
base = base[:-2] # remove last two bits
nt.add(base)
if base not in hits:
hits[base] = 0
if base not in misses:
misses[base] = 0
hits[base] += self.accuracies[i].metrics.hits
misses[base] += self.accuracies[i].metrics.misses
nt = sorted(list(nt))
result = []
nt.remove('Program')
nt.remove('AssignmentPattern')
for cur in nt:
if hits[cur] + misses[cur] == 0:
result.append(0)
else:
result.append(float(hits[cur]) / (hits[cur] + misses[cur]))
return nt, result
class TerminalAccuracyMetrics(Metrics):
def __init__(self, dim=2):
super().__init__()
self.dim = dim
self.general_accuracy = BaseAccuracyMetrics()
self.empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is <empty>'
)
self.non_empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <empty>'
)
self.ground_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)'
)
self.model_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that model predicted to non <unk> (and ground truth is not <empty>)'
)
def drop_state(self):
self.general_accuracy.drop_state()
self.empty_accuracy.drop_state()
self.non_empty_accuracy.drop_state()
self.ground_not_unk_accuracy.drop_state()
self.model_not_unk_accuracy.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.view(-1)
self.general_accuracy.report((predicted, target))
if not self.is_train:
empty_indexes = torch.nonzero(target == 0).squeeze()
self.empty_accuracy.report(predicted, target, empty_indexes)
non_empty_indexes = torch.nonzero(target - EMPTY_TOKEN_ID).squeeze()
self.non_empty_accuracy.report(predicted, target, non_empty_indexes)
predicted = torch.index_select(predicted, 0, non_empty_indexes)
target = torch.index_select(target, 0, non_empty_indexes)
ground_not_unk_indexes = torch.nonzero(target - UNKNOWN_TOKEN_ID).squeeze()
self.ground_not_unk_accuracy.report(predicted, target, ground_not_unk_indexes)
model_not_unk_indexes = torch.nonzero(predicted - UNKNOWN_TOKEN_ID).squeeze()
self.model_not_unk_accuracy.report(predicted, target, model_not_unk_indexes)
def get_current_value(self, should_print=False):
general_accuracy = self.general_accuracy.get_current_value(should_print=should_print)
if (not self.is_train) and should_print:
self.empty_accuracy.get_current_value(should_print=True)
self.non_empty_accuracy.get_current_value(should_print=True)
self.ground_not_unk_accuracy.get_current_value(should_print=True)
self.model_not_unk_accuracy.get_current_value(should_print=True)
return general_accuracy
class NonTerminalTerminalAccuracyMetrics(Metrics):
def __init__(self):
super().__init__()
self.nt_accuracy = MaxPredictionAccuracyMetrics()
self.t_accuracy = MaxPredictionAccuracyMetrics()
def drop_state(self):
self.nt_accuracy.drop_state()
self.t_accuracy.drop_state()
def report(self, data):
nt_prediction, t_prediction, nt_target, t_target = data
self.nt_accuracy.report((nt_prediction, nt_target))
self.t_accuracy.report((t_prediction, t_target))
def get_current_value(self, should_print=False):
nt_value = self.nt_accuracy.get_current_value(should_print=False)
t_value = self.t_accuracy.get_current_value(should_print=False)
if should_print:
print('Non terminals accuracy: {}'.format(nt_value))
print('Terminals accuracy: {}'.format(t_value))
return nt_value, t_value
class LayeredNodeDepthsAttentionMetrics(Metrics):
"""Metrics that is able to visualize attention coefficient per node depths"""
def __init__(self):
super().__init__()
self.per_depth_attention_sum = np.zeros((50, 50))
self.per_depth_reports = np.zeros((50))
def drop_state(self):
pass
def report(self, node_depths, attention_coefficients):
for i in range(50):
index = torch.nonzero((node_depths == i))
if index.size()[0] == 0:
continue
selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
selected_attention = selected_attention.squeeze(2)
to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
self.per_depth_attention_sum[i] += to_report
self.per_depth_reports[i] += index.size()[0]
def get_current_value(self, should_print=False):
for i in range(50):
if abs(self.per_depth_reports[i]) > 1e-6:
self.per_depth_attention_sum[i] /= self.per_depth_reports[i]
np.save('eval/temp/attention/per_depth_matrix', self.per_depth_attention_sum)
return 0 # this metrics is only for saving results to file.
class PerNtAttentionMetrics(Metrics):
def __init__(self):
super().__init__()
def report(self, current_input, attention_coefficients):
nt_ids = torch.argmax(current_input, dim=-1)
# for i in range(97): # TODO: check
# index = torch.nonzero((nt_ids == i))
# if index.size()[0] == 0:
# continue
# selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
# selected_attention = selected_attention.squeeze(2)
# to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
# self.per_depth_attention_sum[i] += to_report
# self.per_depth_reports[i] += index.size()[0]
def drop_state(self):
pass
def get_current_value(self, should_print=False):
pass
class EmptyNonEmptyWrapper(Metrics):
def __init__(self, non_emp_base: Metrics, with_emp_base:Metrics):
super().__init__()
self.non_emp_base = non_emp_base
self.with_emp_base = with_emp_base
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
class EmptyNonEmptyTerminalTopKAccuracyWrapper(Metrics):
def __init__(self):
super().__init__()
self.non_emp_base = TopKAccuracy(k=5)
self.with_emp_base = TopKAccuracy(k=5)
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1, prediction.size()[-1])
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
# class AggregatedTerminalTopKMetrics(Metrics):
#
# def __init__(self, k):
# super().__init__()
# self.k = k
# self.common = BaseAccuracyMetrics()
# self.target_non_unk = Top
# self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
#
# def drop_state(self):
# self.common.drop_state()
# self.target_non_unk.drop_state()
# self.prediction_non_unk.drop_state()
#
# def report(self, prediction_target):
# prediction, target = prediction_target
# prediction = prediction.view(-1)
# target = target.view(-1)
#
# self.common.report((prediction, target))
#
# pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
# target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
#
# self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
# self.target_non_unk.report(prediction, target, target_non_unk_indices)
#
# def get_current_value(self, should_print=False):
# print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
# print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
# print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
# print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
class AggregatedTerminalMetrics(Metrics):
def __init__(self):
super().__init__()
self.common = BaseAccuracyMetrics()
self.target_non_unk = IndexedAccuracyMetrics('Target not unk')
self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
def drop_state(self):
self.common.drop_state()
self.target_non_unk.drop_state()
self.prediction_non_unk.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.common.report((prediction, target))
pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
self.target_non_unk.report(prediction, target, target_non_unk_indices)
def get_current_value(self, should_print=False):
print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
| 38.568182 | 142 | 0.659726 | import json
import os
import numpy as np
import torch
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID
from zerogercrnn.experiments.ast_level.utils import read_non_terminals
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID, EOF_TOKEN
from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy
class NonTerminalsMetricsWrapper(Metrics):
def __init__(self, base: Metrics):
super().__init__()
self.base = base
def drop_state(self):
self.base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
self.base.report((prediction, target.non_terminals))
def get_current_value(self, should_print=False):
return self.base.get_current_value(should_print)
def decrease_hits(self, number):
self.base.decrease_hits(number)
class SingleNonTerminalAccuracyMetrics(Metrics):
def __init__(self, non_terminals_file, results_dir=None, group=False, dim=2):
super().__init__()
print('SingleNonTerminalAccuracyMetrics created!')
self.non_terminals = read_non_terminals(non_terminals_file)
self.non_terminals_number = len(self.non_terminals)
self.results_dir = results_dir
self.group = group
self.dim = dim
self.accuracies = [IndexedAccuracyMetrics(label='ERROR') for _ in self.non_terminals]
def drop_state(self):
for accuracy in self.accuracies:
accuracy.drop_state()
def report(self, data):
prediction, target = data
if self.dim is None:
predicted = prediction
else:
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.non_terminals.view(-1)
for cur in range(len(self.non_terminals)):
indices = (target == cur).nonzero().squeeze()
self.accuracies[cur].report(predicted, target, indices)
def get_current_value(self, should_print=False):
result = []
for cur in range(len(self.non_terminals)):
cur_accuracy = self.accuracies[cur].get_current_value(should_print=False)
result.append(cur_accuracy)
self.save_to_file(result)
return 0
def save_to_file(self, result):
if self.results_dir is not None:
if self.group:
nt, res = self.get_grouped_result()
else:
nt, res = self.non_terminals, result
with open(os.path.join(self.results_dir, 'nt_acc.txt'), mode='w') as f:
f.write(json.dumps(nt))
f.write('\n')
f.write(json.dumps(res))
def get_grouped_result(self):
nt = set()
hits = {}
misses = {}
for i in range(len(self.non_terminals)):
base = self.non_terminals[i]
if self.non_terminals[i] != EOF_TOKEN:
base = base[:-2]
nt.add(base)
if base not in hits:
hits[base] = 0
if base not in misses:
misses[base] = 0
hits[base] += self.accuracies[i].metrics.hits
misses[base] += self.accuracies[i].metrics.misses
nt = sorted(list(nt))
result = []
nt.remove('Program')
nt.remove('AssignmentPattern')
for cur in nt:
if hits[cur] + misses[cur] == 0:
result.append(0)
else:
result.append(float(hits[cur]) / (hits[cur] + misses[cur]))
return nt, result
class TerminalAccuracyMetrics(Metrics):
def __init__(self, dim=2):
super().__init__()
self.dim = dim
self.general_accuracy = BaseAccuracyMetrics()
self.empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is <empty>'
)
self.non_empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <empty>'
)
self.ground_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)'
)
self.model_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that model predicted to non <unk> (and ground truth is not <empty>)'
)
def drop_state(self):
self.general_accuracy.drop_state()
self.empty_accuracy.drop_state()
self.non_empty_accuracy.drop_state()
self.ground_not_unk_accuracy.drop_state()
self.model_not_unk_accuracy.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.view(-1)
self.general_accuracy.report((predicted, target))
if not self.is_train:
empty_indexes = torch.nonzero(target == 0).squeeze()
self.empty_accuracy.report(predicted, target, empty_indexes)
non_empty_indexes = torch.nonzero(target - EMPTY_TOKEN_ID).squeeze()
self.non_empty_accuracy.report(predicted, target, non_empty_indexes)
predicted = torch.index_select(predicted, 0, non_empty_indexes)
target = torch.index_select(target, 0, non_empty_indexes)
ground_not_unk_indexes = torch.nonzero(target - UNKNOWN_TOKEN_ID).squeeze()
self.ground_not_unk_accuracy.report(predicted, target, ground_not_unk_indexes)
model_not_unk_indexes = torch.nonzero(predicted - UNKNOWN_TOKEN_ID).squeeze()
self.model_not_unk_accuracy.report(predicted, target, model_not_unk_indexes)
def get_current_value(self, should_print=False):
general_accuracy = self.general_accuracy.get_current_value(should_print=should_print)
if (not self.is_train) and should_print:
self.empty_accuracy.get_current_value(should_print=True)
self.non_empty_accuracy.get_current_value(should_print=True)
self.ground_not_unk_accuracy.get_current_value(should_print=True)
self.model_not_unk_accuracy.get_current_value(should_print=True)
return general_accuracy
class NonTerminalTerminalAccuracyMetrics(Metrics):
def __init__(self):
super().__init__()
self.nt_accuracy = MaxPredictionAccuracyMetrics()
self.t_accuracy = MaxPredictionAccuracyMetrics()
def drop_state(self):
self.nt_accuracy.drop_state()
self.t_accuracy.drop_state()
def report(self, data):
nt_prediction, t_prediction, nt_target, t_target = data
self.nt_accuracy.report((nt_prediction, nt_target))
self.t_accuracy.report((t_prediction, t_target))
def get_current_value(self, should_print=False):
nt_value = self.nt_accuracy.get_current_value(should_print=False)
t_value = self.t_accuracy.get_current_value(should_print=False)
if should_print:
print('Non terminals accuracy: {}'.format(nt_value))
print('Terminals accuracy: {}'.format(t_value))
return nt_value, t_value
class LayeredNodeDepthsAttentionMetrics(Metrics):
def __init__(self):
super().__init__()
self.per_depth_attention_sum = np.zeros((50, 50))
self.per_depth_reports = np.zeros((50))
def drop_state(self):
pass
def report(self, node_depths, attention_coefficients):
for i in range(50):
index = torch.nonzero((node_depths == i))
if index.size()[0] == 0:
continue
selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
selected_attention = selected_attention.squeeze(2)
to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
self.per_depth_attention_sum[i] += to_report
self.per_depth_reports[i] += index.size()[0]
def get_current_value(self, should_print=False):
for i in range(50):
if abs(self.per_depth_reports[i]) > 1e-6:
self.per_depth_attention_sum[i] /= self.per_depth_reports[i]
np.save('eval/temp/attention/per_depth_matrix', self.per_depth_attention_sum)
return 0
class PerNtAttentionMetrics(Metrics):
def __init__(self):
super().__init__()
def report(self, current_input, attention_coefficients):
nt_ids = torch.argmax(current_input, dim=-1)
def drop_state(self):
pass
def get_current_value(self, should_print=False):
pass
class EmptyNonEmptyWrapper(Metrics):
def __init__(self, non_emp_base: Metrics, with_emp_base:Metrics):
super().__init__()
self.non_emp_base = non_emp_base
self.with_emp_base = with_emp_base
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
class EmptyNonEmptyTerminalTopKAccuracyWrapper(Metrics):
def __init__(self):
super().__init__()
self.non_emp_base = TopKAccuracy(k=5)
self.with_emp_base = TopKAccuracy(k=5)
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1, prediction.size()[-1])
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
class AggregatedTerminalMetrics(Metrics):
def __init__(self):
super().__init__()
self.common = BaseAccuracyMetrics()
self.target_non_unk = IndexedAccuracyMetrics('Target not unk')
self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
def drop_state(self):
self.common.drop_state()
self.target_non_unk.drop_state()
self.prediction_non_unk.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.common.report((prediction, target))
pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
self.target_non_unk.report(prediction, target, target_non_unk_indices)
def get_current_value(self, should_print=False):
print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
| true | true |
f731744360c85355f37d6f3b7a8789da418a6261 | 544 | py | Python | LeetCode/Algorithms/Easy/PascalsTriangle/PascalsTriangle.py | roshan11160/Competitive-Programming-Solutions | 2d9cfe901c23a2b7344c410b7368eb02f7fa6e7e | [
"MIT"
] | 40 | 2020-07-25T19:35:37.000Z | 2022-01-28T02:57:02.000Z | LeetCode/Algorithms/Easy/PascalsTriangle/PascalsTriangle.py | afrozchakure/Hackerrank-Problem-Solutions | 014155d841e08cb1f7609c23335576dc9b29cef3 | [
"MIT"
] | 34 | 2020-10-10T17:59:46.000Z | 2021-10-05T18:29:25.000Z | LeetCode/Algorithms/Easy/PascalsTriangle/PascalsTriangle.py | afrozchakure/Hackerrank-Problem-Solutions | 014155d841e08cb1f7609c23335576dc9b29cef3 | [
"MIT"
] | 24 | 2020-05-03T08:11:53.000Z | 2021-10-04T03:23:20.000Z | class Solution:
def generate(self, numRows: int) -> List[List[int]]:
result = [[1]]
for i in range(1, numRows):
temp1 = result[-1] + [0]
temp2 = [0] + result[-1]
result.append([temp1[i] + temp2[i] for i in range(len(temp1))])
return result[:numRows]
# Time Complexity - O(n**2)\
# Space Complexity - O(n)
"""
explanation: Any row can be constructed using the offset sum of the previous row. Example:
1 3 3 1 0
+ 0 1 3 3 1
= 1 4 6 4 1
"""
| 25.904762 | 90 | 0.523897 | class Solution:
def generate(self, numRows: int) -> List[List[int]]:
result = [[1]]
for i in range(1, numRows):
temp1 = result[-1] + [0]
temp2 = [0] + result[-1]
result.append([temp1[i] + temp2[i] for i in range(len(temp1))])
return result[:numRows]
| true | true |
f7317480ca6a2ca583ccb6170587b803d919d1a4 | 2,591 | py | Python | framework/auth/decorators.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | framework/auth/decorators.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | framework/auth/decorators.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import httplib
import functools
from flask import request
from framework.auth import cas
from framework.auth import signing
from framework.flask import redirect
from framework.exceptions import HTTPError
from .core import Auth
from .core import User
def collect_auth(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
return func(*args, **kwargs)
return wrapped
def must_be_confirmed(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
user = User.load(kwargs['uid'])
if user is not None:
if user.is_confirmed:
return func(*args, **kwargs)
else:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Account not yet confirmed',
'message_long': 'The profile page could not be displayed as the user has not confirmed the account.'
})
else:
raise HTTPError(httplib.NOT_FOUND)
return wrapped
def must_be_logged_in(func):
"""Require that user be logged in. Modifies kwargs to include the current
user.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
if kwargs['auth'].logged_in:
return func(*args, **kwargs)
else:
return redirect(cas.get_login_url(request.url))
return wrapped
def must_be_signed(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if request.method in ('GET', 'DELETE'):
data = request.args
else:
data = request.get_json()
try:
sig = data['signature']
payload = signing.unserialize_payload(data['payload'])
exp_time = payload['time']
except (KeyError, ValueError):
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Invalid payload',
'message_long': 'The request payload could not be deserialized.'
})
if not signing.default_signer.verify_payload(sig, payload):
raise HTTPError(httplib.UNAUTHORIZED)
if time.time() > exp_time:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Expired',
'message_long': 'Signature has expired.'
})
kwargs['payload'] = payload
return func(*args, **kwargs)
return wrapped
| 27.56383 | 120 | 0.603242 |
import time
import httplib
import functools
from flask import request
from framework.auth import cas
from framework.auth import signing
from framework.flask import redirect
from framework.exceptions import HTTPError
from .core import Auth
from .core import User
def collect_auth(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
return func(*args, **kwargs)
return wrapped
def must_be_confirmed(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
user = User.load(kwargs['uid'])
if user is not None:
if user.is_confirmed:
return func(*args, **kwargs)
else:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Account not yet confirmed',
'message_long': 'The profile page could not be displayed as the user has not confirmed the account.'
})
else:
raise HTTPError(httplib.NOT_FOUND)
return wrapped
def must_be_logged_in(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
if kwargs['auth'].logged_in:
return func(*args, **kwargs)
else:
return redirect(cas.get_login_url(request.url))
return wrapped
def must_be_signed(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if request.method in ('GET', 'DELETE'):
data = request.args
else:
data = request.get_json()
try:
sig = data['signature']
payload = signing.unserialize_payload(data['payload'])
exp_time = payload['time']
except (KeyError, ValueError):
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Invalid payload',
'message_long': 'The request payload could not be deserialized.'
})
if not signing.default_signer.verify_payload(sig, payload):
raise HTTPError(httplib.UNAUTHORIZED)
if time.time() > exp_time:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Expired',
'message_long': 'Signature has expired.'
})
kwargs['payload'] = payload
return func(*args, **kwargs)
return wrapped
| true | true |
f731751fa69ae18a6c65a0e7b8a660da710c2f8f | 663 | py | Python | model.py | SMMousaviSP/Sudoku-Solver | 13ab46585aaa1c8072ace58f0eee6df7388f684e | [
"MIT"
] | 26 | 2020-01-25T16:51:01.000Z | 2021-08-02T10:34:49.000Z | model.py | SMMousaviSP/Sudoku-Solver | 13ab46585aaa1c8072ace58f0eee6df7388f684e | [
"MIT"
] | 1 | 2021-04-26T09:03:39.000Z | 2021-04-26T09:03:39.000Z | model.py | SMMousaviSP/Sudoku-Solver | 13ab46585aaa1c8072ace58f0eee6df7388f684e | [
"MIT"
] | 21 | 2020-01-27T08:14:20.000Z | 2021-11-23T07:51:46.000Z | import keras
from keras.layers import Activation
from keras.layers import Conv2D, BatchNormalization, Dense, Flatten, Reshape
def get_model():
model = keras.models.Sequential()
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', input_shape=(9,9,1)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=(1,1), activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(81*9))
model.add(Reshape((-1, 9)))
model.add(Activation('softmax'))
return model
| 31.571429 | 100 | 0.689291 | import keras
from keras.layers import Activation
from keras.layers import Conv2D, BatchNormalization, Dense, Flatten, Reshape
def get_model():
model = keras.models.Sequential()
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', input_shape=(9,9,1)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=(1,1), activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(81*9))
model.add(Reshape((-1, 9)))
model.add(Activation('softmax'))
return model
| true | true |
f73175349ae72496647a8ded5362832c8f303bf2 | 45,377 | py | Python | exp/cips3d_inversion/models/generator_v2.py | PeterouZh/CIPS-3D | 9b8bfa0fb23f642af042e150ccd70408f9d137c6 | [
"MIT"
] | 308 | 2021-10-19T17:29:14.000Z | 2022-03-31T11:54:45.000Z | exp/cips3d_inversion/models/generator_v2.py | PeterouZh/CIPS-3D | 9b8bfa0fb23f642af042e150ccd70408f9d137c6 | [
"MIT"
] | 28 | 2021-10-31T22:49:00.000Z | 2022-03-25T05:49:47.000Z | exp/cips3d_inversion/models/generator_v2.py | PeterouZh/CIPS-3D | 9b8bfa0fb23f642af042e150ccd70408f9d137c6 | [
"MIT"
] | 44 | 2021-10-21T10:08:23.000Z | 2022-03-16T10:05:08.000Z | from itertools import chain
import math
import logging
import collections
from collections import OrderedDict
import tqdm
import random
import time
from einops import rearrange, repeat
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast
from tl2.proj.fvcore import MODEL_REGISTRY, build_model
# from tl2.proj.stylegan2_ada import persistence
from tl2.launch.launch_utils import global_cfg
from tl2.proj.pytorch.pytorch_hook import VerboseModel
from tl2.proj.pytorch import torch_utils
from tl2.proj.pytorch import torch_utils, init_func
from tl2 import tl2_utils
from tl2.proj.pytorch.examples.nerf import cam_params
from tl2.proj.pytorch.examples.nerf import volume_rendering
from tl2.proj.pytorch.examples.networks import nerf_net
from tl2.proj.pytorch.examples.networks import multi_head_mapping
from tl2.proj.pytorch.examples.networks import cips_net
from exp.pigan import pigan_utils
from exp.dev.nerf_inr.models.generator_nerf_inr import INRNetwork
from exp.dev.nerf_inr.models.generator_nerf_inr import GeneratorNerfINR as GeneratorNerfINR_base
from exp.comm import comm_utils
from exp.comm.models import nerf_network
from exp.comm.models import inr_network
from exp.comm.models import film_layer
from exp.comm.models import mod_conv_fc
# from exp.cips3d.models import multi_head_mapping
class SkipLayer(nn.Module):
def __init__(self, ):
super(SkipLayer, self).__init__()
def forward(self, x0, x1):
# out = (x0 + x1) / math.pi
out = (x0 + x1)
return out
class SinAct(nn.Module):
def __init__(self, ):
super(SinAct, self).__init__()
def forward(self, x):
return torch.sin(x)
class LinearSinAct(nn.Module):
def __init__(self,
in_features,
out_features):
super(LinearSinAct, self).__init__()
self.linear = nn.Linear(in_features=in_features, out_features=out_features)
self.sin = SinAct()
pass
def forward(self, x, *args, **kwargs):
x = self.linear(x)
x = self.sin(x)
return x
class FiLMLayer(nn.Module):
def __init__(self,
in_dim,
out_dim,
style_dim,
use_style_fc=True,
which_linear=nn.Linear,
**kwargs):
super(FiLMLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.style_dim = style_dim
self.use_style_fc = use_style_fc
self.linear = which_linear(in_dim, out_dim)
# self.linear.apply(film_layer.frequency_init(25))
# self.gain_scale = film_layer.LinearScale(scale=15, bias=30)
self.gain_scale = nn.Identity()
# Prepare gain and bias layers
if use_style_fc:
self.gain_fc = which_linear(style_dim, out_dim)
self.bias_fc = which_linear(style_dim, out_dim)
# self.gain_fc.weight.data.mul_(0.25)
# self.bias_fc.weight.data.mul_(0.25)
else:
self.style_dim = out_dim * 2
self.sin = SinAct()
self.lrelu = nn.LeakyReLU(0.2, inplace=True)
# self.register_buffer('stored_mean', torch.zeros(output_size))
# self.register_buffer('stored_var', torch.ones(output_size))
pass
def forward(self,
x,
style):
"""
:param x: (b, c) or (b, n, c)
:param style: (b, c)
:return:
"""
if self.use_style_fc:
gain = self.gain_fc(style)
gain = self.gain_scale(gain)
bias = self.bias_fc(style)
else:
style = rearrange(style, "b (n c) -> b n c", n=2)
gain, bias = style.unbind(dim=1)
gain = self.gain_scale(gain)
if x.dim() == 3:
gain = rearrange(gain, "b c -> b 1 c")
bias = rearrange(bias, "b c -> b 1 c")
elif x.dim() == 2:
pass
else:
assert 0
x = self.linear(x)
x = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-8)
# out = self.sin(gain * x + bias)
out = self.lrelu((gain + 1.) * x + bias)
return out
def __repr__(self):
s = f'{self.__class__.__name__}(' \
f'in_dim={self.in_dim}, ' \
f'out_dim={self.out_dim}, ' \
f'style_dim={self.style_dim}, ' \
f'use_style_fc={self.use_style_fc}, ' \
f')'
return s
class INRNetwork_Skip(nn.Module):
def __repr__(self): return f"{self.__class__.__name__}({self.repr})"
def __init__(self,
input_dim,
style_dim,
hidden_layers,
dim_scale=1,
rgb_dim=3,
device=None,
name_prefix='inr',
**kwargs):
"""
:param z_dim:
:param hidden_dim:
:param rgb_dim:
:param device:
:param kwargs:
"""
super().__init__()
self.repr = f"input_dim={input_dim}, " \
f"style_dim={style_dim}, " \
f"hidden_layers={hidden_layers}, " \
f"dim_scale={dim_scale}, "
self.device = device
self.rgb_dim = rgb_dim
self.hidden_layers = hidden_layers
self.name_prefix = name_prefix
self.channels = {
0: int(512 * dim_scale), # 4
1: int(512 * dim_scale), # 8
2: int(512 * dim_scale), # 16
3: int(512 * dim_scale), # 32
4: int(512 * dim_scale), # 64
5: int(128 * dim_scale), # 128
6: int(64 * dim_scale), # 256
7: int(32 * dim_scale), # 512
8: int(16 * dim_scale), # 1024
}
self.style_dim_dict = {}
_out_dim = input_dim
self.network = nn.ModuleList()
self.to_rbgs = nn.ModuleList()
for i in range(hidden_layers):
_in_dim = _out_dim
_out_dim = self.channels[i]
_layer = film_layer.FiLMLayer(in_dim=_in_dim,
out_dim=_out_dim,
style_dim=style_dim)
self.network.append(_layer)
self.style_dim_dict[f'{name_prefix}_w{i}_0'] = _layer.style_dim
_layer = film_layer.FiLMLayer(in_dim=_out_dim,
out_dim=_out_dim,
style_dim=style_dim)
self.network.append(_layer)
self.style_dim_dict[f'{name_prefix}_w{i}_1'] = _layer.style_dim
to_rgb = inr_network.ToRGB(in_dim=_out_dim, dim_rgb=3)
self.to_rbgs.append(to_rgb)
self.tanh = nn.Sequential(
# nn.Linear(hidden_dim, rgb_dim),
nn.Tanh()
)
# self.to_rbg.apply(frequency_init(25))
torch_utils.print_number_params(
{
'network': self.network,
'to_rbgs': self.to_rbgs,
'inr_net': self
})
logging.getLogger('tl').info(self)
pass
def forward(self,
input,
style_dict,
**kwargs):
"""
:param input: points xyz, (b, num_points, 3)
:param style_dict:
:param ray_directions: (b, num_points, 3)
:param kwargs:
:return:
- out: (b, num_points, 4), rgb(3) + sigma(1)
"""
x = input
rgb = 0
for index in range(self.hidden_layers):
_layer = self.network[index * 2]
style = style_dict[f'{self.name_prefix}_w{index}_0']
if global_cfg.tl_debug:
VerboseModel.forward_verbose(_layer,
inputs_args=(x, style),
name_prefix=f"{self.name_prefix}.network.{index}.0.")
x = _layer(x, style)
_layer = self.network[index * 2 + 1]
style = style_dict[f'{self.name_prefix}_w{index}_1']
if global_cfg.tl_debug:
VerboseModel.forward_verbose(_layer,
inputs_args=(x, style),
name_prefix=f"{self.name_prefix}.network.{index}.1.")
x = _layer(x, style)
if global_cfg.tl_debug:
VerboseModel.forward_verbose(self.to_rbgs[index],
inputs_args=(x, rgb),
name_prefix=f'to_rgb.{index}')
rgb = self.to_rbgs[index](x, skip=rgb)
# if global_cfg.tl_debug:
# VerboseModel.forward_verbose(self.to_rbg,
# inputs_args=(x, ),
# name_prefix='to_rgb.')
# out = self.to_rbg(x)
if global_cfg.tl_debug:
VerboseModel.forward_verbose(self.tanh,
inputs_args=(rgb, ),
name_prefix='tanh.')
out = self.tanh(rgb)
return out
class ModSinLayer(nn.Module):
def __repr__(self): return f"{self.__class__.__name__}({self.repr})"
def __init__(self,
in_dim,
use_style_fc=False,
style_dim=None,
which_linear=nn.Linear,
spectral_norm=False,
eps=1e-5,
freq=1,
phase=0,
**kwargs):
super(ModSinLayer, self).__init__()
self.repr = f"in_dim={in_dim}, use_style_fc={use_style_fc}, style_dim={style_dim}, " \
f"freq={freq}, phase={phase}"
self.in_dim = in_dim
self.use_style_fc = use_style_fc
self.style_dim = style_dim
self.freq = freq
self.phase = phase
self.spectral_norm = spectral_norm
# Prepare gain and bias layers
if use_style_fc:
self.gain_fc = which_linear(style_dim, in_dim)
self.bias_fc = which_linear(style_dim, in_dim)
if spectral_norm:
self.gain_fc = nn.utils.spectral_norm(self.gain_fc)
self.bias_fc = nn.utils.spectral_norm(self.bias_fc)
else:
self.style_dim = in_dim * 2
self.eps = eps
self.lrelu = nn.LeakyReLU(0.2, inplace=True)
# self.register_buffer('stored_mean', torch.zeros(output_size))
# self.register_buffer('stored_var', torch.ones(output_size))
pass
def forward(self,
x,
style):
"""
Calculate class-conditional gains and biases.
:param x: (b, c) or (b, n, c)
:param style: (b, c)
:return:
"""
assert style.shape[-1] == self.style_dim
if self.use_style_fc:
gain = self.gain_fc(style) + 1.
bias = self.bias_fc(style)
else:
style = rearrange(style, "b (n c) -> b n c", n=2)
gain, bias = style.unbind(dim=1)
gain = gain + 1.
if x.dim() == 3:
gain = rearrange(gain, "b c -> b 1 c")
bias = rearrange(bias, "b c -> b 1 c")
elif x.dim() == 2:
pass
else:
assert 0
# x = torch.sin(self.freq * x + self.phase)
# out = x * gain + bias
x = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-8)
x = x * gain + bias
out = self.lrelu(x)
return out
class ModSinLayer_NoBias(nn.Module):
    """Sine activation followed by a style-conditioned gain (no bias term)."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 in_dim,
                 use_style_fc=False,
                 style_dim=None,
                 which_linear=nn.Linear,
                 spectral_norm=False,
                 eps=1e-5,
                 freq=1,
                 phase=0,
                 **kwargs):
        super(ModSinLayer_NoBias, self).__init__()
        self.repr = (f"in_dim={in_dim}, use_style_fc={use_style_fc}, "
                     f"style_dim={style_dim}, freq={freq}, phase={phase}")

        self.in_dim = in_dim
        self.use_style_fc = use_style_fc
        self.style_dim = style_dim
        self.freq = freq
        self.phase = phase
        self.spectral_norm = spectral_norm

        if use_style_fc:
            # One linear head predicts the multiplicative gain from the style.
            gain_fc = which_linear(style_dim, in_dim)
            if spectral_norm:
                gain_fc = nn.utils.spectral_norm(gain_fc)
            self.gain_fc = gain_fc
        else:
            # Style packs [gain | bias] along the channel dimension.
            self.style_dim = in_dim * 2
        self.eps = eps
        pass

    def forward(self,
                x,
                style):
        """Apply sin(freq * x + phase) and scale by a style-derived gain.

        :param x: (b, c) or (b, n, c)
        :param style: (b, style_dim)
        :return: tensor with the same shape as `x`
        """
        assert style.shape[-1] == self.style_dim

        if self.use_style_fc:
            gain = self.gain_fc(style) + 1.
        else:
            packed = rearrange(style, "b (n c) -> b n c", n=2)
            gain, _bias = packed.unbind(dim=1)  # bias is intentionally unused
            gain = gain + 1.

        if x.dim() == 3:
            gain = rearrange(gain, "b c -> b 1 c")
        else:
            assert x.dim() == 2

        return torch.sin(self.freq * x + self.phase) * gain
class SinBlock(nn.Module):
    """Two style-modulated FC layers (SinStyleMod) with LeakyReLU activations
    and an optional additive skip connection."""

    def __init__(self,
                 in_dim,
                 out_dim,
                 style_dim,
                 name_prefix,
                 ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.name_prefix = name_prefix

        # Maps style-code names to the style dim each modulated layer expects.
        self.style_dim_dict = {}

        self.mod1 = mod_conv_fc.SinStyleMod(in_channel=in_dim,
                                            out_channel=out_dim,
                                            style_dim=style_dim,
                                            use_style_fc=True)
        self.style_dim_dict[f'{name_prefix}_0'] = self.mod1.style_dim
        self.act1 = nn.LeakyReLU(0.2, inplace=True)

        self.mod2 = mod_conv_fc.SinStyleMod(in_channel=out_dim,
                                            out_channel=out_dim,
                                            style_dim=style_dim,
                                            use_style_fc=True)
        self.style_dim_dict[f'{name_prefix}_1'] = self.mod2.style_dim
        self.act2 = nn.LeakyReLU(0.2, inplace=True)

        self.skip = SkipLayer()
        pass

    def forward(self,
                x,
                style_dict,
                skip=False):
        """Run both modulated layers; optionally add the input back.

        :param x: input features
        :param style_dict: {f'{name_prefix}_0': ..., f'{name_prefix}_1': ...}
        :param skip: add a residual connection when widths match
        """
        residual = x

        x = self.act1(self.mod1(x, style_dict[f'{self.name_prefix}_0']))
        out = self.act2(self.mod2(x, style_dict[f'{self.name_prefix}_1']))

        # Residual connection only when requested and the widths agree.
        if skip and out.shape[-1] == residual.shape[-1]:
            out = self.skip(out, residual)
        return out

    def __repr__(self):
        return (f"{self.__class__.__name__}(in_dim={self.in_dim}, "
                f"out_dim={self.out_dim}, "
                f"style_dim={self.style_dim})")
class ToRGB(nn.Module):
    """Project features to RGB (or an arbitrary width) and optionally add a
    previously accumulated skip image."""

    def __init__(self,
                 in_dim,
                 dim_rgb=3,
                 use_equal_fc=False):
        super().__init__()
        self.in_dim = in_dim
        self.dim_rgb = dim_rgb
        # Either an equalized-lr linear head or a vanilla one.
        if use_equal_fc:
            self.linear = mod_conv_fc.EqualLinear(in_dim, dim_rgb, scale=1.)
        else:
            self.linear = nn.Linear(in_dim, dim_rgb)
        pass

    def forward(self,
                input,
                skip=None):
        rgb = self.linear(input)
        return rgb if skip is None else rgb + skip
@MODEL_REGISTRY.register(name_prefix=__name__)
# class Generator_Diffcam(GeneratorNerfINR_base):
class Generator_Diffcam(nn.Module):
    """NeRF + INR generator driven by externally supplied camera rays.

    Pipeline: (z_shape, z_app, z_inr) -> mapping networks -> style dicts;
    rays -> SIREN-skip NeRF -> feature pixels -> CIPS INR net -> image.
    """

    def __repr__(self):
        return tl2_utils.get_class_repr(self)

    def __init__(self,
                 nerf_cfg,
                 mapping_shape_cfg,
                 mapping_app_cfg,
                 inr_cfg,
                 mapping_inr_cfg,
                 shape_block_end_index=None,
                 app_block_end_index=None,
                 inr_block_end_index=None,
                 device='cuda',
                 **kwargs):
        """
        :param nerf_cfg: config for the NeRF backbone (NeRFNetwork_SIREN_skip)
        :param mapping_shape_cfg: config for the shape mapping network
        :param mapping_app_cfg: config for the appearance mapping network
        :param inr_cfg: config for the CIPS INR renderer
        :param mapping_inr_cfg: config for the INR mapping network
        :param shape_block_end_index: optional truncation index for shape blocks
        :param app_block_end_index: optional truncation index for appearance blocks
        :param inr_block_end_index: optional truncation index for INR blocks
        :param device: device used when sampling latent codes
        """
        super(Generator_Diffcam, self).__init__()

        # String consumed by tl2_utils.get_class_repr in __repr__.
        self.repr_str = tl2_utils.dict2string(dict_obj={
            'nerf_cfg': nerf_cfg,
            'mapping_shape_cfg': mapping_shape_cfg,
            'mapping_app_cfg': mapping_app_cfg,
            'inr_cfg': inr_cfg,
            'mapping_inr_cfg': mapping_inr_cfg,
            'shape_block_end_index': shape_block_end_index,
            'app_block_end_index': app_block_end_index,
            'inr_block_end_index': inr_block_end_index,
        })

        self.device = device
        self.inr_block_end_index = inr_block_end_index

        # Names of sub-modules, used below for parameter-count logging.
        self.module_name_list = []

        # nerf_net: NeRF backbone producing per-point rgb-features + sigma.
        self.nerf_net = nerf_net.NeRFNetwork_SIREN_skip(
            shape_block_end_index=shape_block_end_index,
            app_block_end_index=app_block_end_index,
            **nerf_cfg)
        self.module_name_list.append('nerf_net')

        # mapping shape: z_shape -> per-layer shape styles.
        self.mapping_shape = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_shape_cfg,
            'head_dim_dict': self.nerf_net.style_dim_dict_shape
        })
        self.module_name_list.append('mapping_shape')

        # mapping appearance: z_app -> per-layer appearance styles.
        self.mapping_app = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_app_cfg,
            'head_dim_dict': self.nerf_net.style_dim_dict_app
        })
        self.module_name_list.append('mapping_app')

        _in_dim = nerf_cfg.app_net_cfg.out_dim

        # inr_net: renders the NeRF feature pixels into the final image.
        self.inr_net = cips_net.CIPSNet(**{
            **inr_cfg,
            "input_dim": _in_dim,
            'add_out_layer': True,
        })
        self.module_name_list.append('inr_net')

        # mapping inr: z_inr -> styles consumed by the INR renderer.
        self.mapping_inr = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_inr_cfg,
            'head_dim_dict': self.inr_net.style_dim_dict
        })
        self.module_name_list.append('mapping_inr')

        # Auxiliary feature->RGB head applied directly to the NeRF features.
        self.aux_to_rbg = nn.Sequential(
            nn.Linear(_in_dim, 3),
            nn.Tanh()
        )
        self.aux_to_rbg.apply(nerf_network.frequency_init(25))
        self.module_name_list.append('aux_to_rbg')

        # Log parameter counts of all registered sub-modules.
        logger = logging.getLogger('tl')
        models_dict = {}
        for name in self.module_name_list:
            models_dict[name] = getattr(self, name)
        models_dict['G'] = self
        torch_utils.print_number_params(models_dict=models_dict, logger=logger)
        logger.info(self)
        pass
def forward(self,
            zs,
            rays_o,
            rays_d,
            nerf_kwargs=None,
            psi=1,
            return_aux_img=False,
            grad_points=None,
            forward_points=None,  # disable gradients
            **kwargs):
    """
    Generates images from noise vectors and per-pixel camera rays.
    Uses the hierarchical sampling scheme described in NeRF.

    :param zs: {k: (b, z_dim), ...}
    :param rays_o: (b, h, w, 3) ray origins in world space
    :param rays_d: (b, h, w, 3) ray directions in world space
    :param nerf_kwargs: volume-rendering settings (near, far, N_samples, ...);
        defaults to an empty dict
    :param psi: truncation factor in [0, 1]; < 1 pulls styles toward their mean
    :param return_aux_img: also render the auxiliary RGB image
    :param grad_points: if set and < h*w, only that many rays receive gradients
    :param forward_points: chunk size for memory-bounded no-grad inference
    :return:
    - imgs: (b, 3, h, w)
    - ret_imgs: dict of auxiliary maps reshaped to image layout
    """
    # Fix: `nerf_kwargs={}` was a mutable default argument shared across
    # calls; use None as the sentinel instead.
    if nerf_kwargs is None:
        nerf_kwargs = {}

    # mapping network: latents -> merged style dict.
    style_dict = self.mapping_network(**zs)

    if psi < 1:
        # Truncation trick: interpolate styles toward the average style.
        avg_styles = self.generate_avg_frequencies(device=self.device)
        style_dict = self.get_truncated_freq_phase(
            raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)

    b, h, w, c = rays_o.shape
    # Flatten the spatial grid so rays are (b, h*w, 3).
    rays_o = rearrange(rays_o, "b h w c -> b (h w) c")
    rays_d = rearrange(rays_d, "b h w c -> b (h w) c")

    if grad_points is not None and grad_points < h * w:
        # Backprop through only a random subset of rays (saves memory).
        imgs, ret_maps = self.part_grad_forward(
            rays_o=rays_o,
            rays_d=rays_d,
            style_dict=style_dict,
            nerf_kwargs=nerf_kwargs,
            return_aux_img=return_aux_img,
            grad_points=grad_points)
    else:
        imgs, ret_maps = self.whole_grad_forward(
            rays_o=rays_o,
            rays_d=rays_d,
            style_dict=style_dict,
            nerf_kwargs=nerf_kwargs,
            return_aux_img=return_aux_img,
            forward_points=forward_points)

    # Back to image layout.
    imgs = rearrange(imgs, "b (h w) c -> b c h w", h=h, w=w)

    ret_imgs = {}
    for name, v_map in ret_maps.items():
        if v_map.dim() == 3:
            v_map = rearrange(v_map, "b (h w) c -> b c h w", h=h, w=w)
        elif v_map.dim() == 2:
            v_map = rearrange(v_map, "b (h w) -> b h w", h=h, w=w)
        ret_imgs[name] = v_map

    return imgs, ret_imgs
def get_rays_axis_angle(self,
                        R,
                        t,
                        fx,
                        fy,
                        H: int,
                        W: int,
                        N_rays: int = -1):
    """Cast one ray per pixel for a batch of axis-angle cameras.

    :param R: (b, 3) axis-angle rotation
    :param t: (b, 3) translation
    :param fx: focal length along x
    :param fy: focal length along y
    :param H: image height
    :param W: image width
    :param N_rays: number of rays to subsample (-1 keeps all)
    :return:
    - rays_o: (b, H, W, 3)
    - rays_d: (b, H, W, 3)
    - select_inds: (b, H, W)
    """
    # Delegate ray generation to the camera utility; keep the spatial grid.
    ray_bundle = cam_params.get_rays(
        rot=R,
        trans=t,
        focal_x=fx,
        focal_y=fy,
        H=H,
        W=W,
        N_rays=N_rays,
        flatten=False)
    rays_o, rays_d, select_inds = ray_bundle
    return rays_o, rays_d, select_inds
def get_batch_style_dict(self, b, style_dict):
    """Slice every style tensor down to sample `b`, keeping the batch dim
    (``w[[b]]`` yields shape (1, ...))."""
    return {name: w[[b]] for name, w in style_dict.items()}
def whole_grad_forward(self,
                       rays_o,
                       rays_d,
                       style_dict,
                       nerf_kwargs,
                       return_aux_img=True,
                       forward_points=None,
                       **kwargs):
    """Render all rays with gradients, or chunked under no_grad.

    :param rays_o: (b, h*w, 3) ray origins
    :param rays_d: (b, h*w, 3) ray directions
    :param style_dict: {name: (b, style_dim), ...}
    :param nerf_kwargs: near / far / N_samples / ...
    :param return_aux_img: also produce the auxiliary RGB image
    :param forward_points: if set and smaller than the ray count, render
        per-sample in no-grad chunks of this many rays (inference mode)
    :return: (imgs, ret_maps)
    """
    if forward_points is not None and forward_points < rays_o.shape[1]:  # no gradients
        # stage forward: render each batch element in fixed-size ray chunks.
        with torch.no_grad():
            batch_size = rays_o.shape[0]
            num_points = rays_o.shape[1]

            near = nerf_kwargs['near']
            far = nerf_kwargs['far']
            N_samples = nerf_kwargs['N_samples']
            perturb = self.training
            # Sample depths and 3D points along every ray.
            z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                                rays_d=rays_d,
                                                                near=near,
                                                                far=far,
                                                                N_samples=N_samples,
                                                                perturb=perturb)

            batch_image_ddict = collections.defaultdict(list)
            for b in range(batch_size):
                image_ddict = collections.defaultdict(list)

                head = 0
                while head < num_points:
                    tail = head + forward_points
                    cur_style_dict = self.get_batch_style_dict(b=b, style_dict=style_dict)

                    cur_inr_img, cur_ret_maps = self.points_forward(
                        rays_o=rays_o[[b], head:tail],  # (b, hxw, 3)
                        rays_d=rays_d[[b], head:tail],  # (b, hxw, 3)
                        points=points[[b], head:tail],  # (b, hxw, Nsamples, 3)
                        z_vals=z_vals[[b], head:tail],  # (b, hxw, Nsamples)
                        style_dict=cur_style_dict,
                        nerf_kwargs=nerf_kwargs,
                        return_aux_img=return_aux_img)

                    image_ddict['inr_img'].append(cur_inr_img)
                    for k, v in cur_ret_maps.items():
                        image_ddict[k].append(v)
                    head += forward_points
                # Stitch the chunks back together along the ray dimension.
                for k, v in image_ddict.items():
                    one_image = torch.cat(v, dim=1)
                    batch_image_ddict[k].append(one_image)
            # Stack per-sample results back into a batch.
            ret_maps = {}
            for k, v in batch_image_ddict.items():
                ret_maps[k] = torch.cat(v, dim=0)
            imgs = ret_maps.pop('inr_img')

    else:
        # Single full-resolution pass (gradients enabled when training).
        near = nerf_kwargs['near']
        far = nerf_kwargs['far']
        N_samples = nerf_kwargs['N_samples']
        perturb = self.training
        z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                            rays_d=rays_d,
                                                            near=near,
                                                            far=far,
                                                            N_samples=N_samples,
                                                            perturb=perturb)

        imgs, ret_maps = self.points_forward(
            rays_o=rays_o,
            rays_d=rays_d,
            points=points,
            z_vals=z_vals,
            style_dict=style_dict,
            nerf_kwargs=nerf_kwargs,
            return_aux_img=return_aux_img)

    return imgs, ret_maps
def part_grad_forward(self,
                      rays_o,
                      rays_d,
                      style_dict,
                      nerf_kwargs,
                      return_aux_img,
                      grad_points):
    """Render with gradients for only a random subset of `grad_points` rays.

    The remaining rays are rendered under torch.no_grad() and both partial
    results are scattered back into a full image — a memory-saving scheme
    for training at high resolution.

    :return: (imgs, ret_maps), same shapes as whole_grad_forward
    """
    near = nerf_kwargs['near']
    far = nerf_kwargs['far']
    N_samples = nerf_kwargs['N_samples']
    perturb = self.training
    # z_vals: (b, hxw, Nsamples), points: (b, hxw, Nsamples, 3)
    z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,  # (b, hxw, 3)
                                                        rays_d=rays_d,  # (b, hxw, 3)
                                                        near=near,
                                                        far=far,
                                                        N_samples=N_samples,
                                                        perturb=perturb)

    batch_size = rays_o.shape[0]
    num_points = rays_o.shape[1]

    device = self.device
    assert num_points > grad_points
    # Randomly split ray indices into a grad subset and a no-grad subset.
    idx_grad, idx_no_grad = torch_utils.batch_random_split_indices(bs=batch_size,
                                                                   num_points=num_points,
                                                                   grad_points=grad_points,
                                                                   device=device)

    # Rays that keep gradients.
    inr_img_grad, ret_maps_grad = self.points_forward(
        rays_o=rays_o,
        rays_d=rays_d,
        points=points,
        z_vals=z_vals,
        style_dict=style_dict,
        nerf_kwargs=nerf_kwargs,
        return_aux_img=return_aux_img,
        idx_grad=idx_grad)

    # Remaining rays rendered without gradients.
    with torch.no_grad():
        inr_img_no_grad, ret_maps_no_grad = self.points_forward(
            rays_o=rays_o,
            rays_d=rays_d,
            points=points,
            z_vals=z_vals,
            style_dict=style_dict,
            nerf_kwargs=nerf_kwargs,
            return_aux_img=return_aux_img,
            idx_grad=idx_no_grad)

    # Scatter both partial renderings back to the full set of rays.
    imgs = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                           points_grad=inr_img_grad,
                                           idx_no_grad=idx_no_grad,
                                           points_no_grad=inr_img_no_grad,
                                           num_points=num_points)
    ret_maps = {}
    for k in ret_maps_grad.keys():
        comp_map = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                                   points_grad=ret_maps_grad[k],
                                                   idx_no_grad=idx_no_grad,
                                                   points_no_grad=ret_maps_no_grad[k],
                                                   num_points=num_points)
        ret_maps[k] = comp_map
    return imgs, ret_maps
def points_forward(self,
                   rays_o,
                   rays_d,
                   points,
                   z_vals,
                   style_dict,
                   nerf_kwargs,
                   return_aux_img,
                   idx_grad=None,
                   **kwargs):
    """
    Render a set of rays: coarse NeRF pass, optional hierarchical fine pass,
    volume integration to feature pixels, then the INR network.

    :param rays_o: (b, hxw, 3)
    :param rays_d: (b, hxw, 3)
    :param points: (b, hxw, Nsamples, 3)
    :param z_vals: (b, hxw, Nsamples)
    :param style_dict: {name: (b, style_dim), ...}
    :param nerf_kwargs: N_samples / N_importance / raw_noise_std / eps / ...
    :param return_aux_img: also run the auxiliary RGB head
    :param idx_grad: (b, N_grad, ) optional subset of rays to render
    :param kwargs: unused
    :return: (inr_img, ret_maps)
    """
    device = points.device
    viewdirs = volume_rendering.get_viewdirs(rays_d=rays_d)
    # NOTE(review): viewdirs is computed from the FULL rays_d before the
    # idx_grad gather below; if nerf_net actually consumes ray_directions,
    # the subset path would be shape-mismatched — confirm against nerf_net.
    N_samples = nerf_kwargs['N_samples']

    if idx_grad is not None:
        # Restrict everything to the selected subset of rays.
        rays_o = comm_utils.batch_gather_points(points=rays_o, idx_grad=idx_grad)
        rays_d = comm_utils.batch_gather_points(points=rays_d, idx_grad=idx_grad)
        points = comm_utils.batch_gather_points(points=points, idx_grad=idx_grad)
        z_vals = comm_utils.batch_gather_points(points=z_vals, idx_grad=idx_grad)

    points = rearrange(points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
    coarse_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=N_samples)

    # Model prediction on coarse points.
    coarse_output = self.nerf_net(
        x=points,  # b (Nrays Nsamples) c
        ray_directions=coarse_viewdirs,  # b (Nrays Nsamples) c
        style_dict=style_dict)
    coarse_output = rearrange(
        coarse_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=N_samples)

    # Re-sample fine points along camera rays, as described in NeRF.
    if nerf_kwargs['N_importance'] > 0:

        with torch.no_grad():
            # Importance-sample depths from the coarse density.
            raw_sigma = coarse_output[..., -1]
            perturb = self.training
            fine_z_vals, fine_points = volume_rendering.get_fine_points(
                z_vals=z_vals,
                rays_o=rays_o,
                rays_d=rays_d,
                raw_sigma=raw_sigma,
                N_importance=nerf_kwargs['N_importance'],
                perturb=perturb,
                raw_noise_std=nerf_kwargs['raw_noise_std'],
                eps=nerf_kwargs['eps'])

        # Model prediction on re-sampled fine points.
        fine_points = rearrange(fine_points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
        fine_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=nerf_kwargs['N_importance'])

        fine_output = self.nerf_net(
            x=fine_points,  # b (Nrays Nsamples) c
            ray_directions=fine_viewdirs,  # b (Nrays Nsamples) c
            style_dict=style_dict)
        fine_output = rearrange(
            fine_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=nerf_kwargs['N_importance'])

        # Combine coarse and fine points, sorted by depth along each ray.
        DIM_SAMPLES = 2
        all_z_vals = torch.cat([fine_z_vals, z_vals], dim=DIM_SAMPLES)  # (b, N_rays, N_samples)
        _, indices = torch.sort(all_z_vals, dim=DIM_SAMPLES)  # (b, N_rays, N_samples)
        # gather z_vals
        all_z_vals = torch.gather(all_z_vals, DIM_SAMPLES, indices)  # (b, N_rays, N_samples)
        # (b, N_rays, N_samples, rgb_sigma)
        all_outputs = torch.cat([fine_output, coarse_output], dim=DIM_SAMPLES)
        # Expand the sort indices with trailing singleton dims to gather rgb_sigma.
        view_shape = [*indices.shape, *(len(all_outputs.shape) - len(indices.shape)) * [1]]
        all_outputs = torch.gather(all_outputs, DIM_SAMPLES, indices.view(view_shape).expand_as(all_outputs))

    else:
        all_outputs = coarse_output
        all_z_vals = z_vals

    # Create feature pixels with volume rendering.
    all_raw_rgb = all_outputs[..., :-1]
    all_raw_sigma = all_outputs[..., -1]

    pixels_fea, ret_maps = volume_rendering.ray_integration(raw_rgb=all_raw_rgb,
                                                            raw_sigma=all_raw_sigma,
                                                            z_vals=all_z_vals,
                                                            rays_d=rays_d,
                                                            raw_noise_std=nerf_kwargs['raw_noise_std'],
                                                            eps=nerf_kwargs['eps'])

    # inr_net: feature pixels -> final image.
    inr_img = self.inr_net(pixels_fea, style_dict, block_end_index=self.inr_block_end_index)

    if return_aux_img:
        # aux rgb_branch: direct feature->RGB projection.
        aux_img = self.aux_to_rbg(pixels_fea)
        ret_maps['aux_img'] = aux_img

    return inr_img, ret_maps
def z_sampler(self,
              shape,
              device,
              dist='gaussian'):
    """Draw a latent tensor.

    :param shape: output shape tuple
    :param device: target torch device
    :param dist: 'gaussian' -> N(0, 1); 'uniform' -> U(-1, 1)
    :return: tensor of `shape` on `device`
    :raises ValueError: for an unknown `dist` (previously fell through and
        crashed with UnboundLocalError on the return)
    """
    if dist == 'gaussian':
        return torch.randn(shape, device=device)
    if dist == 'uniform':
        return torch.rand(shape, device=device) * 2 - 1
    raise ValueError(f"Unknown dist: {dist}")
def get_zs(self,
           b,
           batch_split=1):
    """Sample latent codes for the shape / appearance / INR mapping networks.

    :param b: batch size
    :param batch_split: if > 1, return a list of dicts, each covering
        b // batch_split samples
    :return: {'z_shape', 'z_app', 'z_inr'} dict, or a list of such dicts
    """
    z_shape = self.z_sampler(shape=(b, self.mapping_shape.z_dim), device=self.device)
    z_app = self.z_sampler(shape=(b, self.mapping_app.z_dim), device=self.device)
    z_inr = self.z_sampler(shape=(b, self.mapping_inr.z_dim), device=self.device)

    if batch_split <= 1:
        return {
            'z_shape': z_shape,
            'z_app': z_app,
            'z_inr': z_inr,
        }

    # Split each latent along the batch dim and zip the chunks together.
    chunk = b // batch_split
    zs_list = [
        {'z_shape': zs_, 'z_app': za_, 'z_inr': zi_}
        for zs_, za_, zi_ in zip(z_shape.split(chunk),
                                 z_app.split(chunk),
                                 z_inr.split(chunk))
    ]
    return zs_list
def mapping_network(self,
                    z_shape,
                    z_app,
                    z_inr):
    """Map the three latent codes to one merged style dict.

    :param z_shape: (b, z_dim) latent for the shape branch
    :param z_app: (b, z_dim) latent for the appearance branch
    :param z_inr: (b, z_dim) latent for the INR branch
    :return: {style_name: (b, style_dim), ...} merged from all three heads
    """
    if global_cfg.tl_debug:
        # Verbose per-layer shape logging in debug mode.
        VerboseModel.forward_verbose(self.mapping_shape,
                                     inputs_args=(z_shape,),
                                     submodels=['base_net'],
                                     name_prefix='mapping_shape.')
        VerboseModel.forward_verbose(self.mapping_app,
                                     inputs_args=(z_app,),
                                     submodels=['base_net'],
                                     name_prefix='mapping_app.')
        VerboseModel.forward_verbose(self.mapping_inr,
                                     inputs_args=(z_inr,),
                                     submodels=['base_net', ],
                                     input_padding=50,
                                     name_prefix='mapping_inr.')
    style_dict = {}
    style_dict.update(self.mapping_shape(z_shape))
    style_dict.update(self.mapping_app(z_app))
    style_dict.update(self.mapping_inr(z_inr))

    return style_dict
def get_truncated_freq_phase(self,
                             raw_style_dict,
                             avg_style_dict,
                             raw_lambda):
    """Truncation trick: interpolate each raw style toward its average.

    w' = w_avg + raw_lambda * (w - w_avg)

    :param raw_style_dict: styles for the current batch
    :param avg_style_dict: per-name average styles
    :param raw_lambda: interpolation factor (1 keeps the raw styles)
    :return: dict with the same keys as `avg_style_dict`
    """
    return {
        name: avg + raw_lambda * (raw_style_dict[name] - avg)
        for name, avg in avg_style_dict.items()
    }
def generate_avg_frequencies(self,
                             num_samples=10000,
                             device='cuda'):
    """Calculates average frequencies and phase shifts"""
    # Monte-Carlo estimate: sample many latents and average their styles.
    zs = self.get_zs(num_samples)
    with torch.no_grad():
        style_dict = self.mapping_network(**zs)
        avg_styles = {name: w.mean(0, keepdim=True)
                      for name, w in style_dict.items()}
    return avg_styles
def staged_forward(self, *args, **kwargs):
    # Staged (memory-bounded) inference is not implemented for this
    # generator; use forward(..., forward_points=...) instead.
    raise NotImplementedError

def set_device(self, device):
    # No-op: device placement is handled externally; kept for API
    # compatibility with other generators.
    pass
def forward_camera_pos_and_lookup(self,
                                  zs,
                                  img_size,
                                  fov,
                                  ray_start,
                                  ray_end,
                                  num_steps,
                                  h_stddev,
                                  v_stddev,
                                  h_mean,
                                  v_mean,
                                  hierarchical_sample,
                                  camera_pos,
                                  camera_lookup,
                                  psi=1,
                                  sample_dist=None,
                                  lock_view_dependence=False,
                                  clamp_mode='relu',
                                  nerf_noise=0.,
                                  white_back=False,
                                  last_back=False,
                                  return_aux_img=False,
                                  grad_points=None,
                                  forward_points=None,
                                  **kwargs):
    """
    Generates images from a noise vector, rendering parameters, and camera distribution.
    Uses the hierarchical sampling scheme described in NeRF.

    NOTE(review): this looks like legacy code. It references
    self.mapping_network_nerf (not defined on this class) and calls
    self.part_grad_forward / self.whole_grad_forward with an image-space
    signature that does not match the ray-based implementations defined
    above — confirm before relying on this path.

    :param zs: latent dict, expected keys 'z_nerf' and 'z_inr'
    :param img_size: output resolution
    :param fov: face: 12
    :param ray_start: face: 0.88
    :param ray_end: face: 1.12
    :param num_steps: face: 12
    :param h_stddev: face: 0.3
    :param v_stddev: face: 0.155
    :param h_mean: face: pi/2
    :param v_mean: face: pi/2
    :param hierarchical_sample: face: true
    :param camera_pos: (b, 3)
    :param camera_lookup: (b, 3)
    :param psi: truncation factor in [0, 1]
    :param sample_dist: mode for sample_camera_positions, face: 'gaussian'
    :param lock_view_dependence: face: false
    :param clamp_mode: face: 'relu'
    :param nerf_noise: sigma noise std
    :param last_back: face: false
    :param white_back: face: false
    :param kwargs: unused
    :return:
    - pixels: (b, 3, h, w)
    - pitch_yaw: (b, 2)
    """

    # mapping network (with verbose logging in debug mode)
    if global_cfg.tl_debug:
        VerboseModel.forward_verbose(self.mapping_network_nerf,
                                     inputs_args=(zs['z_nerf'],),
                                     submodels=['base_net'],
                                     name_prefix='mapping_nerf.')
        VerboseModel.forward_verbose(self.mapping_network_inr,
                                     inputs_args=(zs['z_inr'],),
                                     submodels=['base_net', ],
                                     input_padding=50,
                                     name_prefix='mapping_inr.')

    style_dict = self.mapping_network(**zs)

    if psi < 1:
        # Truncation trick: interpolate styles toward their average.
        avg_styles = self.generate_avg_frequencies(device=self.device)
        style_dict = self.get_truncated_freq_phase(
            raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)

    if grad_points is not None and grad_points < img_size ** 2:
        # Gradient-subset path (memory saving during training).
        imgs, pitch_yaw = self.part_grad_forward(
            style_dict=style_dict,
            img_size=img_size,
            fov=fov,
            ray_start=ray_start,
            ray_end=ray_end,
            num_steps=num_steps,
            h_stddev=h_stddev,
            v_stddev=v_stddev,
            h_mean=h_mean,
            v_mean=v_mean,
            hierarchical_sample=hierarchical_sample,
            sample_dist=sample_dist,
            lock_view_dependence=lock_view_dependence,
            clamp_mode=clamp_mode,
            nerf_noise=nerf_noise,
            white_back=white_back,
            last_back=last_back,
            return_aux_img=return_aux_img,
            grad_points=grad_points,
            camera_pos=camera_pos,
            camera_lookup=camera_lookup,
        )
        return imgs, pitch_yaw
    else:
        imgs, pitch_yaw = self.whole_grad_forward(
            style_dict=style_dict,
            img_size=img_size,
            fov=fov,
            ray_start=ray_start,
            ray_end=ray_end,
            num_steps=num_steps,
            h_stddev=h_stddev,
            v_stddev=v_stddev,
            h_mean=h_mean,
            v_mean=v_mean,
            hierarchical_sample=hierarchical_sample,
            sample_dist=sample_dist,
            lock_view_dependence=lock_view_dependence,
            clamp_mode=clamp_mode,
            nerf_noise=nerf_noise,
            white_back=white_back,
            last_back=last_back,
            return_aux_img=return_aux_img,
            forward_points=forward_points,
            camera_pos=camera_pos,
            camera_lookup=camera_lookup,
        )
        return imgs, pitch_yaw
@MODEL_REGISTRY.register(name_prefix=__name__)
class GeneratorNerfINR_freeze_NeRF(Generator_Diffcam):
    """Variant that keeps the NeRF branch frozen (no_grad) and trains the rest.

    NOTE(review): several attributes used here (mapping_network_nerf,
    mapping_network_inr, nerf_rgb_mapping, get_fine_points_and_direction)
    are not defined on the visible Generator_Diffcam — presumably supplied
    by a different base/monkey-patch; confirm before use.
    """

    def load_nerf_ema(self, G_ema):
        # Copy EMA weights into the frozen (and INR) branches.
        ret = self.nerf_net.load_state_dict(G_ema.nerf_net.state_dict())
        ret = self.mapping_network_nerf.load_state_dict(G_ema.mapping_network_nerf.state_dict())
        ret = self.aux_to_rbg.load_state_dict(G_ema.aux_to_rbg.state_dict())

        ret = self.mapping_network_inr.load_state_dict(G_ema.mapping_network_inr.state_dict())
        ret = self.nerf_rgb_mapping.load_state_dict(G_ema.nerf_rgb_mapping.state_dict())
        pass

    def mapping_network(self,
                        z_nerf,
                        z_inr):
        """Map latents to styles; the NeRF mapping runs without gradients."""
        style_dict = {}
        with torch.no_grad():
            style_dict.update(self.mapping_network_nerf(z_nerf))
        style_dict.update(self.mapping_network_inr(z_inr))
        # Re-map the NeRF rgb style through a trainable head.
        style_dict['nerf_rgb'] = self.nerf_rgb_mapping(style_dict['nerf_rgb'])

        return style_dict

    def points_forward(self,
                       style_dict,
                       transformed_points,
                       transformed_ray_directions_expanded,
                       num_steps,
                       hierarchical_sample,
                       z_vals,
                       clamp_mode,
                       nerf_noise,
                       transformed_ray_origins,
                       transformed_ray_directions,
                       white_back,
                       last_back,
                       return_aux_img,
                       idx_grad=None,
                       ):
        """Render points with the frozen NeRF, then the trainable INR net.

        :param style_dict: {name: (b, style_dim), ...}
        :param transformed_points: (b, n, s, 3)
        :param transformed_ray_directions_expanded: (b, n, s, 3)
        :param num_steps: sampled points along a ray
        :param hierarchical_sample: enable the NeRF fine pass
        :param z_vals: (b, n, s, 1)
        :param clamp_mode: 'relu'
        :param nerf_noise: sigma noise std
        :param transformed_ray_origins: (b, n, 3)
        :param transformed_ray_directions: (b, n, 3)
        :param white_back: render on white background
        :param last_back: use the last sample as background
        :param idx_grad: optional ray-subset indices
        :return: (inr_img, aux_img) — aux_img is None unless return_aux_img
        """
        device = transformed_points.device
        if idx_grad is not None:
            # Restrict all tensors to the selected subset of rays.
            transformed_points = comm_utils.gather_points(points=transformed_points, idx_grad=idx_grad)
            transformed_ray_directions_expanded = comm_utils.gather_points(
                points=transformed_ray_directions_expanded, idx_grad=idx_grad)
            z_vals = comm_utils.gather_points(points=z_vals, idx_grad=idx_grad)
            transformed_ray_origins = comm_utils.gather_points(points=transformed_ray_origins, idx_grad=idx_grad)
            transformed_ray_directions = comm_utils.gather_points(points=transformed_ray_directions, idx_grad=idx_grad)

        transformed_points = rearrange(transformed_points, "b n s c -> b (n s) c")
        transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded, "b n s c -> b (n s) c")

        # Model prediction on coarse points (frozen NeRF: no gradients).
        with torch.no_grad():
            coarse_output = self.nerf_net(
                x=transformed_points,  # (b, n x s, 3)
                style_dict=style_dict,
                ray_directions=transformed_ray_directions_expanded,
            )
            coarse_output = rearrange(coarse_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)

        # Re-sample fine points along camera rays, as described in NeRF.
        if hierarchical_sample:
            fine_points, fine_z_vals = self.get_fine_points_and_direction(
                coarse_output=coarse_output,
                z_vals=z_vals,
                dim_rgb=self.nerf_net.rgb_dim,
                clamp_mode=clamp_mode,
                nerf_noise=nerf_noise,
                num_steps=num_steps,
                transformed_ray_origins=transformed_ray_origins,
                transformed_ray_directions=transformed_ray_directions
            )

            # Model prediction on re-sampled fine points (still frozen).
            with torch.no_grad():
                fine_output = self.nerf_net(
                    x=fine_points,  # (b, n x s, 3)
                    style_dict=style_dict,
                    ray_directions=transformed_ray_directions_expanded,  # (b, n x s, 3)
                )
                fine_output = rearrange(fine_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)

            # Combine coarse and fine points, sorted by depth along each ray.
            all_outputs = torch.cat([fine_output, coarse_output], dim=-2)  # (b, n, s, dim_rgb_sigma)
            all_z_vals = torch.cat([fine_z_vals, z_vals], dim=-2)  # (b, n, s, 1)
            _, indices = torch.sort(all_z_vals, dim=-2)  # (b, n, s, 1)
            all_z_vals = torch.gather(all_z_vals, -2, indices)  # (b, n, s, 1)
            # (b, n, s, dim_rgb_sigma)
            all_outputs = torch.gather(all_outputs, -2, indices.expand(-1, -1, -1, all_outputs.shape[-1]))
        else:
            all_outputs = coarse_output
            all_z_vals = z_vals

        # Create feature pixels with volume rendering.
        pixels_fea, depth, weights = pigan_utils.fancy_integration(
            rgb_sigma=all_outputs,
            z_vals=all_z_vals,
            device=device,
            dim_rgb=self.nerf_net.rgb_dim,
            white_back=white_back,
            last_back=last_back,
            clamp_mode=clamp_mode,
            noise_std=nerf_noise)

        inr_img = self.inr_net(pixels_fea, style_dict)

        if return_aux_img:
            # aux rgb_branch (frozen, so no gradients).
            with torch.no_grad():
                aux_img = self.aux_to_rbg(pixels_fea)
        else:
            aux_img = None

        return inr_img, aux_img
| 32.668826 | 120 | 0.571721 | from itertools import chain
import math
import logging
import collections
from collections import OrderedDict
import tqdm
import random
import time
from einops import rearrange, repeat
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast
from tl2.proj.fvcore import MODEL_REGISTRY, build_model
from tl2.launch.launch_utils import global_cfg
from tl2.proj.pytorch.pytorch_hook import VerboseModel
from tl2.proj.pytorch import torch_utils
from tl2.proj.pytorch import torch_utils, init_func
from tl2 import tl2_utils
from tl2.proj.pytorch.examples.nerf import cam_params
from tl2.proj.pytorch.examples.nerf import volume_rendering
from tl2.proj.pytorch.examples.networks import nerf_net
from tl2.proj.pytorch.examples.networks import multi_head_mapping
from tl2.proj.pytorch.examples.networks import cips_net
from exp.pigan import pigan_utils
from exp.dev.nerf_inr.models.generator_nerf_inr import INRNetwork
from exp.dev.nerf_inr.models.generator_nerf_inr import GeneratorNerfINR as GeneratorNerfINR_base
from exp.comm import comm_utils
from exp.comm.models import nerf_network
from exp.comm.models import inr_network
from exp.comm.models import film_layer
from exp.comm.models import mod_conv_fc
class SkipLayer(nn.Module):
    """Additive skip connection: forward(x0, x1) == x0 + x1."""

    def __init__(self, ):
        super(SkipLayer, self).__init__()

    def forward(self, x0, x1):
        return x0 + x1
class SinAct(nn.Module):
    """Elementwise sine activation."""

    def __init__(self, ):
        super(SinAct, self).__init__()

    def forward(self, x):
        return torch.sin(x)


class LinearSinAct(nn.Module):
    """A linear layer followed by a sine activation."""

    def __init__(self,
                 in_features,
                 out_features):
        super(LinearSinAct, self).__init__()
        self.linear = nn.Linear(in_features=in_features, out_features=out_features)
        self.sin = SinAct()
        pass

    def forward(self, x, *args, **kwargs):
        # Extra args are accepted (and ignored) for interface compatibility.
        return self.sin(self.linear(x))
class FiLMLayer(nn.Module):
    """FiLM-style layer: linear -> RMS norm -> (1 + gain) * x + bias -> LeakyReLU."""

    def __init__(self,
                 in_dim,
                 out_dim,
                 style_dim,
                 use_style_fc=True,
                 which_linear=nn.Linear,
                 **kwargs):
        super(FiLMLayer, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.use_style_fc = use_style_fc

        self.linear = which_linear(in_dim, out_dim)
        self.gain_scale = nn.Identity()
        if use_style_fc:
            # Dedicated heads predict gain and bias from the style code.
            self.gain_fc = which_linear(style_dim, out_dim)
            self.bias_fc = which_linear(style_dim, out_dim)
        else:
            # Style itself packs [gain | bias].
            self.style_dim = out_dim * 2

        self.sin = SinAct()
        self.lrelu = nn.LeakyReLU(0.2, inplace=True)
        pass

    def forward(self,
                x,
                style):
        if self.use_style_fc:
            gain = self.gain_scale(self.gain_fc(style))
            bias = self.bias_fc(style)
        else:
            packed = rearrange(style, "b (n c) -> b n c", n=2)
            gain, bias = packed.unbind(dim=1)
            gain = self.gain_scale(gain)

        if x.dim() == 3:
            # Broadcast the per-sample modulation over the point dimension.
            gain = rearrange(gain, "b c -> b 1 c")
            bias = rearrange(bias, "b c -> b 1 c")
        else:
            assert x.dim() == 2

        h = self.linear(x)
        # RMS-normalize before modulation.
        h = h * torch.rsqrt(torch.mean(h ** 2, dim=-1, keepdim=True) + 1e-8)
        return self.lrelu((gain + 1.) * h + bias)

    def __repr__(self):
        s = f'{self.__class__.__name__}(' \
            f'in_dim={self.in_dim}, ' \
            f'out_dim={self.out_dim}, ' \
            f'style_dim={self.style_dim}, ' \
            f'use_style_fc={self.use_style_fc}, ' \
            f')'
        return s
class INRNetwork_Skip(nn.Module):
    """INR render network: a stack of FiLM layers with per-stage skip-to-RGB heads."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 input_dim,
                 style_dim,
                 hidden_layers,
                 dim_scale=1,
                 rgb_dim=3,
                 device=None,
                 name_prefix='inr',
                 **kwargs):
        """
        :param input_dim: dim of the input features
        :param style_dim: dim of each style code
        :param hidden_layers: number of stages (two FiLM layers per stage)
        :param dim_scale: width multiplier for the channel table below
        :param rgb_dim: nominal output channels
            # NOTE(review): to_rbgs below hard-codes dim_rgb=3 — confirm.
        :param device: stored but unused here
        :param name_prefix: prefix for style-dict keys
        """
        super().__init__()
        # String consumed by __repr__.
        self.repr = f"input_dim={input_dim}, " \
                    f"style_dim={style_dim}, " \
                    f"hidden_layers={hidden_layers}, " \
                    f"dim_scale={dim_scale}, "

        self.device = device
        self.rgb_dim = rgb_dim
        self.hidden_layers = hidden_layers
        self.name_prefix = name_prefix

        # Channel width per stage index.
        self.channels = {
            0: int(512 * dim_scale),
            1: int(512 * dim_scale),
            2: int(512 * dim_scale),
            3: int(512 * dim_scale),
            4: int(512 * dim_scale),
            5: int(128 * dim_scale),
            6: int(64 * dim_scale),
            7: int(32 * dim_scale),
            8: int(16 * dim_scale),
        }

        # Maps style-dict keys to the style dim each FiLM layer expects.
        self.style_dim_dict = {}

        _out_dim = input_dim

        self.network = nn.ModuleList()
        self.to_rbgs = nn.ModuleList()
        for i in range(hidden_layers):
            _in_dim = _out_dim
            _out_dim = self.channels[i]

            # Two FiLM layers per stage.
            _layer = film_layer.FiLMLayer(in_dim=_in_dim,
                                          out_dim=_out_dim,
                                          style_dim=style_dim)
            self.network.append(_layer)
            self.style_dim_dict[f'{name_prefix}_w{i}_0'] = _layer.style_dim

            _layer = film_layer.FiLMLayer(in_dim=_out_dim,
                                          out_dim=_out_dim,
                                          style_dim=style_dim)
            self.network.append(_layer)
            self.style_dim_dict[f'{name_prefix}_w{i}_1'] = _layer.style_dim

            # Per-stage skip-to-RGB head; RGB is accumulated across stages.
            to_rgb = inr_network.ToRGB(in_dim=_out_dim, dim_rgb=3)
            self.to_rbgs.append(to_rgb)

        self.tanh = nn.Sequential(
            nn.Tanh()
        )

        torch_utils.print_number_params(
            {
                'network': self.network,
                'to_rbgs': self.to_rbgs,
                'inr_net': self
            })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                style_dict,
                **kwargs):
        """
        :param input: input features
        :param style_dict: {f'{name_prefix}_w{i}_{j}': (b, style_dim), ...}
        :return: tanh-squashed RGB accumulated over all stages
        """
        x = input
        rgb = 0
        for index in range(self.hidden_layers):
            # First FiLM layer of the stage.
            _layer = self.network[index * 2]
            style = style_dict[f'{self.name_prefix}_w{index}_0']
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(_layer,
                                             inputs_args=(x, style),
                                             name_prefix=f"{self.name_prefix}.network.{index}.0.")
            x = _layer(x, style)

            # Second FiLM layer of the stage.
            _layer = self.network[index * 2 + 1]
            style = style_dict[f'{self.name_prefix}_w{index}_1']
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(_layer,
                                             inputs_args=(x, style),
                                             name_prefix=f"{self.name_prefix}.network.{index}.1.")
            x = _layer(x, style)

            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(self.to_rbgs[index],
                                             inputs_args=(x, rgb),
                                             name_prefix=f'to_rgb.{index}')
            # Accumulate RGB through the skip heads.
            rgb = self.to_rbgs[index](x, skip=rgb)

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.tanh,
                                         inputs_args=(rgb, ),
                                         name_prefix='tanh.')
        out = self.tanh(rgb)
        return out
class ModSinLayer(nn.Module):
    """Pixel-norm followed by a style-conditioned affine modulation and LeakyReLU."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 in_dim,
                 use_style_fc=False,
                 style_dim=None,
                 which_linear=nn.Linear,
                 spectral_norm=False,
                 eps=1e-5,
                 freq=1,
                 phase=0,
                 **kwargs):
        super(ModSinLayer, self).__init__()
        self.repr = (f"in_dim={in_dim}, use_style_fc={use_style_fc}, "
                     f"style_dim={style_dim}, freq={freq}, phase={phase}")

        self.in_dim = in_dim
        self.use_style_fc = use_style_fc
        self.style_dim = style_dim
        self.freq = freq
        self.phase = phase
        self.spectral_norm = spectral_norm

        if use_style_fc:
            # Two linear heads predict gain and bias from the style code.
            gain_fc = which_linear(style_dim, in_dim)
            bias_fc = which_linear(style_dim, in_dim)
            if spectral_norm:
                gain_fc = nn.utils.spectral_norm(gain_fc)
                bias_fc = nn.utils.spectral_norm(bias_fc)
            self.gain_fc = gain_fc
            self.bias_fc = bias_fc
        else:
            # Style packs [gain | bias] along the channel dimension.
            self.style_dim = in_dim * 2

        self.eps = eps
        self.lrelu = nn.LeakyReLU(0.2, inplace=True)
        pass

    def forward(self,
                x,
                style):
        """Pixel-norm `x`, then apply the style-conditioned affine + LeakyReLU.

        :param x: (b, c) or (b, n, c)
        :param style: (b, style_dim)
        """
        assert style.shape[-1] == self.style_dim

        if self.use_style_fc:
            scale = self.gain_fc(style) + 1.
            shift = self.bias_fc(style)
        else:
            packed = rearrange(style, "b (n c) -> b n c", n=2)
            scale, shift = packed.unbind(dim=1)
            scale = scale + 1.

        if x.dim() == 3:
            scale = rearrange(scale, "b c -> b 1 c")
            shift = rearrange(shift, "b c -> b 1 c")
        else:
            assert x.dim() == 2

        normed = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-8)
        return self.lrelu(normed * scale + shift)
class ModSinLayer_NoBias(nn.Module):
    """Sine activation scaled by a style-conditioned gain (no bias term)."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 in_dim,
                 use_style_fc=False,
                 style_dim=None,
                 which_linear=nn.Linear,
                 spectral_norm=False,
                 eps=1e-5,
                 freq=1,
                 phase=0,
                 **kwargs):
        super(ModSinLayer_NoBias, self).__init__()
        self.repr = (f"in_dim={in_dim}, use_style_fc={use_style_fc}, "
                     f"style_dim={style_dim}, freq={freq}, phase={phase}")

        self.in_dim = in_dim
        self.use_style_fc = use_style_fc
        self.style_dim = style_dim
        self.freq = freq
        self.phase = phase
        self.spectral_norm = spectral_norm

        if use_style_fc:
            # Single linear head predicting the multiplicative gain.
            gain_fc = which_linear(style_dim, in_dim)
            if spectral_norm:
                gain_fc = nn.utils.spectral_norm(gain_fc)
            self.gain_fc = gain_fc
        else:
            self.style_dim = in_dim * 2
        self.eps = eps
        pass

    def forward(self,
                x,
                style):
        """Apply sin(freq * x + phase), then scale by the style-derived gain."""
        assert style.shape[-1] == self.style_dim

        if self.use_style_fc:
            gain = self.gain_fc(style) + 1.
        else:
            packed = rearrange(style, "b (n c) -> b n c", n=2)
            gain, _bias = packed.unbind(dim=1)  # bias intentionally unused
            gain = gain + 1.

        if x.dim() == 3:
            gain = rearrange(gain, "b c -> b 1 c")
        else:
            assert x.dim() == 2

        return torch.sin(self.freq * x + self.phase) * gain
class SinBlock(nn.Module):
  """Two SinStyleMod layers with LeakyReLU activations and an optional skip.

  Builds ``style_dim_dict`` mapping ``f"{name_prefix}_0"`` / ``_1`` to the
  style widths the two modulation layers expect, so a mapping network can
  be sized to match.
  """
  def __init__(self,
               in_dim,
               out_dim,
               style_dim,
               name_prefix,
               ):
    super().__init__()
    self.in_dim = in_dim
    self.out_dim = out_dim
    self.style_dim = style_dim
    self.name_prefix = name_prefix
    self.style_dim_dict = {}
    # Bug fix: these two layers were assigned to an undefined name `f`
    # (NameError at construction); the rest of the class reads them from
    # `self.mod1` / `self.mod2`, so they must live on `self`.
    self.mod1 = mod_conv_fc.SinStyleMod(in_channel=in_dim,
                                        out_channel=out_dim,
                                        style_dim=style_dim,
                                        use_style_fc=True,
                                        )
    self.style_dim_dict[f'{name_prefix}_0'] = self.mod1.style_dim
    self.act1 = nn.LeakyReLU(0.2, inplace=True)
    self.mod2 = mod_conv_fc.SinStyleMod(in_channel=out_dim,
                                        out_channel=out_dim,
                                        style_dim=style_dim,
                                        use_style_fc=True,
                                        )
    self.style_dim_dict[f'{name_prefix}_1'] = self.mod2.style_dim
    self.act2 = nn.LeakyReLU(0.2, inplace=True)
    self.skip = SkipLayer()

  def forward(self,
              x,
              style_dict,
              skip=False):
    """Run mod -> act -> mod -> act; optionally skip-connect the input.

    The skip is only applied when requested AND the channel widths match.
    """
    x_orig = x
    style = style_dict[f'{self.name_prefix}_0']
    x = self.mod1(x, style)
    x = self.act1(x)
    style = style_dict[f'{self.name_prefix}_1']
    x = self.mod2(x, style)
    out = self.act2(x)
    if skip and out.shape[-1] == x_orig.shape[-1]:
      out = self.skip(out, x_orig)
    return out

  def __repr__(self):
    repr = f"{self.__class__.__name__}(in_dim={self.in_dim}, " \
           f"out_dim={self.out_dim}, " \
           f"style_dim={self.style_dim})"
    return repr
class ToRGB(nn.Module):
  """Project features to RGB (or ``dim_rgb`` channels) with one linear
  layer, optionally adding a skip tensor to the result."""

  def __init__(self, in_dim, dim_rgb=3, use_equal_fc=False):
    super().__init__()
    self.in_dim = in_dim
    self.dim_rgb = dim_rgb
    if use_equal_fc:
      # Equalized-lr linear from the project-local modulation package.
      self.linear = mod_conv_fc.EqualLinear(in_dim, dim_rgb, scale=1.)
    else:
      self.linear = nn.Linear(in_dim, dim_rgb)

  def forward(self, input, skip=None):
    rgb = self.linear(input)
    return rgb if skip is None else rgb + skip
@MODEL_REGISTRY.register(name_prefix=__name__)
class Generator_Diffcam(nn.Module):
  """NeRF + INR generator driven by externally supplied camera rays.

  Pipeline: three mapping networks (shape / appearance / INR) turn latent
  codes into a style dict; a SIREN NeRF produces per-sample features that
  are volume-rendered into per-ray features; a CIPS-style INR net decodes
  them to an image.  An auxiliary head (``aux_to_rbg``) can decode the raw
  ray features directly to RGB for an extra discriminator signal.
  """
  def __repr__(self):
    return tl2_utils.get_class_repr(self)
  def __init__(self,
               nerf_cfg,
               mapping_shape_cfg,
               mapping_app_cfg,
               inr_cfg,
               mapping_inr_cfg,
               shape_block_end_index=None,
               app_block_end_index=None,
               inr_block_end_index=None,
               device='cuda',
               **kwargs):
    """Build the NeRF backbone, the three mapping nets and the INR decoder.

    The ``*_cfg`` arguments are config objects forwarded to the respective
    project-local constructors; ``*_block_end_index`` truncate the
    corresponding sub-networks at an intermediate block.
    """
    super(Generator_Diffcam, self).__init__()
    # Human-readable config summary used by __repr__ via tl2 utilities.
    self.repr_str = tl2_utils.dict2string(dict_obj={
      'nerf_cfg': nerf_cfg,
      'mapping_shape_cfg': mapping_shape_cfg,
      'mapping_app_cfg': mapping_app_cfg,
      'inr_cfg': inr_cfg,
      'mapping_inr_cfg': mapping_inr_cfg,
      'shape_block_end_index': shape_block_end_index,
      'app_block_end_index': app_block_end_index,
      'inr_block_end_index': inr_block_end_index,
    })
    self.device = device
    self.inr_block_end_index = inr_block_end_index
    # Names collected here are used below for parameter-count logging.
    self.module_name_list = []
    self.nerf_net = nerf_net.NeRFNetwork_SIREN_skip(
      shape_block_end_index=shape_block_end_index,
      app_block_end_index=app_block_end_index,
      **nerf_cfg)
    self.module_name_list.append('nerf_net')
    # Mapping nets are sized from the style dims the NeRF/INR nets expose.
    self.mapping_shape = multi_head_mapping.MultiHeadMappingNetwork(**{
      **mapping_shape_cfg,
      'head_dim_dict': self.nerf_net.style_dim_dict_shape
    })
    self.module_name_list.append('mapping_shape')
    self.mapping_app = multi_head_mapping.MultiHeadMappingNetwork(**{
      **mapping_app_cfg,
      'head_dim_dict': self.nerf_net.style_dim_dict_app
    })
    self.module_name_list.append('mapping_app')
    # Feature width handed from the NeRF appearance net to the INR decoder.
    _in_dim = nerf_cfg.app_net_cfg.out_dim
    self.inr_net = cips_net.CIPSNet(**{
      **inr_cfg,
      "input_dim": _in_dim,
      'add_out_layer': True,
    })
    self.module_name_list.append('inr_net')
    self.mapping_inr = multi_head_mapping.MultiHeadMappingNetwork(**{
      **mapping_inr_cfg,
      'head_dim_dict': self.inr_net.style_dim_dict
    })
    self.module_name_list.append('mapping_inr')
    # Auxiliary feature->RGB head (SIREN-style frequency init).
    self.aux_to_rbg = nn.Sequential(
      nn.Linear(_in_dim, 3),
      nn.Tanh()
    )
    self.aux_to_rbg.apply(nerf_network.frequency_init(25))
    self.module_name_list.append('aux_to_rbg')
    logger = logging.getLogger('tl')
    models_dict = {}
    for name in self.module_name_list:
      models_dict[name] = getattr(self, name)
    models_dict['G'] = self
    torch_utils.print_number_params(models_dict=models_dict, logger=logger)
    logger.info(self)
    pass
  def forward(self,
              zs,
              rays_o,
              rays_d,
              nerf_kwargs={},
              psi=1,
              return_aux_img=False,
              grad_points=None,
              forward_points=None,
              **kwargs):
    """Render images from latent codes and per-pixel camera rays.

    ``rays_o``/``rays_d`` are (b, h, w, c) and are flattened to
    (b, h*w, c) for rendering; outputs are reshaped back to image layout.
    ``grad_points`` limits how many rays carry gradients (training-time
    memory saving); ``forward_points`` chunks inference-only rendering.
    ``psi`` < 1 applies style truncation toward the average style.
    NOTE(review): ``nerf_kwargs={}`` is a mutable default; it is only read
    here, so it is harmless, but callers should still pass their own dict.
    """
    style_dict = self.mapping_network(**zs)
    if psi < 1:
      avg_styles = self.generate_avg_frequencies(device=self.device)
      style_dict = self.get_truncated_freq_phase(
        raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)
    b, h, w, c = rays_o.shape
    rays_o = rearrange(rays_o, "b h w c -> b (h w) c")
    rays_d = rearrange(rays_d, "b h w c -> b (h w) c")
    if grad_points is not None and grad_points < h * w:
      imgs, ret_maps = self.part_grad_forward(
        rays_o=rays_o,
        rays_d=rays_d,
        style_dict=style_dict,
        nerf_kwargs=nerf_kwargs,
        return_aux_img=return_aux_img,
        grad_points=grad_points)
    else:
      imgs, ret_maps = self.whole_grad_forward(
        rays_o=rays_o,
        rays_d=rays_d,
        style_dict=style_dict,
        nerf_kwargs=nerf_kwargs,
        return_aux_img=return_aux_img,
        forward_points=forward_points)
    imgs = rearrange(imgs, "b (h w) c -> b c h w", h=h, w=w)
    # Reshape every returned auxiliary map back to image layout as well.
    ret_imgs = {}
    for name, v_map in ret_maps.items():
      if v_map.dim() == 3:
        v_map = rearrange(v_map, "b (h w) c -> b c h w", h=h, w=w)
      elif v_map.dim() == 2:
        v_map = rearrange(v_map, "b (h w) -> b h w", h=h, w=w)
      ret_imgs[name] = v_map
    return imgs, ret_imgs
  def get_rays_axis_angle(self,
                          R,
                          t,
                          fx,
                          fy,
                          H: int,
                          W: int,
                          N_rays: int = -1):
    """Build (un-flattened) camera rays from an axis-angle pose via the
    project's differentiable camera utilities."""
    rays_o, rays_d, select_inds = cam_params.get_rays(
      rot=R,
      trans=t,
      focal_x=fx,
      focal_y=fy,
      H=H,
      W=W,
      N_rays=N_rays,
      flatten=False)
    return rays_o, rays_d, select_inds
  def get_batch_style_dict(self, b, style_dict):
    """Slice out sample ``b`` from every style tensor (keeping batch dim)."""
    ret_style_dict = {}
    for name, style in style_dict.items():
      ret_style_dict[name] = style[[b]]
    return ret_style_dict
  def whole_grad_forward(self,
                         rays_o,
                         rays_d,
                         style_dict,
                         nerf_kwargs,
                         return_aux_img=True,
                         forward_points=None,
                         **kwargs):
    """Render all rays at once, or — when ``forward_points`` is set and
    smaller than the ray count — render no-grad, per-sample, in chunks of
    ``forward_points`` rays to bound peak memory."""
    if forward_points is not None and forward_points < rays_o.shape[1]:
      with torch.no_grad():
        batch_size = rays_o.shape[0]
        num_points = rays_o.shape[1]
        near = nerf_kwargs['near']
        far = nerf_kwargs['far']
        N_samples = nerf_kwargs['N_samples']
        # Stratified-sampling jitter only while training.
        perturb = self.training
        z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                            rays_d=rays_d,
                                                            near=near,
                                                            far=far,
                                                            N_samples=N_samples,
                                                            perturb=perturb)
        batch_image_ddict = collections.defaultdict(list)
        for b in range(batch_size):
          image_ddict = collections.defaultdict(list)
          head = 0
          while head < num_points:
            tail = head + forward_points
            cur_style_dict = self.get_batch_style_dict(b=b, style_dict=style_dict)
            cur_inr_img, cur_ret_maps = self.points_forward(
              rays_o=rays_o[[b], head:tail],
              rays_d=rays_d[[b], head:tail],
              points=points[[b], head:tail],
              z_vals=z_vals[[b], head:tail],
              style_dict=cur_style_dict,
              nerf_kwargs=nerf_kwargs,
              return_aux_img=return_aux_img)
            image_ddict['inr_img'].append(cur_inr_img)
            for k, v in cur_ret_maps.items():
              image_ddict[k].append(v)
            head += forward_points
          # Stitch the ray chunks of this sample back together (dim=1).
          for k, v in image_ddict.items():
            one_image = torch.cat(v, dim=1)
            batch_image_ddict[k].append(one_image)
        # Stack per-sample results back into a batch (dim=0).
        ret_maps = {}
        for k, v in batch_image_ddict.items():
          ret_maps[k] = torch.cat(v, dim=0)
        imgs = ret_maps.pop('inr_img')
    else:
      near = nerf_kwargs['near']
      far = nerf_kwargs['far']
      N_samples = nerf_kwargs['N_samples']
      perturb = self.training
      z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                          rays_d=rays_d,
                                                          near=near,
                                                          far=far,
                                                          N_samples=N_samples,
                                                          perturb=perturb)
      imgs, ret_maps = self.points_forward(
        rays_o=rays_o,
        rays_d=rays_d,
        points=points,
        z_vals=z_vals,
        style_dict=style_dict,
        nerf_kwargs=nerf_kwargs,
        return_aux_img=return_aux_img)
    return imgs, ret_maps
  def part_grad_forward(self,
                        rays_o,
                        rays_d,
                        style_dict,
                        nerf_kwargs,
                        return_aux_img,
                        grad_points):
    """Render with gradients for a random subset of ``grad_points`` rays
    and without gradients for the rest, then scatter both results back
    into full-resolution maps (training-time memory saving)."""
    near = nerf_kwargs['near']
    far = nerf_kwargs['far']
    N_samples = nerf_kwargs['N_samples']
    perturb = self.training
    z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                        rays_d=rays_d,
                                                        near=near,
                                                        far=far,
                                                        N_samples=N_samples,
                                                        perturb=perturb)
    batch_size = rays_o.shape[0]
    num_points = rays_o.shape[1]
    device = self.device
    assert num_points > grad_points
    idx_grad, idx_no_grad = torch_utils.batch_random_split_indices(bs=batch_size,
                                                                   num_points=num_points,
                                                                   grad_points=grad_points,
                                                                   device=device)
    # Gradient-carrying subset.
    inr_img_grad, ret_maps_grad = self.points_forward(
      rays_o=rays_o,
      rays_d=rays_d,
      points=points,
      z_vals=z_vals,
      style_dict=style_dict,
      nerf_kwargs=nerf_kwargs,
      return_aux_img=return_aux_img,
      idx_grad=idx_grad)
    # Remaining rays rendered without autograd bookkeeping.
    with torch.no_grad():
      inr_img_no_grad, ret_maps_no_grad = self.points_forward(
        rays_o=rays_o,
        rays_d=rays_d,
        points=points,
        z_vals=z_vals,
        style_dict=style_dict,
        nerf_kwargs=nerf_kwargs,
        return_aux_img=return_aux_img,
        idx_grad=idx_no_grad)
    imgs = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                           points_grad=inr_img_grad,
                                           idx_no_grad=idx_no_grad,
                                           points_no_grad=inr_img_no_grad,
                                           num_points=num_points)
    ret_maps = {}
    for k in ret_maps_grad.keys():
      comp_map = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                                 points_grad=ret_maps_grad[k],
                                                 idx_no_grad=idx_no_grad,
                                                 points_no_grad=ret_maps_no_grad[k],
                                                 num_points=num_points)
      ret_maps[k] = comp_map
    return imgs, ret_maps
  def points_forward(self,
                     rays_o,
                     rays_d,
                     points,
                     z_vals,
                     style_dict,
                     nerf_kwargs,
                     return_aux_img,
                     idx_grad=None,
                     **kwargs):
    """Coarse (and optionally importance-sampled fine) NeRF evaluation
    followed by volume integration and INR decoding for a set of rays.

    When ``idx_grad`` is given, only those ray indices are rendered.
    Returns (inr_img, ret_maps); ``ret_maps`` may gain an 'aux_img' entry.
    """
    device = points.device
    viewdirs = volume_rendering.get_viewdirs(rays_d=rays_d)
    N_samples = nerf_kwargs['N_samples']
    if idx_grad is not None:
      rays_o = comm_utils.batch_gather_points(points=rays_o, idx_grad=idx_grad)
      rays_d = comm_utils.batch_gather_points(points=rays_d, idx_grad=idx_grad)
      points = comm_utils.batch_gather_points(points=points, idx_grad=idx_grad)
      z_vals = comm_utils.batch_gather_points(points=z_vals, idx_grad=idx_grad)
    # Flatten (rays, samples) so the NeRF sees a plain point batch.
    points = rearrange(points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
    coarse_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=N_samples)
    coarse_output = self.nerf_net(
      x=points,
      ray_directions=coarse_viewdirs,
      style_dict=style_dict)
    coarse_output = rearrange(
      coarse_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=N_samples)
    if nerf_kwargs['N_importance'] > 0:
      # Hierarchical sampling: draw fine samples from the coarse sigma
      # distribution (no gradients through the proposal step).
      with torch.no_grad():
        raw_sigma = coarse_output[..., -1]
        perturb = self.training
        fine_z_vals, fine_points = volume_rendering.get_fine_points(
          z_vals=z_vals,
          rays_o=rays_o,
          rays_d=rays_d,
          raw_sigma=raw_sigma,
          N_importance=nerf_kwargs['N_importance'],
          perturb=perturb,
          raw_noise_std=nerf_kwargs['raw_noise_std'],
          eps=nerf_kwargs['eps'])
      fine_points = rearrange(fine_points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
      fine_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=nerf_kwargs['N_importance'])
      fine_output = self.nerf_net(
        x=fine_points,
        ray_directions=fine_viewdirs,
        style_dict=style_dict)
      fine_output = rearrange(
        fine_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=nerf_kwargs['N_importance'])
      # Merge coarse + fine samples and sort them by depth along dim 2.
      DIM_SAMPLES = 2
      all_z_vals = torch.cat([fine_z_vals, z_vals], dim=DIM_SAMPLES)
      _, indices = torch.sort(all_z_vals, dim=DIM_SAMPLES)
      all_z_vals = torch.gather(all_z_vals, DIM_SAMPLES, indices)
      all_outputs = torch.cat([fine_output, coarse_output], dim=DIM_SAMPLES)
      # Expand the sort indices over the trailing rgb_sigma dim(s).
      view_shape = [*indices.shape, *(len(all_outputs.shape) - len(indices.shape)) * [1]]
      all_outputs = torch.gather(all_outputs, DIM_SAMPLES, indices.view(view_shape).expand_as(all_outputs))
    else:
      all_outputs = coarse_output
      all_z_vals = z_vals
    # Last channel is sigma; the rest are feature channels.
    all_raw_rgb = all_outputs[..., :-1]
    all_raw_sigma = all_outputs[..., -1]
    pixels_fea, ret_maps = volume_rendering.ray_integration(raw_rgb=all_raw_rgb,
                                                            raw_sigma=all_raw_sigma,
                                                            z_vals=all_z_vals,
                                                            rays_d=rays_d,
                                                            raw_noise_std=nerf_kwargs['raw_noise_std'],
                                                            eps=nerf_kwargs['eps'])
    inr_img = self.inr_net(pixels_fea, style_dict, block_end_index=self.inr_block_end_index)
    if return_aux_img:
      aux_img = self.aux_to_rbg(pixels_fea)
      ret_maps['aux_img'] = aux_img
    return inr_img, ret_maps
  def z_sampler(self,
                shape,
                device,
                dist='gaussian'):
    """Sample a latent tensor; gaussian N(0,1) or uniform in [-1, 1).

    NOTE(review): an unknown ``dist`` leaves ``z`` unbound and raises
    UnboundLocalError at the return — callers must pass a valid value.
    """
    if dist == 'gaussian':
      z = torch.randn(shape, device=device)
    elif dist == 'uniform':
      z = torch.rand(shape, device=device) * 2 - 1
    return z
  def get_zs(self,
             b,
             batch_split=1):
    """Sample the three latent codes for a batch of ``b`` samples.

    With ``batch_split`` > 1 the batch is split into that many chunks and
    a list of z-dicts is returned; otherwise a single dict.
    """
    z_shape = self.z_sampler(shape=(b, self.mapping_shape.z_dim), device=self.device)
    z_app = self.z_sampler(shape=(b, self.mapping_app.z_dim), device=self.device)
    z_inr = self.z_sampler(shape=(b, self.mapping_inr.z_dim), device=self.device)
    if batch_split > 1:
      zs_list = []
      z_shape_list = z_shape.split(b // batch_split)
      z_app_list = z_app.split(b // batch_split)
      z_inr_list = z_inr.split(b // batch_split)
      for z_shape_, z_app_, z_inr_ in zip(z_shape_list, z_app_list, z_inr_list):
        zs_ = {
          'z_shape': z_shape_,
          'z_app': z_app_,
          'z_inr': z_inr_,
        }
        zs_list.append(zs_)
      return zs_list
    else:
      zs = {
        'z_shape': z_shape,
        'z_app': z_app,
        'z_inr': z_inr,
      }
      return zs
  def mapping_network(self,
                      z_shape,
                      z_app,
                      z_inr):
    """Map the three latent codes to one merged style dict."""
    if global_cfg.tl_debug:
      # Verbose per-submodule shape logging in debug mode.
      VerboseModel.forward_verbose(self.mapping_shape,
                                   inputs_args=(z_shape,),
                                   submodels=['base_net'],
                                   name_prefix='mapping_shape.')
      VerboseModel.forward_verbose(self.mapping_app,
                                   inputs_args=(z_app,),
                                   submodels=['base_net'],
                                   name_prefix='mapping_app.')
      VerboseModel.forward_verbose(self.mapping_inr,
                                   inputs_args=(z_inr,),
                                   submodels=['base_net', ],
                                   input_padding=50,
                                   name_prefix='mapping_inr.')
    style_dict = {}
    style_dict.update(self.mapping_shape(z_shape))
    style_dict.update(self.mapping_app(z_app))
    style_dict.update(self.mapping_inr(z_inr))
    return style_dict
  def get_truncated_freq_phase(self,
                               raw_style_dict,
                               avg_style_dict,
                               raw_lambda):
    """Truncation trick: interpolate each style toward its average by
    factor ``raw_lambda`` (1 keeps the raw style unchanged)."""
    truncated_style_dict = {}
    for name, avg_style in avg_style_dict.items():
      raw_style = raw_style_dict[name]
      truncated_style = avg_style + raw_lambda * (raw_style - avg_style)
      truncated_style_dict[name] = truncated_style
    return truncated_style_dict
  def generate_avg_frequencies(self,
                               num_samples=10000,
                               device='cuda'):
    """Estimate the mean style vectors from ``num_samples`` random zs."""
    zs = self.get_zs(num_samples)
    with torch.no_grad():
      style_dict = self.mapping_network(**zs)
      avg_styles = {}
      for name, style in style_dict.items():
        avg_styles[name] = style.mean(0, keepdim=True)
    return avg_styles
  def staged_forward(self, *args, **kwargs):
    # Staged (progressive) inference is not implemented for this model.
    raise NotImplementedError
  def set_device(self, device):
    # Device placement is handled externally; kept for interface parity.
    pass
  def forward_camera_pos_and_lookup(self,
                                    zs,
                                    img_size,
                                    fov,
                                    ray_start,
                                    ray_end,
                                    num_steps,
                                    h_stddev,
                                    v_stddev,
                                    h_mean,
                                    v_mean,
                                    hierarchical_sample,
                                    camera_pos,
                                    camera_lookup,
                                    psi=1,
                                    sample_dist=None,
                                    lock_view_dependence=False,
                                    clamp_mode='relu',
                                    nerf_noise=0.,
                                    white_back=False,
                                    last_back=False,
                                    return_aux_img=False,
                                    grad_points=None,
                                    forward_points=None,
                                    **kwargs):
    """Render from an explicit camera position/lookup.

    NOTE(review): this looks like stale code inherited from an older
    pi-GAN-style interface — it calls ``self.part_grad_forward`` /
    ``self.whole_grad_forward`` with camera kwargs that the versions
    defined in this class do not accept (they require rays_o/rays_d),
    and references ``self.mapping_network_nerf`` which is not created in
    ``__init__``.  Verify before relying on this entry point.
    """
    if global_cfg.tl_debug:
      VerboseModel.forward_verbose(self.mapping_network_nerf,
                                   inputs_args=(zs['z_nerf'],),
                                   submodels=['base_net'],
                                   name_prefix='mapping_nerf.')
      VerboseModel.forward_verbose(self.mapping_network_inr,
                                   inputs_args=(zs['z_inr'],),
                                   submodels=['base_net', ],
                                   input_padding=50,
                                   name_prefix='mapping_inr.')
    style_dict = self.mapping_network(**zs)
    if psi < 1:
      avg_styles = self.generate_avg_frequencies(device=self.device)
      style_dict = self.get_truncated_freq_phase(
        raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)
    if grad_points is not None and grad_points < img_size ** 2:
      imgs, pitch_yaw = self.part_grad_forward(
        style_dict=style_dict,
        img_size=img_size,
        fov=fov,
        ray_start=ray_start,
        ray_end=ray_end,
        num_steps=num_steps,
        h_stddev=h_stddev,
        v_stddev=v_stddev,
        h_mean=h_mean,
        v_mean=v_mean,
        hierarchical_sample=hierarchical_sample,
        sample_dist=sample_dist,
        lock_view_dependence=lock_view_dependence,
        clamp_mode=clamp_mode,
        nerf_noise=nerf_noise,
        white_back=white_back,
        last_back=last_back,
        return_aux_img=return_aux_img,
        grad_points=grad_points,
        camera_pos=camera_pos,
        camera_lookup=camera_lookup,
      )
      return imgs, pitch_yaw
    else:
      imgs, pitch_yaw = self.whole_grad_forward(
        style_dict=style_dict,
        img_size=img_size,
        fov=fov,
        ray_start=ray_start,
        ray_end=ray_end,
        num_steps=num_steps,
        h_stddev=h_stddev,
        v_stddev=v_stddev,
        h_mean=h_mean,
        v_mean=v_mean,
        hierarchical_sample=hierarchical_sample,
        sample_dist=sample_dist,
        lock_view_dependence=lock_view_dependence,
        clamp_mode=clamp_mode,
        nerf_noise=nerf_noise,
        white_back=white_back,
        last_back=last_back,
        return_aux_img=return_aux_img,
        forward_points=forward_points,
        camera_pos=camera_pos,
        camera_lookup=camera_lookup,
      )
      return imgs, pitch_yaw
@MODEL_REGISTRY.register(name_prefix=__name__)
class GeneratorNerfINR_freeze_NeRF(Generator_Diffcam):
  """Variant of the generator that keeps the NeRF branch frozen (no-grad)
  and only trains the INR side.

  NOTE(review): several attributes used here (``mapping_network_nerf``,
  ``mapping_network_inr``, ``nerf_rgb_mapping``,
  ``get_fine_points_and_direction``) are not defined by the visible
  ``Generator_Diffcam`` — this class appears to target an older parent
  interface; verify before use.
  """
  def load_nerf_ema(self, G_ema):
    """Copy the frozen submodule weights from an EMA generator."""
    # Each load_state_dict result overwrites `ret`; only kept for debugging.
    ret = self.nerf_net.load_state_dict(G_ema.nerf_net.state_dict())
    ret = self.mapping_network_nerf.load_state_dict(G_ema.mapping_network_nerf.state_dict())
    ret = self.aux_to_rbg.load_state_dict(G_ema.aux_to_rbg.state_dict())
    ret = self.mapping_network_inr.load_state_dict(G_ema.mapping_network_inr.state_dict())
    ret = self.nerf_rgb_mapping.load_state_dict(G_ema.nerf_rgb_mapping.state_dict())
    pass
  def mapping_network(self,
                      z_nerf,
                      z_inr):
    """Build the style dict; the NeRF mapping runs without gradients."""
    style_dict = {}
    with torch.no_grad():
      style_dict.update(self.mapping_network_nerf(z_nerf))
    style_dict.update(self.mapping_network_inr(z_inr))
    # Re-map the nerf_rgb style through a trainable head.
    style_dict['nerf_rgb'] = self.nerf_rgb_mapping(style_dict['nerf_rgb'])
    return style_dict
  def points_forward(self,
                     style_dict,
                     transformed_points,
                     transformed_ray_directions_expanded,
                     num_steps,
                     hierarchical_sample,
                     z_vals,
                     clamp_mode,
                     nerf_noise,
                     transformed_ray_origins,
                     transformed_ray_directions,
                     white_back,
                     last_back,
                     return_aux_img,
                     idx_grad=None,
                     ):
    """pi-GAN-style point rendering with a frozen NeRF backbone.

    When ``idx_grad`` is given, only those points/rays are rendered.
    Returns (inr_img, aux_img); ``aux_img`` is None unless requested.
    """
    device = transformed_points.device
    if idx_grad is not None:
      transformed_points = comm_utils.gather_points(points=transformed_points, idx_grad=idx_grad)
      transformed_ray_directions_expanded = comm_utils.gather_points(
        points=transformed_ray_directions_expanded, idx_grad=idx_grad)
      z_vals = comm_utils.gather_points(points=z_vals, idx_grad=idx_grad)
      transformed_ray_origins = comm_utils.gather_points(points=transformed_ray_origins, idx_grad=idx_grad)
      transformed_ray_directions = comm_utils.gather_points(points=transformed_ray_directions, idx_grad=idx_grad)
    # Flatten (rays, samples) so the NeRF sees a plain point batch.
    transformed_points = rearrange(transformed_points, "b n s c -> b (n s) c")
    transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded, "b n s c -> b (n s) c")
    # Frozen NeRF: coarse pass without gradients.
    with torch.no_grad():
      coarse_output = self.nerf_net(
        x=transformed_points,
        style_dict=style_dict,
        ray_directions=transformed_ray_directions_expanded,
      )
    coarse_output = rearrange(coarse_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)
    if hierarchical_sample:
      # Importance-sample fine points from the coarse density.
      fine_points, fine_z_vals = self.get_fine_points_and_direction(
        coarse_output=coarse_output,
        z_vals=z_vals,
        dim_rgb=self.nerf_net.rgb_dim,
        clamp_mode=clamp_mode,
        nerf_noise=nerf_noise,
        num_steps=num_steps,
        transformed_ray_origins=transformed_ray_origins,
        transformed_ray_directions=transformed_ray_directions
      )
      with torch.no_grad():
        fine_output = self.nerf_net(
          x=fine_points,
          style_dict=style_dict,
          ray_directions=transformed_ray_directions_expanded,
        )
      fine_output = rearrange(fine_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)
      # Merge coarse + fine samples and sort by depth along the sample dim.
      all_outputs = torch.cat([fine_output, coarse_output], dim=-2)
      all_z_vals = torch.cat([fine_z_vals, z_vals], dim=-2)
      _, indices = torch.sort(all_z_vals, dim=-2)
      all_z_vals = torch.gather(all_z_vals, -2, indices)
      all_outputs = torch.gather(all_outputs, -2, indices.expand(-1, -1, -1, all_outputs.shape[-1]))
    else:
      all_outputs = coarse_output
      all_z_vals = z_vals
    pixels_fea, depth, weights = pigan_utils.fancy_integration(
      rgb_sigma=all_outputs,
      z_vals=all_z_vals,
      device=device,
      dim_rgb=self.nerf_net.rgb_dim,
      white_back=white_back,
      last_back=last_back,
      clamp_mode=clamp_mode,
      noise_std=nerf_noise)
    inr_img = self.inr_net(pixels_fea, style_dict)
    if return_aux_img:
      # The auxiliary RGB head is frozen too.
      with torch.no_grad():
        aux_img = self.aux_to_rbg(pixels_fea)
    else:
      aux_img = None
    return inr_img, aux_img
| true | true |
f73176b9df2d9d3e6551836091e9a8f8bdc64a68 | 9,041 | py | Python | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | src/pyrobot/habitat/base.py | cihuang123/pyrobot | fe620097e31d11453b5ea7ac15e40f5f5721b29a | [
"MIT"
] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z | import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
    """Base (mobile platform) interface for LoCoBot inside Habitat-Sim.

    Provides odometry-style state estimation relative to the start pose
    and discrete position control via habitat_sim agent actions.
    Velocity control is not available in Habitat-Sim.
    """

    def __init__(self, configs, simulator):
        """
        :param configs: robot configuration object
        :param simulator: wrapper exposing the habitat_sim.Simulator
                          instance as ``simulator.sim``
        """
        self.configs = configs
        self.sim = simulator.sim
        self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
        self.transform = None
        # Start-up pose; get_state() reports odometry relative to this.
        self.init_state = self.get_full_state()

    def execute_action(self, action_name, actuation):
        # actions = "turn_right" or "turn_left" or "move_forward"
        # returns a bool showing if collided or not
        return self._act(action_name, actuation)

    def get_full_state(self):
        # Returns habitat_sim.agent.AgentState
        return self.agent.get_state()

    def _rot_matrix(self, habitat_quat):
        """Convert a habitat quaternion into a 3x3 rotation matrix."""
        quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
        return prutil.quat_to_rot_mat(quat_list)

    def get_state(self, state_type="odom"):
        # Returns (x, y, yaw)
        assert state_type == "odom", "Error: Only Odom state is available"
        cur_state = self.get_full_state()
        init_rotation = self._rot_matrix(self.init_state.rotation)
        # true position here refers to the relative position from
        # where `self.init_state` is treated as origin
        true_position = cur_state.position - self.init_state.position
        true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
        cur_rotation = self._rot_matrix(cur_state.rotation)
        cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
        (r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
        # Habitat has y perpendicular to map where as ROS has z perpendicular
        # to the map. Where as x is same.
        # Here ROS_X = -1 * habitat_z and ROS_Y = -1*habitat_x
        return (-1 * true_position[2], -1 * true_position[0], yaw)

    def stop(self):
        # Message typo fixed: "Veclocity" -> "Velocity".
        raise NotImplementedError("Velocity control is not supported in Habitat-Sim!!")

    def set_vel(self, fwd_speed, turn_speed, exe_time=1):
        raise NotImplementedError("Velocity control is not supported in Habitat-Sim!!")

    def go_to_relative(
        self, xyt_position, use_map=False, close_loop=False, smooth=False
    ):
        """
        Moves the robot to the given
        goal state relative to its initial pose.

        :param xyt_position: The relative goal state of the form (x,y,t)
        :param use_map: When set to "True", ensures that the controller is
                        using only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller
                           operates in closed loop by
                           taking account of odometry.
        :param smooth: When set to "True", ensures that the motion
                       leading to the goal is a smooth one.

        :type xyt_position: list
        :type use_map: bool
        :type close_loop: bool
        :type smooth: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        try:
            if use_map:
                raise NotImplementedError(
                    "Using map feature is not yet supported for Habitat-Sim"
                )
            if close_loop:
                raise NotImplementedError(
                    "Closed-loop position control is not supported in Habitat-Sim!"
                )
            if smooth:
                raise NotImplementedError(
                    "Smooth position control feature is not yet supported for Habitat-Sim"
                )
        except Exception as error:
            print(error)
            return False

        (cur_x, cur_y, cur_yaw) = self.get_state()
        abs_yaw = cur_yaw + xyt_position[2]
        return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)

    def go_to_absolute(
        self, xyt_position, use_map=False, close_loop=False, smooth=False
    ):
        """
        Moves the robot to the given goal state in the world frame.

        :param xyt_position: The goal state of the form (x,y,t)
                             in the world (map) frame.
        :param use_map: When set to "True", ensures that the controller is
                        using only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller
                           operates in closed loop by
                           taking account of odometry.
        :param smooth: When set to "True", ensures that the motion
                       leading to the goal is a smooth one.

        :type xyt_position: list
        :type use_map: bool
        :type close_loop: bool
        :type smooth: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        try:
            if use_map:
                raise NotImplementedError(
                    "Using map feature is not yet supported for Habitat-Sim"
                )
            if close_loop:
                raise NotImplementedError(
                    "Closed-loop position control is not supported in Habitat-Sim!"
                )
            if smooth:
                raise NotImplementedError(
                    "Smooth position control feature is not yet supported for Habitat-Sim"
                )
        except Exception as error:
            print(error)
            return False

        (cur_x, cur_y, cur_yaw) = self.get_state()
        rel_X = xyt_position[0] - cur_x
        rel_Y = xyt_position[1] - cur_y
        abs_yaw = xyt_position[2]
        # convert rel_X & rel_Y from global frame to current frame
        R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
                      [-np.sin(cur_yaw), np.cos(cur_yaw)]])
        rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
        return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)

    def _act(self, action_name, actuation):
        """Take the action specified by action_id

        :param action_id: ID of the action. Retrieves the action from
            `agent_config.action_space <AgentConfiguration.action_space>`

        :return: Whether or not the action taken resulted in a collision
        """
        did_collide = False
        act_spec = ActuationSpec(actuation)
        did_collide = self.agent.controls.action(
            self.agent.scene_node, action_name, act_spec, apply_filter=True
        )
        return did_collide

    def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
        # clip relative movements beyond 10 micrometer precision
        # this is done to improve determinism, as habitat-sim doesn't
        # seem to precisely move the robot beyond sub millimeter precision anyways
        if abs(rel_x) < 1e-5:
            rel_x = 0
        if abs(rel_y) < 1e-5:
            rel_y = 0

        if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
            # rotate to point to (x, y) point
            action_name = "turn_left"
            if rel_y < 0.0:
                action_name = "turn_right"

            v1 = np.asarray([1, 0], dtype=np.float64)
            v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
            cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
            angle = np.arccos(cosine_angle)

            did_collide = self._act(action_name, math.degrees(angle))

            if did_collide:
                print("Error: Collision occurred while 1st rotating!")
                return False

            # move to (x,y) point
            did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
            if did_collide:
                print("Error: Collision occurred while moving straight!")
                return False

        # rotate to match the final yaw!
        (cur_x, cur_y, cur_yaw) = self.get_state()
        rel_yaw = abs_yaw - cur_yaw

        # clip to micro-degree precision to preserve determinism
        if abs(rel_yaw) < 1e-4:
            rel_yaw = 0

        action_name = "turn_left"
        if rel_yaw < 0.0:
            action_name = "turn_right"
            rel_yaw *= -1

        did_collide = self._act(action_name, math.degrees(rel_yaw))
        if did_collide:
            print("Error: Collision occurred while rotating!")
            return False

        return True

    def track_trajectory(self, states, controls, close_loop):
        """
        State trajectory that the robot should track.

        :param states: sequence of (x,y,t) states that the robot should track.
        :param controls: optionally specify control sequence as well.
        :param close_loop: whether to close the loop on the
                           computed control sequence or not.

        :type states: list
        :type controls: list
        :type close_loop: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        raise NotImplementedError
| 36.603239 | 93 | 0.623825 | import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
def __init__(self, configs, simulator):
self.configs = configs
self.sim = simulator.sim
self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
self.transform = None
self.init_state = self.get_full_state()
def execute_action(self, action_name, actuation):
return self._act(action_name, actuation)
def get_full_state(self):
return self.agent.get_state()
def _rot_matrix(self, habitat_quat):
quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
return prutil.quat_to_rot_mat(quat_list)
def get_state(self, state_type="odom"):
assert state_type == "odom", "Error: Only Odom state is available"
cur_state = self.get_full_state()
init_rotation = self._rot_matrix(self.init_state.rotation)
true_position = cur_state.position - self.init_state.position
true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
cur_rotation = self._rot_matrix(cur_state.rotation)
cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
(r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
return (-1 * true_position[2], -1 * true_position[0], yaw)
def stop(self):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def set_vel(self, fwd_speed, turn_speed, exe_time=1):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def go_to_relative(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
abs_yaw = cur_yaw + xyt_position[2]
return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)
def go_to_absolute(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_X = xyt_position[0] - cur_x
rel_Y = xyt_position[1] - cur_y
abs_yaw = xyt_position[2]
R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
[-np.sin(cur_yaw), np.cos(cur_yaw)]])
rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)
def _act(self, action_name, actuation):
did_collide = False
act_spec = ActuationSpec(actuation)
did_collide = self.agent.controls.action(
self.agent.scene_node, action_name, act_spec, apply_filter=True
)
return did_collide
def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
# seem to precisely move the robot beyond sub milimeter precision anyways
if abs(rel_x) < 1e-5:
rel_x = 0
if abs(rel_y) < 1e-5:
rel_y = 0
if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
# rotate to point to (x, y) point
action_name = "turn_left"
if rel_y < 0.0:
action_name = "turn_right"
v1 = np.asarray([1, 0], dtype=np.float64)
v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cosine_angle)
did_collide = self._act(action_name, math.degrees(angle))
if did_collide:
print("Error: Collision accured while 1st rotating!")
return False
# move to (x,y) point
did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
if did_collide:
print("Error: Collision accured while moving straight!")
return False
# rotate to match the final yaw!
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_yaw = abs_yaw - cur_yaw
# clip to micro-degree precision to preserve determinism
if abs(rel_yaw) < 1e-4:
rel_yaw = 0
action_name = "turn_left"
if rel_yaw < 0.0:
action_name = "turn_right"
rel_yaw *= -1
did_collide = self._act(action_name, math.degrees(rel_yaw))
if did_collide:
print("Error: Collision accured while rotating!")
return False
return True
def track_trajectory(self, states, controls, close_loop):
    """Track a state/control trajectory; not supported for Habitat-Sim."""
    raise NotImplementedError
| true | true |
f73176e6fa8ce3c0edc651c2858ea483ba4fd4a1 | 5,318 | py | Python | fritzconnection/fritzhosts.py | deisi/fritzconnection | b5c14515e1c8e2652b06b6316a7f3913df942841 | [
"MIT"
] | 2 | 2016-11-14T18:58:56.000Z | 2021-03-12T10:15:03.000Z | fritzconnection/fritzhosts.py | deisi/fritzconnection | b5c14515e1c8e2652b06b6316a7f3913df942841 | [
"MIT"
] | 2 | 2015-12-09T20:12:08.000Z | 2016-11-02T15:03:19.000Z | fritzconnection/fritzhosts.py | deisi/fritzconnection | b5c14515e1c8e2652b06b6316a7f3913df942841 | [
"MIT"
] | 7 | 2016-10-02T18:37:20.000Z | 2021-09-14T21:29:28.000Z | # -*- coding: utf-8 -*-
__version__ = '0.4.6'
import argparse
from . import fritzconnection
SERVICE = 'Hosts'
# version-access:
def get_version():
    """Return the version string of this module."""
    return __version__
class FritzHosts(object):
    """Convenience wrapper around the 'Hosts' service of a FritzBox.

    Either an existing FritzConnection instance is reused (``fc``) or a
    new one is created from address/port/user/password.
    """

    def __init__(self,
                 fc=None,
                 address=fritzconnection.FRITZ_IP_ADDRESS,
                 port=fritzconnection.FRITZ_TCP_PORT,
                 user=fritzconnection.FRITZ_USERNAME,
                 password=''):
        super(FritzHosts, self).__init__()
        if fc is None:
            fc = fritzconnection.FritzConnection(address, port, user, password)
        self.fc = fc

    def action(self, actionname, **kwargs):
        """Forward an action call to the 'Hosts' service."""
        return self.fc.call_action(SERVICE, actionname, **kwargs)

    @property
    def modelname(self):
        """Model name of the connected box."""
        return self.fc.modelname

    @property
    def host_numbers(self):
        """Number of host entries known to the box (one SOAP call)."""
        return self.action('GetHostNumberOfEntries')['NewHostNumberOfEntries']

    def get_generic_host_entry(self, index):
        """Return the host entry at the given index."""
        return self.action('GetGenericHostEntry', NewIndex=index)

    def get_specific_host_entry(self, mac_address):
        """Return the host entry for the given mac address."""
        return self.action('GetSpecificHostEntry', NewMACAddress=mac_address)

    def get_hosts_info(self):
        """
        Returns a list of dicts with information about the known hosts.
        The dict-keys are: 'ip', 'name', 'mac', 'status'
        """
        entries = []
        index = 0
        while index < self.host_numbers:
            entry = self.get_generic_host_entry(index)
            entries.append({
                'ip': entry['NewIPAddress'],
                'name': entry['NewHostName'],
                'mac': entry['NewMACAddress'],
                'status': entry['NewActive']})
            index += 1
        return entries
# ---------------------------------------------------------
# terminal-output:
# ---------------------------------------------------------
def _print_header(fh):
    """Print program name, module version, box model and ip address."""
    print('\nFritzHosts:')
    for label, value in (('version:', get_version()),
                         ('model:', fh.modelname),
                         ('ip:', fh.fc.address)):
        print('{:<20}{}'.format(label, value))
def print_hosts(fh):
    """Print a table with one row per host known to the box."""
    print('\nList of registered hosts:\n')
    row_format = '{:>3}: {:<15} {:<26} {:<17} {}'
    # header row is followed by one extra blank line
    print(row_format.format('n', 'ip', 'name', 'mac', 'status') + '\n')
    for index, host in enumerate(fh.get_hosts_info()):
        status = 'active' if host['status'] == '1' else '-'
        print(row_format.format(
            index, host['ip'], host['name'], host['mac'], status))
    print('\n')
def _print_detail(fh, detail):
    """Print every attribute reported for a single host.

    ``detail`` is the argparse value of ``-d`` — a one-element list
    holding the MAC address.
    """
    mac_address = detail[0]
    print('\n{:<23}{}\n'.format('Details for host:', mac_address))
    for key, value in fh.get_specific_host_entry(mac_address).items():
        print('{:<23}: {}'.format(key, value))
    print('\n')
def _print_nums(fh):
    """Print the number of hosts known to the box."""
    print('{:<20}{}\n'.format('Number of hosts:', fh.host_numbers))
# ---------------------------------------------------------
# cli-section:
# ---------------------------------------------------------
def _get_cli_arguments():
    """Build and evaluate the command line parser.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='FritzBox Hosts')
    parser.add_argument('-i', '--ip-address',
                        nargs='?', default=fritzconnection.FRITZ_IP_ADDRESS,
                        dest='address',
                        help='ip-address of the FritzBox to connect to. '
                             'Default: %s' % fritzconnection.FRITZ_IP_ADDRESS)
    parser.add_argument('--port',
                        nargs='?', default=fritzconnection.FRITZ_TCP_PORT,
                        dest='port',
                        help='port of the FritzBox to connect to. '
                             'Default: %s' % fritzconnection.FRITZ_TCP_PORT)
    # BUGFIX: the former nargs=1 made argparse store a one-element list
    # (e.g. ['name']) while the default is a plain string, so
    # FritzConnection received a list for user/password. Without nargs
    # a plain string is stored, matching the defaults.
    parser.add_argument('-u', '--username',
                        default=fritzconnection.FRITZ_USERNAME,
                        help='Fritzbox authentication username')
    parser.add_argument('-p', '--password',
                        default='',
                        help='Fritzbox authentication password')
    parser.add_argument('-a', '--all',
                        action='store_true',
                        help='Show all hosts '
                             '(default if no other options given)')
    parser.add_argument('-n', '--nums',
                        action='store_true',
                        help='Show number of known hosts')
    # _print_detail() indexes detail[0], so the list from nargs=1 is kept here.
    parser.add_argument('-d', '--detail',
                        nargs=1, default='',
                        help='Show information about a specific host '
                             '(DETAIL: MAC Address)')
    args = parser.parse_args()
    return args
def _print_status(arguments):
    """Create a FritzHosts instance and dispatch to the requested output."""
    fh = FritzHosts(address=arguments.address,
                    port=arguments.port,
                    user=arguments.username,
                    password=arguments.password)
    _print_header(fh)
    # -d takes precedence over -n; default is the full host list
    if arguments.detail:
        _print_detail(fh, arguments.detail)
        return
    if arguments.nums:
        _print_nums(fh)
        return
    print_hosts(fh)
# cli entry point
if __name__ == '__main__':
    _print_status(_get_cli_arguments())
| 32.036145 | 79 | 0.520496 |
__version__ = '0.4.6'
import argparse
from . import fritzconnection
SERVICE = 'Hosts'
def get_version():
return __version__
class FritzHosts(object):
def __init__(self,
fc=None,
address=fritzconnection.FRITZ_IP_ADDRESS,
port=fritzconnection.FRITZ_TCP_PORT,
user=fritzconnection.FRITZ_USERNAME,
password=''):
super(FritzHosts, self).__init__()
if fc is None:
fc = fritzconnection.FritzConnection(address, port, user, password)
self.fc = fc
def action(self, actionname, **kwargs):
return self.fc.call_action(SERVICE, actionname, **kwargs)
@property
def modelname(self):
return self.fc.modelname
@property
def host_numbers(self):
result = self.action('GetHostNumberOfEntries')
return result['NewHostNumberOfEntries']
def get_generic_host_entry(self, index):
result = self.action('GetGenericHostEntry', NewIndex=index)
return result
def get_specific_host_entry(self, mac_address):
result = self.action('GetSpecificHostEntry', NewMACAddress=mac_address)
return result
def get_hosts_info(self):
result = []
index = 0
while index < self.host_numbers:
host = self.get_generic_host_entry(index)
result.append({
'ip': host['NewIPAddress'],
'name': host['NewHostName'],
'mac': host['NewMACAddress'],
'status': host['NewActive']})
index += 1
return result
def _print_header(fh):
print('\nFritzHosts:')
print('{:<20}{}'.format('version:', get_version()))
print('{:<20}{}'.format('model:', fh.modelname))
print('{:<20}{}'.format('ip:', fh.fc.address))
def print_hosts(fh):
print('\nList of registered hosts:\n')
print('{:>3}: {:<15} {:<26} {:<17} {}\n'.format(
'n', 'ip', 'name', 'mac', 'status'))
hosts = fh.get_hosts_info()
for index, host in enumerate(hosts):
if host['status'] == '1':
status = 'active'
else:
status = '-'
print('{:>3}: {:<15} {:<26} {:<17} {}'.format(
index,
host['ip'],
host['name'],
host['mac'],
status,
)
)
print('\n')
def _print_detail(fh, detail):
mac_address = detail[0]
print('\n{:<23}{}\n'.format('Details for host:', mac_address))
info = fh.get_specific_host_entry(mac_address)
for key, value in info.items():
print('{:<23}: {}'.format(key, value))
print('\n')
def _print_nums(fh):
print('{:<20}{}\n'.format('Number of hosts:', fh.host_numbers))
def _get_cli_arguments():
parser = argparse.ArgumentParser(description='FritzBox Hosts')
parser.add_argument('-i', '--ip-address',
nargs='?', default=fritzconnection.FRITZ_IP_ADDRESS,
dest='address',
help='ip-address of the FritzBox to connect to. '
'Default: %s' % fritzconnection.FRITZ_IP_ADDRESS)
parser.add_argument('--port',
nargs='?', default=fritzconnection.FRITZ_TCP_PORT,
dest='port',
help='port of the FritzBox to connect to. '
'Default: %s' % fritzconnection.FRITZ_TCP_PORT)
parser.add_argument('-u', '--username',
nargs=1, default=fritzconnection.FRITZ_USERNAME,
help='Fritzbox authentication username')
parser.add_argument('-p', '--password',
nargs=1, default='',
help='Fritzbox authentication password')
parser.add_argument('-a', '--all',
action='store_true',
help='Show all hosts '
'(default if no other options given)')
parser.add_argument('-n', '--nums',
action='store_true',
help='Show number of known hosts')
parser.add_argument('-d', '--detail',
nargs=1, default='',
help='Show information about a specific host '
'(DETAIL: MAC Address)')
args = parser.parse_args()
return args
def _print_status(arguments):
fh = FritzHosts(address=arguments.address,
port=arguments.port,
user=arguments.username,
password=arguments.password)
_print_header(fh)
if arguments.detail:
_print_detail(fh, arguments.detail)
elif arguments.nums:
_print_nums(fh)
else:
print_hosts(fh)
if __name__ == '__main__':
_print_status(_get_cli_arguments())
| true | true |
f731785fe68dc453314df05fd73e25b0eaf40c95 | 7,738 | py | Python | pygments/lexers/rust.py | beasleyr-vmw/pygments | bd166a3bb5452efd3a37a52d4847cae96d3d45e2 | [
"BSD-2-Clause"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | pygments/lexers/rust.py | beasleyr-vmw/pygments | bd166a3bb5452efd3a37a52d4847cae96d3d45e2 | [
"BSD-2-Clause"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | pygments/lexers/rust.py | beasleyr-vmw/pygments | bd166a3bb5452efd3a37a52d4847cae96d3d45e2 | [
"BSD-2-Clause"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.rust
~~~~~~~~~~~~~~~~~~~~
Lexers for the Rust language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['RustLexer']
class RustLexer(RegexLexer):
    """
    Lexer for the Rust programming language (version 1.10).

    .. versionadded:: 1.6
    """
    name = 'Rust'
    filenames = ['*.rs', '*.rs.in']
    aliases = ['rust', 'rs']
    mimetypes = ['text/rust']

    # Primitive type keywords, highlighted as Keyword.Type.
    keyword_types = (
        words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64',
               'i128', 'u128', 'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
              suffix=r'\b'),
        Keyword.Type)

    # Names from the Rust prelude, highlighted as Name.Builtin.
    builtin_types = (words((
        # Reexported core operators
        'Copy', 'Send', 'Sized', 'Sync',
        'Drop', 'Fn', 'FnMut', 'FnOnce',
        # Reexported types and traits
        'Box',
        'ToOwned',
        'Clone',
        'PartialEq', 'PartialOrd', 'Eq', 'Ord',
        'AsRef', 'AsMut', 'Into', 'From',
        'Default',
        'Iterator', 'Extend', 'IntoIterator',
        'DoubleEndedIterator', 'ExactSizeIterator',
        'Option',
        'Some', 'None',
        'Result',
        'Ok', 'Err',
        'SliceConcatExt',
        'String', 'ToString',
        'Vec'), suffix=r'\b'),
        Name.Builtin)

    # NOTE: rule order within a state matters — earlier rules win.
    tokens = {
        'root': [
            # rust allows a file to start with a shebang, but if the first line
            # starts with #![ then it's not a shebang but a crate attribute.
            (r'#![^[\r\n].*$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            # Whitespace and Comments
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'//!.*?\n', String.Doc),
            (r'///(\n|[^/].*?\n)', String.Doc),
            (r'//(.*?)\n', Comment.Single),
            (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'),
            (r'/\*!', String.Doc, 'doccomment'),
            (r'/\*', Comment.Multiline, 'comment'),
            # Macro parameters
            (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc),
            # Keywords
            (words((
                'as', 'async', 'await', 'box', 'const', 'crate', 'else',
                'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move',
                'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait',
                'try', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'),
             Keyword),
            (words(('abstract', 'alignof', 'become', 'do', 'final', 'macro',
                    'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof',
                    'typeof', 'unsized', 'virtual', 'yield'), suffix=r'\b'),
             Keyword.Reserved),
            (r'(true|false)\b', Keyword.Constant),
            (r'mod\b', Keyword, 'modname'),
            (r'let\b', Keyword.Declaration),
            (r'fn\b', Keyword, 'funcname'),
            (r'(struct|enum|type|union)\b', Keyword, 'typename'),
            (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)),
            keyword_types,
            (r'self\b', Name.Builtin.Pseudo),
            # Prelude (taken from Rust's src/libstd/prelude.rs)
            builtin_types,
            # Path separators, so types don't catch them.
            (r'::\b', Text),
            # Types in positions.
            (r'(?::|->)', Text, 'typename'),
            # Labels
            (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?',
             bygroups(Keyword, Text.Whitespace, Name.Label)),
            # Character Literal
            (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
             String.Char),
            (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
             String.Char),
            # Binary Literal
            (r'0b[01_]+', Number.Bin, 'number_lit'),
            # Octal Literal
            (r'0o[0-7_]+', Number.Oct, 'number_lit'),
            # Hexadecimal Literal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
            # Decimal Literal
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float,
             'number_lit'),
            (r'[0-9][0-9_]*', Number.Integer, 'number_lit'),
            # String Literal
            (r'b"', String, 'bytestring'),
            (r'"', String, 'string'),
            (r'b?r(#*)".*?"\1', String),
            # Lifetime
            (r"""'static""", Name.Builtin),
            (r"""'[a-zA-Z_]\w*""", Name.Attribute),
            # Operators and Punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?]', Operator),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
            # Attributes
            (r'#!?\[', Comment.Preproc, 'attribute['),
            # Macros
            (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\s*)(\{)',
             bygroups(Comment.Preproc, Punctuation, Whitespace, Name,
                      Whitespace, Punctuation), 'macro{'),
            (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\()',
             bygroups(Comment.Preproc, Punctuation, Whitespace, Name,
                      Punctuation), 'macro('),
        ],
        # Block comments; '#push'/'#pop' handle Rust's nested /* ... */.
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        # Same nesting logic for /** ... */ and /*! ... */ doc comments.
        'doccomment': [
            (r'[^*/]+', String.Doc),
            (r'/\*', String.Doc, '#push'),
            (r'\*/', String.Doc, '#pop'),
            (r'[*/]', String.Doc),
        ],
        # Name following the 'mod' keyword.
        'modname': [
            (r'\s+', Text),
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        # Name following the 'fn' keyword.
        'funcname': [
            (r'\s+', Text),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            default('#pop'),
        ],
        # Type name after struct/enum/type/union or ':' / '->'.
        'typename': [
            (r'\s+', Text),
            (r'&', Keyword.Pseudo),
            builtin_types,
            keyword_types,
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        # Optional numeric type suffix directly after a number literal.
        'number_lit': [
            (r'[ui](8|16|32|64|size)', Keyword, '#pop'),
            (r'f(32|64)', Keyword, '#pop'),
            default('#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String),
        ],
        # Byte strings additionally allow \x escapes above 0x7f.
        'bytestring': [
            (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape),
            include('string'),
        ],
        # Brace-delimited macro invocation body.
        'macro{': [
            (r'\{', Operator, '#push'),
            (r'\}', Operator, '#pop'),
        ],
        # Paren-delimited macro invocation body.
        'macro(': [
            (r'\(', Operator, '#push'),
            (r'\)', Operator, '#pop'),
        ],
        # Shared rules for nested brackets/strings inside attributes.
        'attribute_common': [
            (r'"', String, 'string'),
            (r'\[', Comment.Preproc, 'attribute['),
            (r'\(', Comment.Preproc, 'attribute('),
        ],
        'attribute[': [
            include('attribute_common'),
            (r'\];?', Comment.Preproc, '#pop'),
            (r'[^"\]]+', Comment.Preproc),
        ],
        'attribute(': [
            include('attribute_common'),
            (r'\);?', Comment.Preproc, '#pop'),
            (r'[^")]+', Comment.Preproc),
        ],
    }
| 35.013575 | 79 | 0.415094 |
from pygments.lexer import RegexLexer, include, bygroups, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['RustLexer']
class RustLexer(RegexLexer):
name = 'Rust'
filenames = ['*.rs', '*.rs.in']
aliases = ['rust', 'rs']
mimetypes = ['text/rust']
keyword_types = (
words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64',
'i128', 'u128', 'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
suffix=r'\b'),
Keyword.Type)
builtin_types = (words((
'Copy', 'Send', 'Sized', 'Sync',
'Drop', 'Fn', 'FnMut', 'FnOnce',
'Box',
'ToOwned',
'Clone',
'PartialEq', 'PartialOrd', 'Eq', 'Ord',
'AsRef', 'AsMut', 'Into', 'From',
'Default',
'Iterator', 'Extend', 'IntoIterator',
'DoubleEndedIterator', 'ExactSizeIterator',
'Option',
'Some', 'None',
'Result',
'Ok', 'Err',
'SliceConcatExt',
'String', 'ToString',
'Vec'), suffix=r'\b'),
Name.Builtin)
tokens = {
'root': [
],
'base': [
# Whitespace and Comments
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'//!.*?\n', String.Doc),
(r'///(\n|[^/].*?\n)', String.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'),
(r'/\*!', String.Doc, 'doccomment'),
(r'/\*', Comment.Multiline, 'comment'),
# Macro parameters
(r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc),
# Keywords
(words((
'as', 'async', 'await', 'box', 'const', 'crate', 'else',
'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move',
'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait',
'try', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'),
Keyword),
(words(('abstract', 'alignof', 'become', 'do', 'final', 'macro',
'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof',
'typeof', 'unsized', 'virtual', 'yield'), suffix=r'\b'),
Keyword.Reserved),
(r'(true|false)\b', Keyword.Constant),
(r'mod\b', Keyword, 'modname'),
(r'let\b', Keyword.Declaration),
(r'fn\b', Keyword, 'funcname'),
(r'(struct|enum|type|union)\b', Keyword, 'typename'),
(r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)),
keyword_types,
(r'self\b', Name.Builtin.Pseudo),
# Prelude (taken from Rust's src/libstd/prelude.rs)
builtin_types,
(r'::\b', Text),
# Types in positions.
(r'(?::|->)', Text, 'typename'),
# Labels
(r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?',
bygroups(Keyword, Text.Whitespace, Name.Label)),
(r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
String.Char),
(r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0"""
r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
String.Char),
(r'0b[01_]+', Number.Bin, 'number_lit'),
(r'0o[0-7_]+', Number.Oct, 'number_lit'),
(r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
(r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float,
'number_lit'),
(r'[0-9][0-9_]*', Number.Integer, 'number_lit'),
(r'b"', String, 'bytestring'),
(r'"', String, 'string'),
(r'b?r(#*)".*?"\1', String),
(r"""'static""", Name.Builtin),
(r"""'[a-zA-Z_]\w*""", Name.Attribute),
(r'[{}()\[\],.;]', Punctuation),
(r'[+\-*/%&|<>^!~@=:?]', Operator),
(r'[a-zA-Z_]\w*', Name),
(r'#!?\[', Comment.Preproc, 'attribute['),
(r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\s*)(\{)',
bygroups(Comment.Preproc, Punctuation, Whitespace, Name,
Whitespace, Punctuation), 'macro{'),
(r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\()',
bygroups(Comment.Preproc, Punctuation, Whitespace, Name,
Punctuation), 'macro('),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'doccomment': [
(r'[^*/]+', String.Doc),
(r'/\*', String.Doc, '#push'),
(r'\*/', String.Doc, '#pop'),
(r'[*/]', String.Doc),
],
'modname': [
(r'\s+', Text),
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'funcname': [
(r'\s+', Text),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
default('#pop'),
],
'typename': [
(r'\s+', Text),
(r'&', Keyword.Pseudo),
builtin_types,
keyword_types,
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'number_lit': [
(r'[ui](8|16|32|64|size)', Keyword, '#pop'),
(r'f(32|64)', Keyword, '#pop'),
default('#pop'),
],
'string': [
(r'"', String, '#pop'),
(r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape),
(r'[^\\"]+', String),
(r'\\', String),
],
'bytestring': [
(r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape),
include('string'),
],
'macro{': [
(r'\{', Operator, '#push'),
(r'\}', Operator, '#pop'),
],
'macro(': [
(r'\(', Operator, '#push'),
(r'\)', Operator, '#pop'),
],
'attribute_common': [
(r'"', String, 'string'),
(r'\[', Comment.Preproc, 'attribute['),
(r'\(', Comment.Preproc, 'attribute('),
],
'attribute[': [
include('attribute_common'),
(r'\];?', Comment.Preproc, '
(r'[^"\]]+', Comment.Preproc),
],
'attribute(': [
include('attribute_common'),
(r'\);?', Comment.Preproc, '#pop'),
(r'[^")]+', Comment.Preproc),
],
}
| true | true |
f731786dd9a6f98ebccbad8bbac311e06ceed81d | 200 | py | Python | apps/users/utils.py | Yunloop/RoadBlog | d27504096cf00357c8f18737721b9b117b0203d9 | [
"MIT"
] | 1 | 2019-09-18T10:51:55.000Z | 2019-09-18T10:51:55.000Z | apps/users/utils.py | Yunloop/road | d27504096cf00357c8f18737721b9b117b0203d9 | [
"MIT"
] | 9 | 2020-06-05T23:14:20.000Z | 2022-02-10T11:36:14.000Z | apps/users/utils.py | Yunloop/road | d27504096cf00357c8f18737721b9b117b0203d9 | [
"MIT"
] | null | null | null | def jwt_response_payload_handler(token, user=None, request=None):
"""
重写获取jwt载荷数据方法
"""
return {
'token': token,
'id': user.id,
'username': user.username
}
| 20 | 65 | 0.56 | def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'id': user.id,
'username': user.username
}
| true | true |
f73179932da21703dc52d5ead50dafad85c3d14a | 11,369 | py | Python | pydantic_sqlite/_core.py | Phil997/pydantic_sqlite | d52d0f42045c90b47a8e6987ec60dd071444f427 | [
"MIT"
] | 3 | 2022-01-11T06:02:45.000Z | 2022-02-07T06:07:29.000Z | pydantic_sqlite/_core.py | Phil997/pydantic_sqlite | d52d0f42045c90b47a8e6987ec60dd071444f427 | [
"MIT"
] | null | null | null | pydantic_sqlite/_core.py | Phil997/pydantic_sqlite | d52d0f42045c90b47a8e6987ec60dd071444f427 | [
"MIT"
] | null | null | null | import importlib
import inspect
import json
import os
import sqlite3
import tempfile
import typing
from shutil import copyfile
from typing import Any, Generator, List, Union
from pydantic import BaseModel, root_validator
from pydantic.fields import ModelField
from sqlite_utils import Database as _Database
from typing_inspect import is_literal_type, is_union_type
from ._misc import iterable_in_type_repr
SPECIALTYPE = [
typing.Any,
typing.Literal,
typing.Union]
class TableBaseModel(BaseModel):
    """Bookkeeping record that maps a database table to a BaseModel subclass."""
    # name of the database table
    table: str
    # the BaseModel subclass stored in the table
    moduleclass: typing.Any
    # dotted path of moduleclass, filled in by extract_modulename
    modulename: str
    # primary-key column names of the table
    pks: List[str]

    @root_validator(pre=True)
    def extract_modulename(cls, values):
        # Derive the dotted module path from the class repr,
        # e.g. "<class 'pkg.mod.Model'>" -> "pkg.mod.Model".
        v = values['moduleclass']
        values.update(
            {'modulename': str(v).split("<class '")[1].split("'>")[0]})
        return values

    def data(self):
        """Return the row representation stored in the '__basemodels__' table."""
        return dict(
            table=self.table,
            modulename=self.modulename,
            pks=self.pks)
class DataBase():
    """In-memory SQLite database that stores pydantic BaseModel instances.

    Each table holds instances of exactly one BaseModel subclass; the
    bookkeeping table '__basemodels__' records which class belongs to
    which table so that objects can be reconstructed after load().
    """

    def __init__(self, **kwargs):
        self._basemodels = {}  # tablename -> TableBaseModel record
        self._db = _Database(memory=True)

    def __call__(self, tablename) -> Generator[BaseModel, None, None]:
        """returns a Generator for all values in the Table. The returned values are subclasses of pydantic.BaseModel"""
        try:
            basemodel = self._basemodels[tablename]
            foreign_refs = {key.column: key.other_table for key in self._db[tablename].foreign_keys}
        except KeyError:
            raise KeyError(f"can not find Table: {tablename} in Database") from None
        for row in self._db[tablename].rows:
            yield self._build_basemodel_from_dict(basemodel, row, foreign_refs)

    def _special_conversion(self, field_value: Any) -> Union[bool, Any]:
        """Apply a model's custom SQConfig.convert hook if configured.

        Returns False when no special conversion applies; otherwise the
        converted value (or list of converted values for list fields).
        """
        def special_possible(obj_class):
            # A class opts in via SQConfig.special_insert + SQConfig.convert.
            try:
                if not hasattr(obj_class.SQConfig, 'convert'):
                    return False
                return True if obj_class.SQConfig.special_insert else False
            except AttributeError:
                return False

        if isinstance(field_value, List):
            if len(field_value) == 0:
                return False
            if not special_possible(obj_class := field_value[0].__class__):
                return False
            if not all(isinstance(value, type(field_value[0])) for value in field_value):
                raise ValueError(f"not all values in the List are from the same type: '{field_value}'")
            return [obj_class.SQConfig.convert(value) for value in field_value]
        else:
            if not special_possible(obj_class := field_value.__class__):
                return False
            return obj_class.SQConfig.convert(field_value)

    def add(self, tablename: str, value: BaseModel, foreign_tables={}, update_nested_models=True, pk: str = "uuid") -> None:
        """adds a new value to the table tablename"""
        # unknown tablename -> new table -> register the basemodel mapping
        if tablename not in self._basemodels:
            self._basemodels_add_model(table=tablename, moduleclass=value.__class__, pks=[pk])

        # check whether the value matches the basemodel registered for the table
        if not self._basemodels[tablename].moduleclass == type(value):
            raise ValueError(
                f"Can not add type '{type(value)}' to the table '{tablename}', which contains values of type '{self._basemodels[tablename].moduleclass}'")

        # create dict for writing to the Table
        data_for_save = value.dict() if not hasattr(value, "sqlite_repr") else value.sqlite_repr
        foreign_keys = []
        for field_name, field in value.__fields__.items():
            field_value = getattr(value, field_name)
            if res := self._special_conversion(field_value):  # Special Insert with SQConfig.convert
                data_for_save[field_name] = res
            elif field.type_ in SPECIALTYPE or typing.get_origin(field.type_):
                # typing._SpecialForm: Any, NoReturn, ClassVar, Union, Optional
                # typing.get_origin(field.type_) -> e.g. Literal
                data_for_save[field_name] = self._typing_conversion(field, field_value)
            elif issubclass(field.type_, BaseModel):  # nested BaseModels in this value
                # The value has a field of type BaseModel, so the field must
                # live in a foreign table; insert/upsert it there first
                # (recursive call to self.add via _upsert_value_in_foreign_table).
                if field_name not in foreign_tables.keys():
                    keys = list(foreign_tables.keys())
                    raise KeyError(f"detect field of Type BaseModel, but can not find '{field_name}' in foreign_tables (Keys: {keys})") from None
                else:
                    foreign_table_name = foreign_tables[field_name]
                    if foreign_table_name not in self._db.table_names():
                        raise KeyError(f"Can not add a value, which has a foreign Key '{foreign_tables}' to a Table '{foreign_table_name}' which does not exists")
                    nested_obj_ids = self._upsert_value_in_foreign_table(field_value, foreign_table_name, update_nested_models)
                    data_for_save[field_name] = nested_obj_ids
                    foreign_keys.append((field_name, foreign_table_name, pk))  # ignore=True
        self._db[tablename].upsert(data_for_save, pk=pk, foreign_keys=foreign_keys)

    def uuid_in_table(self, tablename: str, uuid: str) -> bool:
        """checks if the given uuid is used as a primary key in the table"""
        hits = [row for row in self._db[tablename].rows_where("uuid = ?", [uuid])]
        if len(hits) > 1:
            raise Exception("uuid is two times in table")  # TODO: choose a more specific exception type
        return False if not hits else True

    def value_in_table(self, tablename: str, value: BaseModel) -> bool:
        """checks if the given value is in the table"""
        return self.uuid_in_table(tablename, value.uuid)

    def value_from_table(self, tablename: str, uuid: str) -> typing.Any:
        """Search the object with the given uuid in the table and return it.

        Returns a subclass of pydantic.BaseModel, or None if not found.
        """
        hits = [row for row in self._db[tablename].rows_where("uuid = ?", [uuid])]
        if len(hits) > 1:
            raise Exception("uuid is two times in table")  # TODO: choose a more specific exception type
        model = self._basemodels[tablename]
        foreign_refs = {key.column: key.other_table for key in self._db[tablename].foreign_keys}
        return None if not hits else self._build_basemodel_from_dict(model, hits[0], foreign_refs=foreign_refs)

    def values_in_table(self, tablename) -> int:
        """returns the number of values in the Table"""
        return self._db[tablename].count

    def load(self, filename: str) -> None:
        """loads all data from the given file and adds them to the in-memory database"""
        if not os.path.isfile(filename):
            # BUGFIX: the message was the literal f-string "Can not load
            # (unknown)" (no placeholder); report the offending filename.
            raise FileNotFoundError(f"Can not load '{filename}'")
        file_db = sqlite3.connect(filename)
        query = "".join(line for line in file_db.iterdump())
        self._db.conn.executescript(query)
        file_db.close()
        # Re-import every stored model class so tables can be deserialized.
        for model in self._db["__basemodels__"].rows:
            classname = model['modulename'].split('.')[-1]
            modulename = '.'.join(model['modulename'].split('.')[:-1])
            my_module = importlib.import_module(modulename)
            self._basemodels_add_model(
                table=model['table'],
                moduleclass=getattr(my_module, classname),
                pks=json.loads(model['pks']))

    def save(self, filename: str) -> None:
        """saves all values from the in-memory database to a file"""
        if not filename.endswith(".db"):
            filename += ".db"
        tmp_dir = tempfile.mkdtemp()
        name = filename.split(os.path.sep)[-1]
        tmp_name = tmp_dir + os.path.sep + name
        backup = tmp_dir + os.path.sep + "_backup.db"
        # keep a backup of an existing target file in case the dump fails
        if os.path.isfile(filename):
            copyfile(filename, backup)
        try:
            file_db = sqlite3.connect(tmp_name)
            query = "".join(line for line in self._db.conn.iterdump())
            file_db.executescript(query)
            file_db.close()
            copyfile(tmp_name, filename)
        except Exception as error:
            # best-effort save: report the failure instead of swallowing it silently
            print(error)
            print(f"saved the backup file under '{backup}'")

    def _basemodels_add_model(self, **kwargs):
        # Register the table/model mapping in memory and in '__basemodels__'.
        model = TableBaseModel(**kwargs)
        self._basemodels.update({kwargs['table']: model})
        self._db["__basemodels__"].upsert(model.data(), pk="modulename")

    def _build_basemodel_from_dict(self, basemodel: TableBaseModel, row: dict, foreign_refs: dict):
        # Returns an object of basemodel.moduleclass built from the row dict.
        members = inspect.getmembers(basemodel.moduleclass, lambda a: not(inspect.isroutine(a)))
        field_models = next(line[1] for line in members if '__fields__' in line)
        d = {}
        for field_name, field_value in row.items():
            type_repr = field_models[field_name].__str__().split(' ')[1]  # e.g. 'type=Any'
            if field_name in foreign_refs.keys():  # the column contains another subclass of BaseModel
                if not iterable_in_type_repr(type_repr):
                    data = self.value_from_table(foreign_refs[field_name], field_value)
                else:
                    data = [self.value_from_table(foreign_refs[field_name], val) for val in json.loads(field_value)]
            else:
                data = field_value if not iterable_in_type_repr(type_repr) else json.loads(field_value)
            d.update({field_name: data})
        return basemodel.moduleclass(**d)

    def _upsert_value_in_foreign_table(self, field_value, foreign_table_name, update_nested_models) -> Union[str, List[str]]:
        # The nested BaseModel is inserted/upserted into the foreign table if
        # missing there or if update_nested_models is True. For iterable
        # field_value every element is handled; returns the uuid(s).
        foreign_refs = {key.column: key.other_table for key in self._db.table(foreign_table_name).foreign_keys}

        def add_nested_model(value):
            if not self.value_in_table(foreign_table_name, value) or update_nested_models:
                self.add(foreign_table_name, value, foreign_tables=foreign_refs)
            return value.uuid

        if not isinstance(field_value, List):
            return add_nested_model(field_value)
        else:
            return [add_nested_model(element) for element in field_value]

    def _typing_conversion(self, field: ModelField, field_value: typing.Any) -> typing.Any:
        # Convert values of typing-special fields to something SQLite can store.
        if field.type_ == typing.Any:
            return field_value
        elif is_union_type(field.type_):
            return str(field_value)
        elif is_literal_type(field.type_):
            return str(field_value)
        else:
            raise NotImplementedError(f"type {field.type_} is not supported yet")
| 46.215447 | 158 | 0.648078 | import importlib
import inspect
import json
import os
import sqlite3
import tempfile
import typing
from shutil import copyfile
from typing import Any, Generator, List, Union
from pydantic import BaseModel, root_validator
from pydantic.fields import ModelField
from sqlite_utils import Database as _Database
from typing_inspect import is_literal_type, is_union_type
from ._misc import iterable_in_type_repr
SPECIALTYPE = [
typing.Any,
typing.Literal,
typing.Union]
class TableBaseModel(BaseModel):
table: str
moduleclass: typing.Any
modulename: str
pks: List[str]
@root_validator(pre=True)
def extract_modulename(cls, values):
v = values['moduleclass']
values.update(
{'modulename': str(v).split("<class '")[1].split("'>")[0]})
return values
def data(self):
return dict(
table=self.table,
modulename=self.modulename,
pks=self.pks)
class DataBase():
    """In-memory SQLite store for pydantic ``BaseModel`` instances.

    Models are upserted into per-table rows; nested ``BaseModel`` fields
    are stored in separate tables and linked via foreign keys. The
    table-to-model mapping is kept in ``self._basemodels`` and mirrored
    into a ``__basemodels__`` table so it survives ``save()``/``load()``.
    """
    def __init__(self, **kwargs):
        # table name -> TableBaseModel describing the stored model class
        self._basemodels = {}
        # backing store is an in-memory SQLite database (sqlite_utils wrapper)
        self._db = _Database(memory=True)
    def __call__(self, tablename) -> Generator[BaseModel, None, None]:
        """Yield every row of *tablename* rebuilt as its original model type.

        Raises:
            KeyError: if *tablename* is not a known table.
        """
        try:
            basemodel = self._basemodels[tablename]
            foreign_refs = {key.column: key.other_table for key in self._db[tablename].foreign_keys}
        except KeyError:
            raise KeyError(f"can not find Table: {tablename} in Database") from None
        for row in self._db[tablename].rows:
            yield self._build_basemodel_from_dict(basemodel, row, foreign_refs)
    def _special_conversion(self, field_value: Any) -> Union[bool, Any]:
        """Apply a model-defined custom serialization, if one is declared.

        A value's class can opt in by providing an ``SQConfig`` inner class
        with ``special_insert`` truthy and a ``convert`` callable. Returns
        the converted value (or list of converted values), or ``False``
        when no special conversion applies.

        NOTE(review): the caller tests the result with a walrus ``if res :=``,
        so a legitimately falsy converted value would also be treated as
        "no conversion" — confirm that converters never return falsy values.
        """
        def special_possible(obj_class):
            # True only if the class declares SQConfig.convert and enables
            # SQConfig.special_insert.
            try:
                if not hasattr(obj_class.SQConfig, 'convert'):
                    return False
                return True if obj_class.SQConfig.special_insert else False
            except AttributeError:
                return False
        if isinstance(field_value, List):
            if len(field_value) == 0:
                return False
            # all list elements must share the first element's type
            if not special_possible(obj_class := field_value[0].__class__):
                return False
            if not all(isinstance(value, type(field_value[0])) for value in field_value):
                raise ValueError(f"not all values in the List are from the same type: '{field_value}'")
            return [obj_class.SQConfig.convert(value) for value in field_value]
        else:
            if not special_possible(obj_class := field_value.__class__):
                return False
            return obj_class.SQConfig.convert(field_value)
    def add(self, tablename: str, value: BaseModel, foreign_tables={}, update_nested_models=True, pk: str = "uuid") -> None:
        """Upsert *value* into *tablename*, creating the table on first use.

        Fields that are themselves ``BaseModel`` instances are stored in
        the table named by ``foreign_tables[field_name]`` (which must
        already exist) and replaced by their primary-key value(s) here.

        NOTE(review): ``foreign_tables={}`` is a mutable default argument;
        it is only read here, but replacing it with ``None`` + fallback
        would be safer.

        Raises:
            ValueError: if *value*'s type differs from the table's model type.
            KeyError: if a nested-model field has no foreign-table mapping,
                or the mapped foreign table does not exist.
        """
        if tablename not in self._basemodels:
            self._basemodels_add_model(table=tablename, moduleclass=value.__class__, pks=[pk])
        if not self._basemodels[tablename].moduleclass == type(value):
            raise ValueError(
                f"Can not add type '{type(value)}' to the table '{tablename}', which contains values of type '{self._basemodels[tablename].moduleclass}'")
        # models may provide a custom flat representation via .sqlite_repr
        data_for_save = value.dict() if not hasattr(value, "sqlite_repr") else value.sqlite_repr
        foreign_keys = []
        for field_name, field in value.__fields__.items():
            field_value = getattr(value, field_name)
            if res := self._special_conversion(field_value):
                # class-level SQConfig converter takes precedence
                data_for_save[field_name] = res
            elif field.type_ in SPECIALTYPE or typing.get_origin(field.type_):
                # Any / Literal / Union and other generic typing constructs
                data_for_save[field_name] = self._typing_conversion(field, field_value)
            elif issubclass(field.type_, BaseModel):
                # nested model: store in its own table, keep only the key(s)
                if field_name not in foreign_tables.keys():
                    keys = list(foreign_tables.keys())
                    raise KeyError(f"detect field of Type BaseModel, but can not find '{field_name}' in foreign_tables (Keys: {keys})") from None
                else:
                    foreign_table_name = foreign_tables[field_name]
                    if foreign_table_name not in self._db.table_names():
                        raise KeyError(f"Can not add a value, which has a foreign Key '{foreign_tables}' to a Table '{foreign_table_name}' which does not exists")
                    nested_obj_ids = self._upsert_value_in_foreign_table(field_value, foreign_table_name, update_nested_models)
                    data_for_save[field_name] = nested_obj_ids
                    foreign_keys.append((field_name, foreign_table_name, pk))
        self._db[tablename].upsert(data_for_save, pk=pk, foreign_keys=foreign_keys)
    def uuid_in_table(self, tablename: str, uuid: str) -> bool:
        """Return True if a row with this uuid exists in *tablename*.

        NOTE(review): the column name 'uuid' is hard-coded even though
        ``add()`` accepts a configurable ``pk`` — confirm callers always
        use the default primary key.
        """
        hits = [row for row in self._db[tablename].rows_where("uuid = ?", [uuid])]
        if len(hits) > 1:
            raise Exception("uuid is two times in table")
        return False if not hits else True
    def value_in_table(self, tablename: str, value: BaseModel) -> bool:
        """Return True if *value* (identified by its ``.uuid``) is stored."""
        return self.uuid_in_table(tablename, value.uuid)
    def value_from_table(self, tablename: str, uuid: str) -> typing.Any:
        """Fetch one row by uuid and rebuild it as its model type.

        Returns None when no row matches; raises on duplicate uuids.
        """
        hits = [row for row in self._db[tablename].rows_where("uuid = ?", [uuid])]
        if len(hits) > 1:
            raise Exception("uuid is two times in table")
        model = self._basemodels[tablename]
        foreign_refs = {key.column: key.other_table for key in self._db[tablename].foreign_keys}
        return None if not hits else self._build_basemodel_from_dict(model, hits[0], foreign_refs=foreign_refs)
    def values_in_table(self, tablename) -> int:
        """Return the number of rows currently stored in *tablename*."""
        return self._db[tablename].count
    def load(self, filename: str) -> None:
        """Load a saved database file into this in-memory database.

        Replays the file's SQL dump into the in-memory connection, then
        re-registers every model class recorded in ``__basemodels__`` by
        importing it from its dotted module path.
        """
        if not os.path.isfile(filename):
            # NOTE(review): message does not include the missing filename
            raise FileNotFoundError(f"Can not load (unknown)")
        file_db = sqlite3.connect(filename)
        query = "".join(line for line in file_db.iterdump())
        self._db.conn.executescript(query)
        file_db.close()
        for model in self._db["__basemodels__"].rows:
            # split 'pkg.mod.Cls' into module path and class name
            classname = model['modulename'].split('.')[-1]
            modulename = '.'.join(model['modulename'].split('.')[:-1])
            my_module = importlib.import_module(modulename)
            self._basemodels_add_model(
                table=model['table'],
                moduleclass=getattr(my_module, classname),
                pks=json.loads(model['pks']))
    def save(self, filename: str) -> None:
        """Dump the in-memory database to *filename* (a '.db' file).

        Writes to a temp file first and keeps a backup copy of any
        pre-existing target, so a failed write cannot destroy it.

        NOTE(review): failures are swallowed after printing the backup
        location — the caller gets no exception.
        """
        if not filename.endswith(".db"):
            filename += ".db"
        tmp_dir = tempfile.mkdtemp()
        name = filename.split(os.path.sep)[-1]
        tmp_name = tmp_dir + os.path.sep + name
        backup = tmp_dir + os.path.sep + "_backup.db"
        if os.path.isfile(filename):
            # preserve the existing file before overwriting it
            copyfile(filename, backup)
        try:
            file_db = sqlite3.connect(tmp_name)
            query = "".join(line for line in self._db.conn.iterdump())
            file_db.executescript(query)
            file_db.close()
            copyfile(tmp_name, filename)
        except Exception:
            print(f"saved the backup file under '{backup}'")
    def _basemodels_add_model(self, **kwargs):
        """Register a table->model mapping in memory and in __basemodels__."""
        model = TableBaseModel(**kwargs)
        self._basemodels.update({kwargs['table']: model})
        self._db["__basemodels__"].upsert(model.data(), pk="modulename")
    def _build_basemodel_from_dict(self, basemodel: TableBaseModel, row: dict, foreign_refs: dict):
        """Reconstruct a model instance from a raw table row.

        Foreign-key columns are resolved recursively via
        ``value_from_table``; iterable-typed columns are JSON-decoded.
        """
        members = inspect.getmembers(basemodel.moduleclass, lambda a: not(inspect.isroutine(a)))
        field_models = next(line[1] for line in members if '__fields__' in line)
        d = {}
        for field_name, field_value in row.items():
            # pydantic ModelField str() looks like 'name type=... required=...';
            # take the 'type=...' token to decide if the field is iterable.
            type_repr = field_models[field_name].__str__().split(' ')[1]
            if field_name in foreign_refs.keys():
                if not iterable_in_type_repr(type_repr):
                    data = self.value_from_table(foreign_refs[field_name], field_value)
                else:
                    # JSON list of uuids -> list of resolved models
                    data = [self.value_from_table(foreign_refs[field_name], val) for val in json.loads(field_value)]
            else:
                data = field_value if not iterable_in_type_repr(type_repr) else json.loads(field_value)
            d.update({field_name: data})
        return basemodel.moduleclass(**d)
    def _upsert_value_in_foreign_table(self, field_value, foreign_table_name, update_nested_models) -> Union[str, List[str]]:
        """Store a nested model (or list of them) and return its uuid(s).

        Existing rows are rewritten only when *update_nested_models* is true.
        """
        foreign_refs = {key.column: key.other_table for key in self._db.table(foreign_table_name).foreign_keys}
        def add_nested_model(value):
            if not self.value_in_table(foreign_table_name, value) or update_nested_models:
                self.add(foreign_table_name, value, foreign_tables=foreign_refs)
            return value.uuid
        if not isinstance(field_value, List):
            return add_nested_model(field_value)
        else:
            return [add_nested_model(element) for element in field_value]
    def _typing_conversion(self, field: ModelField, field_value: typing.Any) -> typing.Any:
        """Serialize a value whose declared type is a typing construct.

        Any passes through unchanged; Union and Literal values are stored
        as their string form.
        """
        if field.type_ == typing.Any:
            return field_value
        elif is_union_type(field.type_):
            return str(field_value)
        elif is_literal_type(field.type_):
            return str(field_value)
        else:
            raise NotImplementedError(f"type {field.type_} is not supported yet")
| true | true |
f7317ac183acdc3fef988d0b36002f1c1f9db05e | 7,610 | py | Python | env/lib/python3.6/site-packages/nibabel/tests/test_filename_parser.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_filename_parser.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_filename_parser.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Tests for filename container '''
from ..filename_parser import (types_filenames, TypesFilenamesError,
parse_filename, splitext_addext)
from nose.tools import (assert_equal, assert_true, assert_false,
assert_raises)
def test_filenames():
    """Exercise types_filenames over extension enforcement, compression
    suffixes, custom trailing suffixes and case handling."""
    types_exts = (('image', '.img'), ('header', '.hdr'))
    for t_fname in ('test.img', 'test.hdr', 'test', 'test.'):
        tfns = types_filenames(t_fname, types_exts)
        assert_equal(tfns,
                     {'image': 'test.img',
                      'header': 'test.hdr'})
    # enforcing extensions raises an error for bad extension
    assert_raises(TypesFilenamesError,
                  types_filenames,
                  'test.funny',
                  types_exts)
    # If not enforcing extensions, it does the best job it can,
    # assuming the passed filename is for the first type (in this case
    # 'image')
    tfns = types_filenames('test.funny', types_exts,
                           enforce_extensions=False)
    assert_equal(tfns,
                 {'header': 'test.hdr',
                  'image': 'test.funny'})
    # .gz and .bz2 suffixes to extensions, by default, are removed
    # before extension checking etc, and then put back onto every
    # returned filename.
    tfns = types_filenames('test.img.gz', types_exts)
    assert_equal(tfns,
                 {'header': 'test.hdr.gz',
                  'image': 'test.img.gz'})
    tfns = types_filenames('test.img.bz2', types_exts)
    assert_equal(tfns,
                 {'header': 'test.hdr.bz2',
                  'image': 'test.img.bz2'})
    # of course, if we don't know about e.g. gz, and enforce_extensions
    # is on, we get an error
    assert_raises(TypesFilenamesError,
                  types_filenames,
                  'test.img.gz',
                  types_exts, ())
    # if we don't know about .gz extension, and not enforcing, then we
    # get something a bit odd
    tfns = types_filenames('test.img.gz', types_exts,
                           trailing_suffixes=(),
                           enforce_extensions=False)
    assert_equal(tfns,
                 {'header': 'test.img.hdr',
                  'image': 'test.img.gz'})
    # the suffixes we remove and replaces can be any suffixes.
    tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',))
    assert_equal(tfns,
                 {'header': 'test.hdr.bzr',
                  'image': 'test.img.bzr'})
    # If we specifically pass the remove / replace suffixes, then we
    # don't remove / replace the .gz and .bz2, unless they are passed
    # specifically.
    tfns = types_filenames('test.img.bzr', types_exts,
                           trailing_suffixes=('.bzr',),
                           enforce_extensions=False)
    assert_equal(tfns,
                 {'header': 'test.hdr.bzr',
                  'image': 'test.img.bzr'})
    # but, just .gz or .bz2 as extension gives an error, if enforcing is on
    assert_raises(TypesFilenamesError,
                  types_filenames,
                  'test.gz',
                  types_exts)
    assert_raises(TypesFilenamesError,
                  types_filenames,
                  'test.bz2',
                  types_exts)
    # if enforcing is off, it tries to work out what the other files
    # should be assuming the passed filename is of the first input type
    tfns = types_filenames('test.gz', types_exts,
                           enforce_extensions=False)
    assert_equal(tfns,
                 {'image': 'test.gz',
                  'header': 'test.hdr.gz'})
    # case (in)sensitivity, and effect of uppercase, lowercase
    tfns = types_filenames('test.IMG', types_exts)
    assert_equal(tfns,
                 {'image': 'test.IMG',
                  'header': 'test.HDR'})
    tfns = types_filenames('test.img',
                           (('image', '.IMG'), ('header', '.HDR')))
    assert_equal(tfns,
                 {'header': 'test.hdr',
                  'image': 'test.img'})
    tfns = types_filenames('test.IMG.Gz', types_exts)
    assert_equal(tfns,
                 {'image': 'test.IMG.Gz',
                  'header': 'test.HDR.Gz'})
def test_parse_filename():
    """Check parse_filename splits (path, extension, addext, file type)."""
    types_exts = (('t1', 'ext1'), ('t2', 'ext2'))
    # ((path, suffixes), expected result) pairs
    cases = (
        (('/path/fname.funny', ()),
         ('/path/fname', '.funny', None, None)),
        (('/path/fnameext2', ()),
         ('/path/fname', 'ext2', None, 't2')),
        (('/path/fnameext2', ('.gz',)),
         ('/path/fname', 'ext2', None, 't2')),
        (('/path/fnameext2.gz', ('.gz',)),
         ('/path/fname', 'ext2', '.gz', 't2'))
    )
    for (path, suffixes), expected in cases:
        assert_equal(parse_filename(path, types_exts, suffixes), expected)
        # Upper-casing the input upper-cases the path pieces, while the
        # detected file type (matched case-insensitively) stays the same.
        upper_expected = (expected[0].upper(), expected[1].upper(),
                          expected[2].upper() if expected[2] else None,
                          expected[3])
        assert_equal(parse_filename(path.upper(), types_exts, suffixes),
                     upper_expected)
    # Explicit checks of the case-sensitivity flag:
    # (path, case_sensitive, expected result)
    sensitivity_cases = (
        ('/path/fnameext2.GZ', False,
         ('/path/fname', 'ext2', '.GZ', 't2')),     # insensitive again
        ('/path/fnameext2.GZ', True,
         ('/path/fnameext2', '.GZ', None, None)),   # sensitive
        ('/path/fnameEXT2.gz', False,
         ('/path/fname', 'EXT2', '.gz', 't2')),     # insensitive
        ('/path/fnameEXT2.gz', True,
         ('/path/fnameEXT2', '', '.gz', None)),     # sensitive
    )
    for path, case_sensitive, expected in sensitivity_cases:
        assert_equal(
            parse_filename(path, types_exts, ('.gz',), case_sensitive),
            expected)
def test_splitext_addext():
    """Check splitext_addext splits root, extension and addon extension."""
    # (args for splitext_addext, expected (root, ext, addext)) pairs;
    # matching against the addon extensions is case-insensitive unless the
    # third positional argument is True.
    cases = (
        (('fname.ext.gz',), ('fname', '.ext', '.gz')),
        (('fname.ext',), ('fname', '.ext', '')),
        (('fname.ext.foo', ('.foo', '.bar')), ('fname', '.ext', '.foo')),
        (('fname.ext.FOO', ('.foo', '.bar')), ('fname', '.ext', '.FOO')),
        # case-sensitive matching refuses the upper-cased addext
        (('fname.ext.FOO', ('.foo', '.bar'), True),
         ('fname.ext', '.FOO', '')),
        # edge cases: leading and bare dots
        (('.nii',), ('', '.nii', '')),
        (('...nii',), ('..', '.nii', '')),
        (('.',), ('.', '', '')),
        (('..',), ('..', '', '')),
        (('...',), ('...', '', '')),
    )
    for args, expected in cases:
        assert_equal(splitext_addext(*args), expected)
| 43.988439 | 79 | 0.49724 |
.upper(), exps[1].upper(),
exps[2].upper() if exps[2] else None,
exps[3])
res = parse_filename(upth, types_exts, sufs)
assert_equal(res, uexps)
# test case sensitivity
res = parse_filename('/path/fnameext2.GZ',
types_exts,
('.gz',), False) # case insensitive again
assert_equal(res, ('/path/fname', 'ext2', '.GZ', 't2'))
res = parse_filename('/path/fnameext2.GZ',
types_exts,
('.gz',), True) # case sensitive
assert_equal(res, ('/path/fnameext2', '.GZ', None, None))
res = parse_filename('/path/fnameEXT2.gz',
types_exts,
('.gz',), False) # case insensitive
assert_equal(res, ('/path/fname', 'EXT2', '.gz', 't2'))
res = parse_filename('/path/fnameEXT2.gz',
types_exts,
('.gz',), True) # case sensitive
assert_equal(res, ('/path/fnameEXT2', '', '.gz', None))
def test_splitext_addext():
res = splitext_addext('fname.ext.gz')
assert_equal(res, ('fname', '.ext', '.gz'))
res = splitext_addext('fname.ext')
assert_equal(res, ('fname', '.ext', ''))
res = splitext_addext('fname.ext.foo', ('.foo', '.bar'))
assert_equal(res, ('fname', '.ext', '.foo'))
res = splitext_addext('fname.ext.FOO', ('.foo', '.bar'))
assert_equal(res, ('fname', '.ext', '.FOO'))
# case sensitive
res = splitext_addext('fname.ext.FOO', ('.foo', '.bar'), True)
assert_equal(res, ('fname.ext', '.FOO', ''))
# edge cases
res = splitext_addext('.nii')
assert_equal(res, ('', '.nii', ''))
res = splitext_addext('...nii')
assert_equal(res, ('..', '.nii', ''))
res = splitext_addext('.')
assert_equal(res, ('.', '', ''))
res = splitext_addext('..')
assert_equal(res, ('..', '', ''))
res = splitext_addext('...')
assert_equal(res, ('...', '', ''))
| true | true |
f7317b0eda9c7facface10dbec8642b54e7fc9d2 | 2,521 | py | Python | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | """
Import Tower Hamlets
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsCsvAddressesImporter
from data_finder.helpers import geocode, geocode_point_only, PostcodeError
from addressbase.models import Address
class Command(BaseCsvStationsCsvAddressesImporter):
    """
    Imports the Polling Station data from Tower Hamlets Council
    """
    council_id = 'E09000030'
    addresses_name = '2016/Polling Stations with Addresses.csv'
    stations_name = '2016/Polling Stations with Addresses.csv'
    csv_delimiter = ','
    elections = [
        'ref.2016-06-23'
    ]

    def get_station_hash(self, record):
        # Stations repeat once per address row in the combined CSV, so
        # hash on the identifying fields to de-duplicate them.
        return "-".join([
            record.station_na,
            record.code,
            record.polling_na,
        ])

    def station_record_to_dict(self, record):
        """Turn one CSV row into a polling-station dict (or None to skip)."""
        if not record.polling_na:
            return

        # format address: collapse repeated blank lines
        address = record.station_na
        while "\n\n" in address:
            address = address.replace("\n\n", "\n").strip()
        # last line's trailing two tokens are the postcode
        postcode = " ".join(address.split(' ')[-2:]).strip().split('\n')[-1]

        location = None
        if float(record.polling_station_x) and float(record.polling_station_y):
            if "Shapla Primary School" in address:
                # The supplied coordinates for this station are wrong;
                # use a manually verified WGS84 point instead.
                location = Point(
                    -0.066990,
                    51.510020,
                    srid=4326
                )
            else:
                # coordinates supplied as OSGB36 eastings/northings
                location = Point(
                    float(record.polling_station_x),
                    float(record.polling_station_y),
                    srid=27700)
        else:
            # no points supplied, so attempt to attach them by geocoding.
            # Bug fix: location_data must be initialised here, otherwise a
            # PostcodeError left it unbound and the 'if location_data'
            # check below raised NameError.
            location_data = None
            try:
                location_data = geocode_point_only(postcode)
            except PostcodeError:
                pass
            if location_data:
                location = Point(
                    location_data['wgs84_lon'],
                    location_data['wgs84_lat'],
                    srid=4326)

        return {
            'internal_council_id': record.code,
            'postcode' : postcode,
            'address' : address,
            'location' : location
        }

    def address_record_to_dict(self, record):
        """Turn one CSV row into an address dict linked to its station."""
        return {
            'address' : record.fulladdress.strip(),
            'postcode' : record.postcode.strip(),
            'polling_station_id': record.code
        }
| 31.5125 | 83 | 0.548195 | from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsCsvAddressesImporter
from data_finder.helpers import geocode, geocode_point_only, PostcodeError
from addressbase.models import Address
class Command(BaseCsvStationsCsvAddressesImporter):
council_id = 'E09000030'
addresses_name = '2016/Polling Stations with Addresses.csv'
stations_name = '2016/Polling Stations with Addresses.csv'
csv_delimiter = ','
elections = [
'ref.2016-06-23'
]
def get_station_hash(self, record):
return "-".join([
record.station_na,
record.code,
record.polling_na,
])
def station_record_to_dict(self, record):
if not record.polling_na:
return
address = record.station_na
while "\n\n" in address:
address = address.replace("\n\n", "\n").strip()
postcode = " ".join(address.split(' ')[-2:]).strip().split('\n')[-1]
location = None
if float(record.polling_station_x) and float(record.polling_station_y):
if "Shapla Primary School" in address:
location = Point(
-0.066990,
51.510020,
srid=4326
)
else:
location = Point(
float(record.polling_station_x),
float(record.polling_station_y),
srid=27700)
else:
try:
location_data = geocode_point_only(postcode)
except PostcodeError:
pass
if location_data:
location = Point(
location_data['wgs84_lon'],
location_data['wgs84_lat'],
srid=4326)
return {
'internal_council_id': record.code,
'postcode' : postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
return {
'address' : record.fulladdress.strip(),
'postcode' : record.postcode.strip(),
'polling_station_id': record.code
}
| true | true |
f7317c3b8b64c75edaf8033692d2b70b5b070677 | 10,874 | py | Python | test/shake_test.py | arkottke/MapIO | dd6e347dce2d65b7bd4c489a03d8883d0e4210fc | [
"CC0-1.0"
] | null | null | null | test/shake_test.py | arkottke/MapIO | dd6e347dce2d65b7bd4c489a03d8883d0e4210fc | [
"CC0-1.0"
] | null | null | null | test/shake_test.py | arkottke/MapIO | dd6e347dce2d65b7bd4c489a03d8883d0e4210fc | [
"CC0-1.0"
] | 1 | 2019-11-09T16:05:37.000Z | 2019-11-09T16:05:37.000Z | #!/usr/bin/env python
#python 3 compatibility
from __future__ import print_function
#stdlib imports
from xml.dom import minidom
from datetime import datetime
from collections import OrderedDict
import re
import sys
import tempfile
import time
import shutil
if sys.version_info.major == 2:
import StringIO
else:
from io import StringIO
import os.path
#hack the path so that I can debug these functions if I need to
homedir = os.path.dirname(os.path.abspath(__file__)) #where is this script?
mapiodir = os.path.abspath(os.path.join(homedir,'..'))
sys.path.insert(0,mapiodir) #put this at the front of the system path, ignoring any installed mapio stuff
#third party
from mapio.shake import ShakeGrid
from mapio.gridbase import Grid
from mapio.multiple import MultiGrid
from mapio.dataset import DataSetException
from mapio.grid2d import Grid2D
from mapio.geodict import GeoDict
import numpy as np
def test_modify():
    """Check ShakeGrid.setLayer() replaces a layer's data array."""
    print('Testing ShakeGrid interpolate() method...')
    gdict = GeoDict(dict(xmin=0.5, xmax=6.5, ymin=1.5, ymax=6.5,
                         dx=1.0, dy=1.0, ny=6, nx=7))
    pga = np.arange(14, 56).reshape(6, 7)
    layers = OrderedDict(pga=pga)
    shake_meta = {
        'event_id': 'usabcd1234',
        'shakemap_id': 'usabcd1234',
        'shakemap_version': 1,
        'code_version': '4.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL',
    }
    event_meta = {
        'event_id': 'usabcd1234',
        'magnitude': 7.6,
        'depth': 1.4,
        'lat': 2.0,
        'lon': 2.0,
        'event_timestamp': datetime.utcnow(),
        'event_network': 'us',
        'event_description': 'sample event',
    }
    unc_meta = {'pga': (0.0, 0)}
    grid = ShakeGrid(layers, gdict, event_meta, shake_meta, unc_meta)
    # overwrite the pga layer and verify the new data is returned
    replacement = np.random.rand(*pga.shape)
    grid.setLayer('pga', replacement)
    np.testing.assert_almost_equal(replacement,
                                   grid.getLayer('pga').getData())
def test_interpolate():
    """Check linear interpolation of a ShakeGrid onto a coarser grid."""
    print('Testing ShakeGrid interpolate() method...')
    gdict = GeoDict(dict(xmin=0.5, xmax=6.5, ymin=1.5, ymax=6.5,
                         dx=1.0, dy=1.0, ny=6, nx=7))
    pga = np.arange(14, 56).reshape(6, 7)
    layers = OrderedDict(pga=pga)
    shake_meta = {
        'event_id': 'usabcd1234',
        'shakemap_id': 'usabcd1234',
        'shakemap_version': 1,
        'code_version': '4.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL',
    }
    event_meta = {
        'event_id': 'usabcd1234',
        'magnitude': 7.6,
        'depth': 1.4,
        'lat': 2.0,
        'lon': 2.0,
        'event_timestamp': datetime.utcnow(),
        'event_network': 'us',
        'event_description': 'sample event',
    }
    unc_meta = {'pga': (0.0, 0)}
    grid = ShakeGrid(layers, gdict, event_meta, shake_meta, unc_meta)
    # sample a 2x2 window in the interior of the source grid
    target = GeoDict(dict(xmin=3.0, xmax=4.0,
                          ymin=3.0, ymax=4.0,
                          dx=1.0, dy=1.0,
                          ny=2, nx=2))
    resampled = grid.interpolateToGrid(target, method='linear')
    expected = np.array([[34., 35.], [41., 42.]])
    np.testing.assert_almost_equal(expected,
                                   resampled.getLayer('pga').getData())
    print('Passed test of ShakeGrid interpolate() method.')
def test_read():
    """Round-trip a real grid.xml file through load() and save()."""
    xmlfile = os.path.join(homedir, 'data', 'northridge.xml')
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        shakegrid = ShakeGrid.load(xmlfile, adjust='res')
        start = time.time()
        shakegrid.save(testfile)
        elapsed = time.time() - start
        print('Saving shakemap took %.2f seconds' % elapsed)
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".'
              % (xmlfile, str(error)))
        assert 0 == 1
    finally:
        # always remove the scratch directory
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
def test_save():
    """Round-trip ShakeGrid through XML, exercising load-time subsetting.

    Covers plain save/load, getFileGeoDict, loading with bounds, padding
    without resampling, resampling without padding, and resampling with
    padding.
    """
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        print('Testing save/read functionality for shakemap grids...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5,
                           'ymin': 0.5, 'ymax': 3.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 4, 'nx': 4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shakeDict = {'event_id': 'usabcd1234',
                     'shakemap_id': 'usabcd1234',
                     'shakemap_version': 1,
                     'code_version': '4.0',
                     'process_timestamp': datetime.utcnow(),
                     'shakemap_originator': 'us',
                     'map_status': 'RELEASED',
                     'shakemap_event_type': 'ACTUAL'}
        eventDict = {'event_id': 'usabcd1234',
                     'magnitude': 7.6,
                     'depth': 1.4,
                     'lat': 2.0,
                     'lon': 2.0,
                     'event_timestamp': datetime.utcnow(),
                     'event_network': 'us',
                     'event_description': 'sample event'}
        uncDict = {'pga': (0.0, 0),
                   'pgv': (0.0, 0),
                   'mmi': (0.0, 0)}
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)

        print('Testing save/read functionality...')
        shake.save(testfile, version=3)
        shake2 = ShakeGrid.load(testfile)
        for layer in ['pga', 'pgv', 'mmi']:
            tdata = shake2.getLayer(layer).getData()
            np.testing.assert_almost_equal(tdata, layers[layer])
        print('Passed save/read functionality for shakemap grids.')

        print('Testing getFileGeoDict method...')
        fgeodict = ShakeGrid.getFileGeoDict(testfile)
        print('Passed save/read functionality for shakemap grids.')

        print('Testing loading with bounds (no resampling or padding)...')
        sampledict = GeoDict({'xmin': -0.5, 'xmax': 3.5,
                              'ymin': -0.5, 'ymax': 3.5,
                              'dx': 1.0, 'dy': 1.0,
                              'ny': 5, 'nx': 5})
        shake3 = ShakeGrid.load(testfile, samplegeodict=sampledict,
                                resample=False, doPadding=False,
                                padValue=np.nan)
        tdata = shake3.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, layers['pga'])
        print('Passed loading with bounds (no resampling or padding)...')

        print('Testing loading shakemap with padding, no resampling...')
        newdict = GeoDict({'xmin': -0.5, 'xmax': 4.5,
                           'ymin': -0.5, 'ymax': 4.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 6, 'nx': 6})
        shake4 = ShakeGrid.load(testfile, samplegeodict=newdict,
                                resample=False, doPadding=True,
                                padValue=np.nan)
        # original 4x4 grid surrounded by a one-cell NaN border
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 0.0, 1.0, 2.0, 3.0, np.nan],
                           [np.nan, 4.0, 5.0, 6.0, 7.0, np.nan],
                           [np.nan, 8.0, 9.0, 10.0, 11.0, np.nan],
                           [np.nan, 12.0, 13.0, 14.0, 15.0, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
        tdata = shake4.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed loading shakemap with padding, no resampling...')

        # make a bigger grid
        pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6)
        pgv = np.arange(1, 37, dtype=np.float32).reshape(6, 6)
        mmi = np.arange(2, 38, dtype=np.float32).reshape(6, 6)
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        geodict = GeoDict({'xmin': 0.5, 'xmax': 5.5,
                           'ymin': 0.5, 'ymax': 5.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 6, 'nx': 6})
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)

        print('Testing resampling, no padding...')
        littledict = GeoDict({'xmin': 2.0, 'xmax': 4.0,
                              'ymin': 2.0, 'ymax': 4.0,
                              'dx': 1.0, 'dy': 1.0,
                              'ny': 3, 'nx': 3})
        shake5 = ShakeGrid.load(testfile, samplegeodict=littledict,
                                resample=True, doPadding=False,
                                padValue=np.nan)
        # bilinear interpolation at cell-center offsets of the 6x6 ramp
        output = np.array([[10.5, 11.5, 12.5],
                           [16.5, 17.5, 18.5],
                           [22.5, 23.5, 24.5]])
        tdata = shake5.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed resampling, no padding...')

        print('Testing resampling and padding...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({'xmin': 0.5, 'ymax': 3.5,
                           'ymin': 0.5, 'xmax': 3.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 4, 'nx': 4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)
        bigdict = GeoDict({'xmin': 0.0, 'xmax': 4.0,
                           'ymin': 0.0, 'ymax': 4.0,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 5, 'nx': 5})
        shake6 = ShakeGrid.load(testfile, samplegeodict=bigdict,
                                resample=True, doPadding=True,
                                padValue=np.nan)
        tdata = shake6.getLayer('pga').getData()
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 2.5, 3.5, 4.5, np.nan],
                           [np.nan, 6.5, 7.5, 8.5, np.nan],
                           [np.nan, 10.5, 11.5, 12.5, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan]])
        np.testing.assert_almost_equal(tdata, output)
        print('Passed resampling and padding...')
    except Exception as error:
        # Bug fix: this handler previously referenced an undefined name
        # 'xmlfile', so any failure above was masked by a NameError.
        print('Failed save/load test on file "%s". Error "%s".'
              % (testfile, str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
if __name__ == '__main__':
    # Allow running this test module directly, outside of a test runner.
    test_modify()
    test_interpolate()
    test_read()
    test_save()
| 41.503817 | 112 | 0.532279 |
from __future__ import print_function
from xml.dom import minidom
from datetime import datetime
from collections import OrderedDict
import re
import sys
import tempfile
import time
import shutil
if sys.version_info.major == 2:
import StringIO
else:
from io import StringIO
import os.path
homedir = os.path.dirname(os.path.abspath(__file__))
mapiodir = os.path.abspath(os.path.join(homedir,'..'))
sys.path.insert(0,mapiodir)
from mapio.shake import ShakeGrid
from mapio.gridbase import Grid
from mapio.multiple import MultiGrid
from mapio.dataset import DataSetException
from mapio.grid2d import Grid2D
from mapio.geodict import GeoDict
import numpy as np
def test_modify():
print('Testing ShakeGrid interpolate() method...')
geodict = GeoDict({'xmin':0.5,'xmax':6.5,'ymin':1.5,'ymax':6.5,'dx':1.0,'dy':1.0,'ny':6,'nx':7})
data = np.arange(14,56).reshape(6,7)
layers = OrderedDict()
layers['pga'] = data
shakeDict = {'event_id':'usabcd1234',
'shakemap_id':'usabcd1234',
'shakemap_version':1,
'code_version':'4.0',
'process_timestamp':datetime.utcnow(),
'shakemap_originator':'us',
'map_status':'RELEASED',
'shakemap_event_type':'ACTUAL'}
eventDict = {'event_id':'usabcd1234',
'magnitude':7.6,
'depth':1.4,
'lat':2.0,
'lon':2.0,
'event_timestamp':datetime.utcnow(),
'event_network':'us',
'event_description':'sample event'}
uncDict = {'pga':(0.0,0)}
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
rdata = np.random.rand(data.shape[0],data.shape[1])
shake.setLayer('pga',rdata)
newdata = shake.getLayer('pga').getData()
np.testing.assert_almost_equal(rdata,newdata)
def test_interpolate():
print('Testing ShakeGrid interpolate() method...')
geodict = GeoDict({'xmin':0.5,'xmax':6.5,'ymin':1.5,'ymax':6.5,'dx':1.0,'dy':1.0,'ny':6,'nx':7})
data = np.arange(14,56).reshape(6,7)
layers = OrderedDict()
layers['pga'] = data
shakeDict = {'event_id':'usabcd1234',
'shakemap_id':'usabcd1234',
'shakemap_version':1,
'code_version':'4.0',
'process_timestamp':datetime.utcnow(),
'shakemap_originator':'us',
'map_status':'RELEASED',
'shakemap_event_type':'ACTUAL'}
eventDict = {'event_id':'usabcd1234',
'magnitude':7.6,
'depth':1.4,
'lat':2.0,
'lon':2.0,
'event_timestamp':datetime.utcnow(),
'event_network':'us',
'event_description':'sample event'}
uncDict = {'pga':(0.0,0)}
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
sampledict = GeoDict({'xmin':3.0,'xmax':4.0,
'ymin':3.0,'ymax':4.0,
'dx':1.0,'dy':1.0,
'ny':2,'nx':2})
shake2 = shake.interpolateToGrid(sampledict,method='linear')
output = np.array([[34.,35.],[41.,42.]])
np.testing.assert_almost_equal(output,shake2.getLayer('pga').getData())
print('Passed test of ShakeGrid interpolate() method.')
def test_read():
xmlfile = os.path.join(homedir,'data','northridge.xml')
tdir = tempfile.mkdtemp()
testfile = os.path.join(tdir,'test.xml')
try:
shakegrid = ShakeGrid.load(xmlfile,adjust='res')
t1 = time.time()
shakegrid.save(testfile)
t2 = time.time()
print('Saving shakemap took %.2f seconds' % (t2-t1))
except Exception as error:
print('Failed to read grid.xml format file "%s". Error "%s".' % (xmlfile,str(error)))
assert 0 == 1
finally:
if os.path.isdir(tdir):
shutil.rmtree(tdir)
def test_save():
tdir = tempfile.mkdtemp()
testfile = os.path.join(tdir,'test.xml')
try:
print('Testing save/read functionality for shakemap grids...')
pga = np.arange(0,16,dtype=np.float32).reshape(4,4)
pgv = np.arange(1,17,dtype=np.float32).reshape(4,4)
mmi = np.arange(2,18,dtype=np.float32).reshape(4,4)
geodict = GeoDict({'xmin':0.5,'xmax':3.5,
'ymin':0.5,'ymax':3.5,
'dx':1.0,'dy':1.0,
'ny':4,'nx':4})
layers = OrderedDict()
layers['pga'] = pga
layers['pgv'] = pgv
layers['mmi'] = mmi
shakeDict = {'event_id':'usabcd1234',
'shakemap_id':'usabcd1234',
'shakemap_version':1,
'code_version':'4.0',
'process_timestamp':datetime.utcnow(),
'shakemap_originator':'us',
'map_status':'RELEASED',
'shakemap_event_type':'ACTUAL'}
eventDict = {'event_id':'usabcd1234',
'magnitude':7.6,
'depth':1.4,
'lat':2.0,
'lon':2.0,
'event_timestamp':datetime.utcnow(),
'event_network':'us',
'event_description':'sample event'}
uncDict = {'pga':(0.0,0),
'pgv':(0.0,0),
'mmi':(0.0,0)}
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
print('Testing save/read functionality...')
shake.save(testfile,version=3)
shake2 = ShakeGrid.load(testfile)
for layer in ['pga','pgv','mmi']:
tdata = shake2.getLayer(layer).getData()
np.testing.assert_almost_equal(tdata,layers[layer])
print('Passed save/read functionality for shakemap grids.')
print('Testing getFileGeoDict method...')
fgeodict = ShakeGrid.getFileGeoDict(testfile)
print('Passed save/read functionality for shakemap grids.')
print('Testing loading with bounds (no resampling or padding)...')
sampledict = GeoDict({'xmin':-0.5,'xmax':3.5,
'ymin':-0.5,'ymax':3.5,
'dx':1.0,'dy':1.0,
'ny':5,'nx':5})
shake3 = ShakeGrid.load(testfile,samplegeodict=sampledict,
resample=False,doPadding=False,padValue=np.nan)
tdata = shake3.getLayer('pga').getData()
np.testing.assert_almost_equal(tdata,layers['pga'])
print('Passed loading with bounds (no resampling or padding)...')
print('Testing loading shakemap with padding, no resampling...')
newdict = GeoDict({'xmin':-0.5,'xmax':4.5,
'ymin':-0.5,'ymax':4.5,
'dx':1.0,'dy':1.0,
'ny':6,'nx':6})
shake4 = ShakeGrid.load(testfile,samplegeodict=newdict,
resample=False,doPadding=True,padValue=np.nan)
output = np.array([[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan],
[np.nan,0.0,1.0,2.0,3.0,np.nan],
[np.nan,4.0,5.0,6.0,7.0,np.nan],
[np.nan,8.0,9.0,10.0,11.0,np.nan],
[np.nan,12.0,13.0,14.0,15.0,np.nan],
[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]])
tdata = shake4.getLayer('pga').getData()
np.testing.assert_almost_equal(tdata,output)
print('Passed loading shakemap with padding, no resampling...')
pga = np.arange(0,36,dtype=np.float32).reshape(6,6)
pgv = np.arange(1,37,dtype=np.float32).reshape(6,6)
mmi = np.arange(2,38,dtype=np.float32).reshape(6,6)
layers = OrderedDict()
layers['pga'] = pga
layers['pgv'] = pgv
layers['mmi'] = mmi
geodict = GeoDict({'xmin':0.5,'xmax':5.5,
'ymin':0.5,'ymax':5.5,
'dx':1.0,'dy':1.0,
'ny':6,'nx':6})
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
shake.save(testfile,version=3)
print('Testing resampling, no padding...')
littledict = GeoDict({'xmin':2.0,'xmax':4.0,
'ymin':2.0,'ymax':4.0,
'dx':1.0,'dy':1.0,
'ny':3,'nx':3})
shake5 = ShakeGrid.load(testfile,samplegeodict=littledict,resample=True,doPadding=False,padValue=np.nan)
output = np.array([[10.5,11.5,12.5],
[16.5,17.5,18.5],
[22.5,23.5,24.5]])
tdata = shake5.getLayer('pga').getData()
np.testing.assert_almost_equal(tdata,output)
print('Passed resampling, no padding...')
print('Testing resampling and padding...')
pga = np.arange(0,16,dtype=np.float32).reshape(4,4)
pgv = np.arange(1,17,dtype=np.float32).reshape(4,4)
mmi = np.arange(2,18,dtype=np.float32).reshape(4,4)
geodict = GeoDict({'xmin':0.5,'ymax':3.5,
'ymin':0.5,'xmax':3.5,
'dx':1.0,'dy':1.0,
'ny':4,'nx':4})
layers = OrderedDict()
layers['pga'] = pga
layers['pgv'] = pgv
layers['mmi'] = mmi
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
shake.save(testfile,version=3)
bigdict = GeoDict({'xmin':0.0,'xmax':4.0,
'ymin':0.0,'ymax':4.0,
'dx':1.0,'dy':1.0,
'ny':5,'nx':5})
shake6 = ShakeGrid.load(testfile,samplegeodict=bigdict,resample=True,doPadding=True,padValue=np.nan)
tdata = shake6.getLayer('pga').getData()
output = np.array([[np.nan,np.nan,np.nan,np.nan,np.nan],
[np.nan,2.5,3.5,4.5,np.nan],
[np.nan,6.5,7.5,8.5,np.nan],
[np.nan,10.5,11.5,12.5,np.nan],
[np.nan,np.nan,np.nan,np.nan,np.nan]])
np.testing.assert_almost_equal(tdata,output)
print('Passed resampling and padding...')
except Exception as error:
print('Failed to read grid.xml format file "%s". Error "%s".' % (xmlfile,str(error)))
assert 0 == 1
finally:
if os.path.isdir(tdir):
shutil.rmtree(tdir)
if __name__ == '__main__':
test_modify()
test_interpolate()
test_read()
test_save()
| true | true |
f7317c790976f2907b99356b3b5fdcac78c33a12 | 1,014 | py | Python | python/tests/integration/postgres/test_postgres_results.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 14 | 2017-03-07T17:00:22.000Z | 2021-04-02T14:15:04.000Z | python/tests/integration/postgres/test_postgres_results.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 82 | 2017-02-16T16:54:18.000Z | 2018-06-04T13:40:32.000Z | python/tests/integration/postgres/test_postgres_results.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 16 | 2017-02-14T19:47:00.000Z | 2018-04-26T10:13:05.000Z | import pytest
from drydock_provisioner import objects
class TestPostgres(object):
    """Integration tests for persisting task result messages in Postgres."""

    def test_result_message_insert(self, populateddb, drydock_state):
        """Test that a result message for a task can be added."""
        msg1 = objects.TaskStatusMessage('Error 1', True, 'node', 'node1')
        msg2 = objects.TaskStatusMessage('Status 1', False, 'node', 'node1')

        result = drydock_state.post_result_message(populateddb.task_id, msg1)
        assert result
        result = drydock_state.post_result_message(populateddb.task_id, msg2)
        assert result

        task = drydock_state.get_task(populateddb.task_id)

        # Only msg1 was flagged as an error; both messages must be stored.
        assert task.result.error_count == 1
        assert len(task.result.message_list) == 2

    @pytest.fixture(scope='function')
    def populateddb(self, blank_state):
        """Add dummy task to test against."""
        task = objects.Task(
            action='prepare_site', design_ref='http://test.com/design')

        blank_state.post_task(task)

        return task
| 31.6875 | 77 | 0.675542 | import pytest
from drydock_provisioner import objects
class TestPostgres(object):
def test_result_message_insert(self, populateddb, drydock_state):
msg1 = objects.TaskStatusMessage('Error 1', True, 'node', 'node1')
msg2 = objects.TaskStatusMessage('Status 1', False, 'node', 'node1')
result = drydock_state.post_result_message(populateddb.task_id, msg1)
assert result
result = drydock_state.post_result_message(populateddb.task_id, msg2)
assert result
task = drydock_state.get_task(populateddb.task_id)
assert task.result.error_count == 1
assert len(task.result.message_list) == 2
@pytest.fixture(scope='function')
def populateddb(self, blank_state):
task = objects.Task(
action='prepare_site', design_ref='http://test.com/design')
blank_state.post_task(task)
return task
| true | true |
f7317c8199b599b34a14ce19dda9e2ac6e37c688 | 42,329 | py | Python | modin/pandas/test/dataframe/test_map_metadata.py | palash247/modin | 3f1e275b67a760f09db6944600c4b7f5e601cbde | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/pandas/test/dataframe/test_map_metadata.py | palash247/modin | 3f1e275b67a760f09db6944600c4b7f5e601cbde | [
"ECL-2.0",
"Apache-2.0"
] | 46 | 2020-08-28T09:12:51.000Z | 2021-04-20T00:01:04.000Z | modin/pandas/test/dataframe/test_map_metadata.py | monocilindro/modin | ffea4ee2d3556dc48c05dac7abb54b62c66f3153 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
from modin.utils import get_current_backend
from modin.pandas.test.utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
test_func_keys,
test_func_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
eval_general,
create_test_dfs,
)
from modin.config import NPartitions
# Fix the partition count so partition-level assertions are deterministic.
NPartitions.put(4)

# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_insert(modin_df, pandas_df, **kwargs):
    """Run ``DataFrame.insert`` on both frames through ``eval_general``.

    ``col`` is accepted as an alias for ``column`` (only when ``column`` is
    not also supplied), and default ``loc``/``column`` values are filled in
    when the caller omits them.
    """
    if "col" in kwargs and "column" not in kwargs:
        kwargs["column"] = kwargs.pop("col")
    call_kwargs = dict({"loc": 0, "column": "New column"}, **kwargs)
    eval_general(
        modin_df,
        pandas_df,
        operation=lambda df, **kw: df.insert(**kw),
        **call_kwargs,
    )
def test_indexing():
    """Basic ``[]`` and ``.loc`` indexing agrees with pandas.

    Covers single-column selection, list selection (including reordering),
    label-based row selection, mixed row/column ``.loc`` and the empty
    selection case.
    """
    modin_df = pd.DataFrame(
        dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9]), index=["a", "b", "c"]
    )
    pandas_df = pandas.DataFrame(
        dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9]), index=["a", "b", "c"]
    )

    # Identity: the frames themselves compare equal.
    modin_result = modin_df
    pandas_result = pandas_df
    df_equals(modin_result, pandas_result)

    # Single column as a Series.
    modin_result = modin_df["b"]
    pandas_result = pandas_df["b"]
    df_equals(modin_result, pandas_result)

    # Single column as a one-column DataFrame.
    modin_result = modin_df[["b"]]
    pandas_result = pandas_df[["b"]]
    df_equals(modin_result, pandas_result)

    # Multiple columns, reordered relative to the original.
    modin_result = modin_df[["b", "a"]]
    pandas_result = pandas_df[["b", "a"]]
    df_equals(modin_result, pandas_result)

    # Row selection by label.
    modin_result = modin_df.loc["b"]
    pandas_result = pandas_df.loc["b"]
    df_equals(modin_result, pandas_result)

    modin_result = modin_df.loc[["b"]]
    pandas_result = pandas_df.loc[["b"]]
    df_equals(modin_result, pandas_result)

    modin_result = modin_df.loc[["b", "a"]]
    pandas_result = pandas_df.loc[["b", "a"]]
    df_equals(modin_result, pandas_result)

    # Simultaneous row and column label selection.
    modin_result = modin_df.loc[["b", "a"], ["a", "c"]]
    pandas_result = pandas_df.loc[["b", "a"], ["a", "c"]]
    df_equals(modin_result, pandas_result)

    modin_result = modin_df.loc[:, ["a", "c"]]
    pandas_result = pandas_df.loc[:, ["a", "c"]]
    df_equals(modin_result, pandas_result)

    modin_result = modin_df.loc[:, ["c"]]
    pandas_result = pandas_df.loc[:, ["c"]]
    df_equals(modin_result, pandas_result)

    # Empty label list yields an empty frame with the original columns.
    modin_result = modin_df.loc[[]]
    pandas_result = pandas_df.loc[[]]
    df_equals(modin_result, pandas_result)
def test_empty_df():
    """Empty DataFrames behave like pandas.

    Checks emptiness/axis metadata for frames constructed with only an
    index, only columns, or nothing at all, and verifies that assigning a
    first column (list, string list, or Series) to an empty frame matches
    pandas.
    """
    # Index only, no columns.
    df = pd.DataFrame(index=["a", "b"])
    df_is_empty(df)
    assert_index_equal(df.index, pd.Index(["a", "b"]))
    assert len(df.columns) == 0

    # Columns only, no rows.
    df = pd.DataFrame(columns=["a", "b"])
    df_is_empty(df)
    assert len(df.index) == 0
    assert_index_equal(df.columns, pd.Index(["a", "b"]))

    # Fully empty frame.
    df = pd.DataFrame()
    df_is_empty(df)
    assert len(df.index) == 0
    assert len(df.columns) == 0

    # NOTE: the three cases above are deliberately repeated to re-check the
    # metadata after the earlier accesses.
    df = pd.DataFrame(index=["a", "b"])
    df_is_empty(df)
    assert_index_equal(df.index, pd.Index(["a", "b"]))
    assert len(df.columns) == 0

    df = pd.DataFrame(columns=["a", "b"])
    df_is_empty(df)
    assert len(df.index) == 0
    assert_index_equal(df.columns, pd.Index(["a", "b"]))

    df = pd.DataFrame()
    df_is_empty(df)
    assert len(df.index) == 0
    assert len(df.columns) == 0

    # Assign a list column to an empty frame.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = [1, 2, 3, 4, 5]
    pd_df["a"] = [1, 2, 3, 4, 5]
    df_equals(df, pd_df)

    # Assign a string-list column to an empty frame.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = list("ABCDEF")
    pd_df["a"] = list("ABCDEF")
    df_equals(df, pd_df)

    # Assign a Series column to an empty frame.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = pd.Series([1, 2, 3, 4, 5])
    pd_df["a"] = pandas.Series([1, 2, 3, 4, 5])
    df_equals(df, pd_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(request, data):
    """``DataFrame.abs`` matches pandas, including any raised exception."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    try:
        expected = pandas_df.abs()
    except Exception as err:
        # pandas raised; Modin must raise the same exception type.
        with pytest.raises(type(err)):
            modin_df.abs()
    else:
        df_equals(modin_df.abs(), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(data):
    """Column labels gain the prefix exactly as in pandas."""
    prefix = "TEST"
    modin_columns = pd.DataFrame(data).add_prefix(prefix).columns
    pandas_columns = pandas.DataFrame(data).add_prefix(prefix).columns
    df_equals(modin_columns, pandas_columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(request, data, testfunc):
    """``DataFrame.applymap`` matches pandas for each test function.

    Also verifies that passing a non-callable raises ``ValueError``.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # A non-callable argument must be rejected.
    with pytest.raises(ValueError):
        x = 2
        modin_df.applymap(x)

    try:
        pandas_result = pandas_df.applymap(testfunc)
    except Exception as e:
        # pandas raised; Modin must raise the same exception type.
        with pytest.raises(type(e)):
            modin_df.applymap(testfunc)
    else:
        modin_result = modin_df.applymap(testfunc)
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(request, data, testfunc):
    """``applymap`` matches pandas, restricted to numeric test frames."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Only exercise the check for numeric fixtures; other dtypes are
    # covered by test_applymap.
    if name_contains(request.node.name, numeric_dfs):
        try:
            pandas_result = pandas_df.applymap(testfunc)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.applymap(testfunc)
        else:
            modin_result = modin_df.applymap(testfunc)
            df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(data):
    """Column labels gain the suffix exactly as in pandas."""
    suffix = "TEST"
    modin_columns = pd.DataFrame(data).add_suffix(suffix).columns
    pandas_columns = pandas.DataFrame(data).add_suffix(suffix).columns
    df_equals(modin_columns, pandas_columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(data):
    """``DataFrame.at`` reads and writes scalars like pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    key1 = modin_df.columns[0]
    # Scalar read
    df_equals(modin_df.at[0, key1], pandas_df.at[0, key1])
    # Series-level .at access
    df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
    # Write Item
    modin_df_copy = modin_df.copy()
    pandas_df_copy = pandas_df.copy()
    modin_df_copy.at[1, key1] = modin_df.at[0, key1]
    pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
    df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
    """Both axes (index and columns) agree with pandas element-wise."""
    modin_axes = pd.DataFrame(data).axes
    pandas_axes = pandas.DataFrame(data).axes
    for modin_axis, pandas_axis in zip(modin_axes, pandas_axes):
        assert np.array_equal(modin_axis, pandas_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(data):
    """``DataFrame.copy`` returns a new object but may share partitions.

    A deep copy must be a distinct, equal object; a shallow copy
    (``copy(False)``) must reflect mutations of the original.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841

    # pandas_df is unused but there so there won't be confusing list comprehension
    # stuff in the pytest.mark.parametrize
    new_modin_df = modin_df.copy()

    assert new_modin_df is not modin_df

    # Partition sharing is a backend implementation detail; the base
    # Python backend does not expose partitions.
    if get_current_backend() != "BaseOnPython":
        assert np.array_equal(
            new_modin_df._query_compiler._modin_frame._partitions,
            modin_df._query_compiler._modin_frame._partitions,
        )
    assert new_modin_df is not modin_df
    df_equals(new_modin_df, modin_df)

    # Shallow copy tests
    modin_df = pd.DataFrame(data)
    modin_df_cp = modin_df.copy(False)

    # Mutating the original must be visible through the shallow copy.
    modin_df[modin_df.columns[0]] = 0
    df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(data):
    """Per-column dtypes agree with pandas."""
    df_equals(pd.DataFrame(data).dtypes, pandas.DataFrame(data).dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(data, key):
    """``DataFrame.get`` matches pandas with and without a fallback default."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(modin_df.get(key), pandas_df.get(key))
    df_equals(
        modin_df.get(key, default="default"),
        pandas_df.get(key, default="default"),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
    "drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(request, data, dummy_na, drop_first):
    """``pd.get_dummies`` matches pandas across ``dummy_na``/``drop_first``."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    try:
        pandas_result = pandas.get_dummies(
            pandas_df, dummy_na=dummy_na, drop_first=drop_first
        )
    except Exception as e:
        # pandas raised; Modin must raise the same exception type.
        with pytest.raises(type(e)):
            pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
    else:
        modin_result = pd.get_dummies(
            modin_df, dummy_na=dummy_na, drop_first=drop_first
        )
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
    """``DataFrame.isna`` produces the same boolean mask as pandas."""
    df_equals(pd.DataFrame(data).isna(), pandas.DataFrame(data).isna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
    """``DataFrame.isnull`` produces the same boolean mask as pandas."""
    df_equals(pd.DataFrame(data).isnull(), pandas.DataFrame(data).isnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(data):
    """``DataFrame.append`` matches pandas for dicts, rows, lists and frames.

    Exercises ``ignore_index`` with a dict payload, appending a Series row,
    appending a plain list, and ``verify_integrity`` with single and
    multiple frame arguments.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Append a plain dict, with and without index reset.
    data_to_append = {"append_a": 2, "append_b": 1000}

    ignore_idx_values = [True, False]

    for ignore in ignore_idx_values:
        try:
            pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append(data_to_append, ignore_index=ignore)
        else:
            modin_result = modin_df.append(data_to_append, ignore_index=ignore)
            df_equals(modin_result, pandas_result)

    # Append the last row as a Series.
    try:
        pandas_result = pandas_df.append(pandas_df.iloc[-1])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.append(modin_df.iloc[-1])
    else:
        modin_result = modin_df.append(modin_df.iloc[-1])
        df_equals(modin_result, pandas_result)

    # Append the last row as a bare list.
    try:
        pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.append(list(modin_df.iloc[-1]))
    else:
        modin_result = modin_df.append(list(modin_df.iloc[-1]))
        # Pandas has bug where sort=False is ignored
        # (https://github.com/pandas-dev/pandas/issues/35092), but Modin
        # now does the right thing, so for now manually sort to workaround
        # this. Once the Pandas bug is fixed and Modin upgrades to that
        # Pandas release, this sort will cause the test to fail, and the
        # next three lines should be deleted.
        if get_current_backend() != "BaseOnPython":
            assert list(modin_result.columns) == list(modin_df.columns) + [0]
            modin_result = modin_result[[0] + sorted(modin_df.columns)]
        df_equals(modin_result, pandas_result)

    # verify_integrity should reject duplicate index labels when True.
    verify_integrity_values = [True, False]

    for verify_integrity in verify_integrity_values:
        try:
            pandas_result = pandas_df.append(
                [pandas_df, pandas_df], verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append([modin_df, modin_df], verify_integrity=verify_integrity)
        else:
            modin_result = modin_df.append(
                [modin_df, modin_df], verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)

        try:
            pandas_result = pandas_df.append(
                pandas_df, verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append(modin_df, verify_integrity=verify_integrity)
        else:
            modin_result = modin_df.append(modin_df, verify_integrity=verify_integrity)
            df_equals(modin_result, pandas_result)
def test_astype():
    """``DataFrame.astype`` matches pandas for scalar and per-column dtypes.

    Covers numeric, string and category targets, a dtype dict, a dict with
    repeated keys, single-cell frames and an unknown-column ``KeyError``.
    """
    td = pandas.DataFrame(test_data["int_data"])[["col1", "index", "col3", "col4"]]
    modin_df = pd.DataFrame(td.values, index=td.index, columns=td.columns)
    expected_df = pandas.DataFrame(td.values, index=td.index, columns=td.columns)

    modin_df_casted = modin_df.astype(np.int32)
    expected_df_casted = expected_df.astype(np.int32)
    df_equals(modin_df_casted, expected_df_casted)

    modin_df_casted = modin_df.astype(np.float64)
    expected_df_casted = expected_df.astype(np.float64)
    df_equals(modin_df_casted, expected_df_casted)

    modin_df_casted = modin_df.astype(str)
    expected_df_casted = expected_df.astype(str)
    df_equals(modin_df_casted, expected_df_casted)

    modin_df_casted = modin_df.astype("category")
    expected_df_casted = expected_df.astype("category")
    df_equals(modin_df_casted, expected_df_casted)

    # Per-column target dtypes.
    dtype_dict = {"col1": np.int32, "index": np.int64, "col3": str}
    modin_df_casted = modin_df.astype(dtype_dict)
    expected_df_casted = expected_df.astype(dtype_dict)
    df_equals(modin_df_casted, expected_df_casted)

    # Ignore lint because this is testing bad input
    bad_dtype_dict = {"index": np.int32, "index": np.int64, "index": str}  # noqa F601
    modin_df_casted = modin_df.astype(bad_dtype_dict)
    expected_df_casted = expected_df.astype(bad_dtype_dict)
    df_equals(modin_df_casted, expected_df_casted)

    # Single-cell frame built via chained assignment.
    modin_df = pd.DataFrame(index=["row1"], columns=["col1"])
    modin_df["col1"]["row1"] = 11
    modin_df_casted = modin_df.astype(int)
    expected_df = pandas.DataFrame(index=["row1"], columns=["col1"])
    expected_df["col1"]["row1"] = 11
    expected_df_casted = expected_df.astype(int)
    df_equals(modin_df_casted, expected_df_casted)

    # Casting a non-existent column must raise KeyError.
    with pytest.raises(KeyError):
        modin_df.astype({"not_exists": np.uint8})
def test_astype_category():
    """Casting to ``category`` matches pandas values and dtypes."""
    modin_df = pd.DataFrame(
        {"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
    )
    pandas_df = pandas.DataFrame(
        {"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
    )

    # Cast a single column via a dtype dict.
    modin_result = modin_df.astype({"col1": "category"})
    pandas_result = pandas_df.astype({"col1": "category"})
    df_equals(modin_result, pandas_result)
    assert modin_result.dtypes.equals(pandas_result.dtypes)

    # Cast the whole frame.
    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_astype_category_large():
    """Category cast matches pandas on a frame large enough to span partitions."""
    series_length = 10_000
    modin_df = pd.DataFrame(
        {
            "col1": ["str{0}".format(i) for i in range(0, series_length)],
            "col2": [i for i in range(0, series_length)],
        }
    )
    pandas_df = pandas.DataFrame(
        {
            "col1": ["str{0}".format(i) for i in range(0, series_length)],
            "col2": [i for i in range(0, series_length)],
        }
    )

    # Cast a single column via a dtype dict.
    modin_result = modin_df.astype({"col1": "category"})
    pandas_result = pandas_df.astype({"col1": "category"})
    df_equals(modin_result, pandas_result)
    assert modin_result.dtypes.equals(pandas_result.dtypes)

    # Cast the whole frame.
    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtypes.equals(pandas_result.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(request, data, axis):
    """``DataFrame.clip`` matches pandas for scalar and per-label bounds.

    Also checks that a list bound with ``axis=None`` raises ``ValueError``.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    if name_contains(request.node.name, numeric_dfs):
        ind_len = (
            len(modin_df.index)
            if not pandas.DataFrame()._get_axis_number(axis)
            else len(modin_df.columns)
        )
        # set bounds
        # NOTE: `RandomState.random_integers` was deprecated in NumPy 1.11 and
        # has been removed; `randint` has an exclusive upper bound, so add 1 to
        # keep the inclusive [RAND_LOW, RAND_HIGH] range.
        lower, upper = np.sort(random_state.randint(RAND_LOW, RAND_HIGH + 1, 2))
        lower_list = random_state.randint(RAND_LOW, RAND_HIGH + 1, ind_len)
        upper_list = random_state.randint(RAND_LOW, RAND_HIGH + 1, ind_len)

        # test only upper scalar bound
        modin_result = modin_df.clip(None, upper, axis=axis)
        pandas_result = pandas_df.clip(None, upper, axis=axis)
        df_equals(modin_result, pandas_result)

        # test lower and upper scalar bound
        modin_result = modin_df.clip(lower, upper, axis=axis)
        pandas_result = pandas_df.clip(lower, upper, axis=axis)
        df_equals(modin_result, pandas_result)

        # test lower and upper list bound on each column
        modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
        pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
        df_equals(modin_result, pandas_result)

        # test only upper list bound on each column
        modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
        pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
        df_equals(modin_result, pandas_result)

        # A list bound requires an explicit axis.
        with pytest.raises(ValueError):
            modin_df.clip(lower=[1, 2, 3], axis=None)
def test_drop():
    """``DataFrame.drop`` matches pandas for labels, axes and error modes.

    Covers dropping by column/row label, invalid labels, ``errors='ignore'``,
    non-unique labels on both axes, ``inplace`` with a computed label set,
    and level-based dropping on a MultiIndex (currently default-to-pandas,
    hence the warning check).
    """
    frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
    simple = pandas.DataFrame(frame_data)
    modin_simple = pd.DataFrame(frame_data)
    df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
    df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
    df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
    df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])

    # Unknown labels must raise by default.
    pytest.raises(ValueError, modin_simple.drop, 5)
    pytest.raises(ValueError, modin_simple.drop, "C", 1)
    pytest.raises(ValueError, modin_simple.drop, [1, 5])
    pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)

    # errors = 'ignore'
    df_equals(modin_simple.drop(5, errors="ignore"), simple)
    df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
    df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
    df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])

    # non-unique
    nu_df = pandas.DataFrame(
        zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
    )
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
    df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
    df_equals(modin_nu_df.drop([]), nu_df)

    # Duplicate row labels.
    nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
    nu_df.columns = list("abc")
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
    df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])

    # inplace cache issue
    frame_data = random_state.randn(10, 3)
    df = pandas.DataFrame(frame_data, columns=list("abc"))
    modin_df = pd.DataFrame(frame_data, columns=list("abc"))
    expected = df[~(df.b > 0)]
    modin_df.drop(labels=df[df.b > 0].index, inplace=True)
    df_equals(modin_df, expected)

    midx = pd.MultiIndex(
        levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    df = pd.DataFrame(
        index=midx,
        columns=["big", "small"],
        data=[
            [45, 30],
            [200, 100],
            [1.5, 1],
            [30, 20],
            [250, 150],
            [1.5, 0.8],
            [320, 250],
            [1, 0.8],
            [0.3, 0.2],
        ],
    )
    # Dropping by level currently defaults to pandas and warns.
    with pytest.warns(UserWarning):
        df.drop(index="length", level=1)
def test_drop_api_equivalence():
    """``drop(labels=, axis=)`` and ``drop(index=/columns=)`` are equivalent.

    Also verifies that mixing the two calling conventions raises
    ``ValueError``.
    """
    # equivalence of the labels/axis and index/columns API's
    frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]

    modin_df = pd.DataFrame(frame_data, index=["a", "b", "c"], columns=["d", "e", "f"])

    modin_df1 = modin_df.drop("a")
    modin_df2 = modin_df.drop(index="a")
    df_equals(modin_df1, modin_df2)

    modin_df1 = modin_df.drop("d", 1)
    modin_df2 = modin_df.drop(columns="d")
    df_equals(modin_df1, modin_df2)

    modin_df1 = modin_df.drop(labels="e", axis=1)
    modin_df2 = modin_df.drop(columns="e")
    df_equals(modin_df1, modin_df2)

    modin_df1 = modin_df.drop(["a"], axis=0)
    modin_df2 = modin_df.drop(index=["a"])
    df_equals(modin_df1, modin_df2)

    modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
    modin_df2 = modin_df.drop(index=["a"], columns=["d"])
    df_equals(modin_df1, modin_df2)

    # labels= cannot be combined with index=/columns=.
    with pytest.raises(ValueError):
        modin_df.drop(labels="a", index="b")

    with pytest.raises(ValueError):
        modin_df.drop(labels="a", columns="b")

    # At least one of labels/index/columns is required.
    with pytest.raises(ValueError):
        modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(data):
    """``drop`` on a transposed frame matches pandas for both axes."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Columns of the transpose are the original row positions.
    modin_result = modin_df.T.drop(columns=[0, 1, 2])
    pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
    df_equals(modin_result, pandas_result)

    # Index of the transpose holds the original column labels.
    modin_result = modin_df.T.drop(index=["col3", "col1"])
    pandas_result = pandas_df.T.drop(index=["col3", "col1"])
    df_equals(modin_result, pandas_result)

    # Both axes at once.
    modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
    pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
    df_equals(modin_result, pandas_result)
def test_droplevel():
    """``droplevel`` runs on both a MultiIndex index and MultiIndex columns.

    Smoke test only — the results are not compared against pandas here.
    """
    df = (
        pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
        .set_index([0, 1])
        .rename_axis(["a", "b"])
    )
    df.columns = pd.MultiIndex.from_tuples(
        [("c", "e"), ("d", "f")], names=["level_1", "level_2"]
    )
    df.droplevel("a")
    df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
    "subset",
    [None, "col1", "name", ("col1", "col3"), ["col1", "col3", "col7"]],
    ids=["None", "string", "name", "tuple", "list"],
)
def test_drop_duplicates(data, keep, subset):
    """``drop_duplicates`` matches pandas across ``keep``/``subset``/``inplace``."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Non-inplace variant: compare results (or exception types).
    try:
        pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset)
    else:
        df_equals(
            pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
            modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
        )

    # Inplace variant: compare returned values (None on success).
    try:
        pandas_results = pandas_df.drop_duplicates(
            keep=keep, inplace=True, subset=subset
        )
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
    else:
        modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
        df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values():
    """``drop_duplicates`` after a sort matches pandas on a gapped index.

    The index has gaps (4..15, 20..27, 32..41), so position-to-label
    translation inside the deduplication must not assume a contiguous
    RangeIndex.
    """
    data = {
        "columns": ["value", "time", "id"],
        "index": [
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15,
            20,
            21,
            22,
            23,
            24,
            25,
            26,
            27,
            32,
            33,
            34,
            35,
            36,
            37,
            38,
            39,
            40,
            41,
        ],
        "data": [
            ["3", 1279213398000.0, 88.0],
            ["3", 1279204682000.0, 88.0],
            ["0", 1245772835000.0, 448.0],
            ["0", 1270564258000.0, 32.0],
            ["0", 1267106669000.0, 118.0],
            ["7", 1300621123000.0, 5.0],
            ["0", 1251130752000.0, 957.0],
            ["0", 1311683506000.0, 62.0],
            ["9", 1283692698000.0, 89.0],
            ["9", 1270234253000.0, 64.0],
            ["0", 1285088818000.0, 50.0],
            ["0", 1218212725000.0, 695.0],
            ["2", 1383933968000.0, 348.0],
            ["0", 1368227625000.0, 257.0],
            ["1", 1454514093000.0, 446.0],
            ["1", 1428497427000.0, 134.0],
            ["1", 1459184936000.0, 568.0],
            ["1", 1502293302000.0, 599.0],
            ["1", 1491833358000.0, 829.0],
            ["1", 1485431534000.0, 806.0],
            ["8", 1351800505000.0, 101.0],
            ["0", 1357247721000.0, 916.0],
            ["0", 1335804423000.0, 370.0],
            ["24", 1327547726000.0, 720.0],
            ["0", 1332334140000.0, 415.0],
            ["0", 1309543100000.0, 30.0],
            ["18", 1309541141000.0, 30.0],
            ["0", 1298979435000.0, 48.0],
            ["14", 1276098160000.0, 59.0],
            ["0", 1233936302000.0, 109.0],
        ],
    }

    pandas_df = pandas.DataFrame(
        data["data"], index=data["index"], columns=data["columns"]
    )
    modin_df = pd.DataFrame(data["data"], index=data["index"], columns=data["columns"])
    modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort():
    """``drop_duplicates`` on a sorted frame keeps the same rows as pandas."""
    data = [
        {"value": 1, "time": 2},
        {"value": 1, "time": 1},
        {"value": 2, "time": 1},
        {"value": 2, "time": 2},
    ]
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Sorting by (value, time) first means the earliest time survives
    # deduplication on "value".
    modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(["value"])
    pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(["value"])
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(data, axis, how):
    """``DataFrame.dropna`` matches pandas and rejects invalid arguments."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Invalid `how` value.
    with pytest.raises(ValueError):
        modin_df.dropna(axis=axis, how="invalid")

    # `how` and `thresh` cannot both be unset.
    with pytest.raises(TypeError):
        modin_df.dropna(axis=axis, how=None, thresh=None)

    # Unknown subset label.
    with pytest.raises(KeyError):
        modin_df.dropna(axis=axis, subset=["NotExists"], how=how)

    modin_result = modin_df.dropna(axis=axis, how=how)
    pandas_result = pandas_df.dropna(axis=axis, how=how)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(data):
    """``dropna(inplace=True)`` mutates the frame identically to pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Default arguments: compare the mutated Modin frame against the
    # non-inplace pandas result.
    pandas_result = pandas_df.dropna()
    modin_df.dropna(inplace=True)
    df_equals(modin_df, pandas_result)

    # thresh-based dropping, both inplace.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(thresh=2, inplace=True)
    modin_df.dropna(thresh=2, inplace=True)
    df_equals(modin_df, pandas_df)

    # Column-axis dropping, both inplace.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(axis=1, how="any", inplace=True)
    modin_df.dropna(axis=1, how="any", inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(data):
    """Passing a list or tuple of axes to ``dropna`` raises ``TypeError``."""
    modin_df = pd.DataFrame(data)
    for bad_axis in ([0, 1], (0, 1)):
        with pytest.raises(TypeError):
            modin_df.dropna(how="all", axis=bad_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(request, data):
    """``dropna`` restricted to a label subset matches pandas on both axes."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Empty frames have no labels to form a subset from.
    if "empty_data" not in request.node.name:
        # Row dropping restricted to the first two columns.
        column_subset = modin_df.columns[0:2]
        df_equals(
            modin_df.dropna(how="all", subset=column_subset),
            pandas_df.dropna(how="all", subset=column_subset),
        )
        df_equals(
            modin_df.dropna(how="any", subset=column_subset),
            pandas_df.dropna(how="any", subset=column_subset),
        )

        # Column dropping restricted to the first two rows.
        row_subset = modin_df.index[0:2]
        df_equals(
            modin_df.dropna(how="all", axis=1, subset=row_subset),
            pandas_df.dropna(how="all", axis=1, subset=row_subset),
        )
        df_equals(
            modin_df.dropna(how="any", axis=1, subset=row_subset),
            pandas_df.dropna(how="any", axis=1, subset=row_subset),
        )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis,subset", [(0, list("EF")), (1, [4, 5])])
def test_dropna_subset_error(data, axis, subset):
    """A ``subset`` containing missing labels errors the same way as pandas."""
    modin_df, pandas_df = create_test_dfs(data)
    eval_general(modin_df, pandas_df, lambda df: df.dropna(axis=axis, subset=subset))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("astype", ["category", "int32", "float"])
def test_insert_dtypes(data, astype):
    """Inserting a column cast to another dtype matches pandas."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)

    # categories with NaN works incorrect for now
    if astype == "category" and pandas_df.iloc[:, 0].isnull().any():
        return

    eval_insert(
        modin_df,
        pandas_df,
        col="TypeSaver",
        value=lambda df: df.iloc[:, 0].astype(astype),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert_loc(data, loc):
    """Inserting a copy of the first column at an arbitrary position matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    eval_insert(modin_df, pandas_df, loc=loc, value=modin_df.iloc[:, 0])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_insert(data):
    """``DataFrame.insert`` matches pandas for valid and invalid payloads.

    Valid cases: duplicating a column, a scalar, inserting into empty
    frames, a one-column DataFrame, and a DataFrame with a reversed index.
    Invalid cases: a multi-column frame, a too-short list, a duplicate
    column name, and an out-of-range ``loc``.
    """
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)

    eval_insert(
        modin_df, pandas_df, col="Duplicate", value=lambda df: df[df.columns[0]]
    )
    eval_insert(modin_df, pandas_df, col="Scalar", value=100)
    # Insert into a frame with columns but no rows.
    eval_insert(
        pd.DataFrame(columns=list("ab")),
        pandas.DataFrame(columns=list("ab")),
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    # Insert into a frame with rows but no columns.
    eval_insert(
        pd.DataFrame(index=modin_df.index),
        pandas.DataFrame(index=pandas_df.index),
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    eval_insert(
        modin_df,
        pandas_df,
        col="DataFrame insert",
        value=lambda df: df[[df.columns[0]]],
    )
    # The inserted frame is aligned on index, not position.
    eval_insert(
        modin_df,
        pandas_df,
        col="Different indices",
        value=lambda df: df[[df.columns[0]]].set_index(df.index[::-1]),
    )

    # Bad inserts
    eval_insert(modin_df, pandas_df, col="Bad Column", value=lambda df: df)
    eval_insert(
        modin_df,
        pandas_df,
        col="Too Short",
        value=lambda df: list(df[df.columns[0]])[:-1],
    )
    # Duplicate column name is rejected by default.
    eval_insert(
        modin_df,
        pandas_df,
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    # loc beyond the number of columns is rejected.
    eval_insert(
        modin_df,
        pandas_df,
        loc=lambda df: len(df.columns) + 100,
        col="Bad Loc",
        value=100,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(data):
    """A DataFrame always reports the same dimensionality as pandas."""
    assert pd.DataFrame(data).ndim == pandas.DataFrame(data).ndim
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
    """``DataFrame.notna`` produces the same boolean mask as pandas."""
    df_equals(pd.DataFrame(data).notna(), pandas.DataFrame(data).notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
    """``DataFrame.notnull`` produces the same boolean mask as pandas."""
    df_equals(pd.DataFrame(data).notnull(), pandas.DataFrame(data).notnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(data):
    """Rounding with the default and an explicit precision matches pandas."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(modin_df.round(), pandas_df.round())
    df_equals(modin_df.round(decimals=1), pandas_df.round(decimals=1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(data, axis):
    """``set_axis`` matches pandas for both inplace and non-inplace modes."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Build fresh, unique labels derived from the current axis labels.
    x = pandas.DataFrame()._get_axis_number(axis)
    index = modin_df.columns if x else modin_df.index
    labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]

    modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
    pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
    df_equals(modin_result, pandas_result)

    modin_df_copy = modin_df.copy()
    modin_df.set_axis(labels, axis=axis, inplace=True)

    # Check that the copy and original are different
    try:
        df_equals(modin_df, modin_df_copy)
    except AssertionError:
        assert True
    else:
        assert False

    pandas_df.set_axis(labels, axis=axis, inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys))
@pytest.mark.parametrize(
    "append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(request, data, drop, append):
    """``set_index`` parity with pandas, both out-of-place and in-place."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Empty frames have no column to index by, hence this guard.
    if "empty_data" not in request.node.name:
        key = modin_df.columns[0]
        modin_result = modin_df.set_index(key, drop=drop, append=append, inplace=False)
        pandas_result = pandas_df.set_index(
            key, drop=drop, append=append, inplace=False
        )
        df_equals(modin_result, pandas_result)
        modin_df_copy = modin_df.copy()
        modin_df.set_index(key, drop=drop, append=append, inplace=True)
        # Check that the copy and original are different
        # (df_equals must raise; the inverted try/except asserts exactly that).
        try:
            df_equals(modin_df, modin_df_copy)
        except AssertionError:
            assert True
        else:
            assert False
        pandas_df.set_index(key, drop=drop, append=append, inplace=True)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(data):
    """The (rows, columns) tuple must equal pandas'."""
    assert pd.DataFrame(data).shape == pandas.DataFrame(data).shape


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(data):
    """The total element count must equal pandas'."""
    assert pd.DataFrame(data).size == pandas.DataFrame(data).size
def test_squeeze():
    """``squeeze`` parity with pandas for frames of various shapes."""
    # Multi-row, multi-column: squeeze is a no-op.
    frame_data = {
        "col1": [0, 1, 2, 3],
        "col2": [4, 5, 6, 7],
        "col3": [8, 9, 10, 11],
        "col4": [12, 13, 14, 15],
        "col5": [0, 0, 0, 0],
    }
    # Single column: squeezes to a Series.
    frame_data_2 = {"col1": [0, 1, 2, 3]}
    # Single row: squeezes to a Series.
    frame_data_3 = {
        "col1": [0],
        "col2": [4],
        "col3": [8],
        "col4": [12],
        "col5": [0],
    }
    # Single cell: squeezes all the way down to a scalar.
    frame_data_4 = {"col1": [2]}
    frame_data_5 = {"col1": ["string"]}
    # Different data for different cases
    pandas_df = pandas.DataFrame(frame_data).squeeze()
    modin_df = pd.DataFrame(frame_data).squeeze()
    df_equals(modin_df, pandas_df)
    pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
    modin_df_2 = pd.DataFrame(frame_data_2).squeeze()
    df_equals(modin_df_2, pandas_df_2)
    pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
    modin_df_3 = pd.DataFrame(frame_data_3).squeeze()
    df_equals(modin_df_3, pandas_df_3)
    pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
    modin_df_4 = pd.DataFrame(frame_data_4).squeeze()
    df_equals(modin_df_4, pandas_df_4)
    pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
    modin_df_5 = pd.DataFrame(frame_data_5).squeeze()
    df_equals(modin_df_5, pandas_df_5)
    # Row selection via iloc on a transposed, datetime-indexed frame must
    # also squeeze identically to pandas.
    data = [
        [
            pd.Timestamp("2019-01-02"),
            pd.Timestamp("2019-01-03"),
            pd.Timestamp("2019-01-04"),
            pd.Timestamp("2019-01-05"),
        ],
        [1, 1, 1, 2],
    ]
    df = pd.DataFrame(data, index=["date", "value"]).T
    pf = pandas.DataFrame(data, index=["date", "value"]).T
    df.set_index("date", inplace=True)
    pf.set_index("date", inplace=True)
    df_equals(df.iloc[0], pf.iloc[0])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(data):
    """Transposition parity, including reductions applied after ``.T``."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.T, pdf.T)
    df_equals(md.transpose(), pdf.transpose())
    # Map across full axis for select indices
    df_equals(md.T.dropna(), pdf.T.dropna())
    # Map across full axis
    df_equals(md.T.nunique(), pdf.T.nunique())
    # Map across blocks
    df_equals(md.T.notna(), pdf.T.notna())
@pytest.mark.parametrize(
    "data, other_data",
    [
        # Overlapping and new columns in the other frame.
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, 5, 6], "C": [7, 8, 9]}),
        # Other frame longer than self.
        (
            {"A": ["a", "b", "c"], "B": ["x", "y", "z"]},
            {"B": ["d", "e", "f", "g", "h", "i"]},
        ),
        # NaN in the other frame must not overwrite existing values.
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, np.nan, 6]}),
    ],
)
def test_update(data, other_data):
    """In-place ``update`` parity with pandas, plus the errors='raise' path."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    other_modin_df, other_pandas_df = (
        pd.DataFrame(other_data),
        pandas.DataFrame(other_data),
    )
    modin_df.update(other_modin_df)
    pandas_df.update(other_pandas_df)
    df_equals(modin_df, pandas_df)
    # Updating overlapping non-NA values with errors="raise" must fail.
    with pytest.raises(ValueError):
        modin_df.update(other_modin_df, errors="raise")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(request, data):
    """Unary negation matches pandas, including raised exception types."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    try:
        expected = pdf.__neg__()
    except Exception as err:
        # pandas raised -- Modin must raise the same exception type.
        with pytest.raises(type(err)):
            md.__neg__()
    else:
        df_equals(md.__neg__(), expected)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(data):
    """Bitwise inversion matches pandas, including raised exception types."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    try:
        expected = ~pdf
    except Exception as err:
        # repr() forces Modin's lazy result so the error actually surfaces.
        with pytest.raises(type(err)):
            repr(~md)
    else:
        df_equals(~md, expected)
def test___hash__():
    """Hashing a DataFrame must emit a UserWarning; TypeError is tolerated."""
    with pytest.warns(UserWarning):
        try:
            pd.DataFrame(test_data_values[0]).__hash__()
        except TypeError:
            # DataFrames are unhashable; only the warning matters here.
            pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(request, data):
    """``del df[col]`` parity with pandas, including the last column."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Empty frames have no column to delete, hence this guard.
    if "empty_data" not in request.node.name:
        key = pandas_df.columns[0]
        modin_df = modin_df.copy()
        pandas_df = pandas_df.copy()
        modin_df.__delitem__(key)
        pandas_df.__delitem__(key)
        df_equals(modin_df, pandas_df)
        # Issue 2027: deleting the last (rightmost) column used to fail.
        last_label = pandas_df.iloc[:, -1].name
        modin_df.__delitem__(last_label)
        pandas_df.__delitem__(last_label)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___nonzero__(data):
    """A DataFrame's truth value is always ambiguous and must raise."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841
    with pytest.raises(ValueError):
        # Always raises ValueError, regardless of contents.
        modin_df.__nonzero__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(request, data):
    """``abs(df)`` matches pandas, including raised exception types."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    try:
        expected = abs(pdf)
    except Exception as err:
        with pytest.raises(type(err)):
            abs(md)
    else:
        df_equals(abs(md), expected)


def test___round__():
    """``__round__`` is expected to warn (defaults to pandas)."""
    with pytest.warns(UserWarning):
        pd.DataFrame(test_data_values[0]).__round__()
| 32.585835 | 88 | 0.644924 |
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
from modin.utils import get_current_backend
from modin.pandas.test.utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
test_func_keys,
test_func_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
eval_general,
create_test_dfs,
)
from modin.config import NPartitions
NPartitions.put(4)
matplotlib.use("Agg")
def eval_insert(modin_df, pandas_df, **kwargs):
    """Run ``DataFrame.insert`` on both frames through ``eval_general``.

    Accepts ``col`` as a convenience alias for the ``column`` keyword and
    fills in default ``loc``/``column`` values when the caller omits them.
    """
    if "col" in kwargs and "column" not in kwargs:
        kwargs["column"] = kwargs.pop("col")
    merged = dict({"loc": 0, "column": "New column"}, **kwargs)
    eval_general(
        modin_df,
        pandas_df,
        operation=lambda df, **kw: df.insert(**kw),
        **merged,
    )
def test_indexing():
    """Basic ``__getitem__`` and ``.loc`` lookups must agree with pandas."""
    raw = dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9])
    modin_df = pd.DataFrame(raw, index=["a", "b", "c"])
    pandas_df = pandas.DataFrame(raw, index=["a", "b", "c"])
    # Each lambda is applied to both frames and the results compared.
    indexers = [
        lambda df: df,
        lambda df: df["b"],
        lambda df: df[["b"]],
        lambda df: df[["b", "a"]],
        lambda df: df.loc["b"],
        lambda df: df.loc[["b"]],
        lambda df: df.loc[["b", "a"]],
        lambda df: df.loc[["b", "a"], ["a", "c"]],
        lambda df: df.loc[:, ["a", "c"]],
        lambda df: df.loc[:, ["c"]],
        lambda df: df.loc[[]],
    ]
    for indexer in indexers:
        df_equals(indexer(modin_df), indexer(pandas_df))
def test_empty_df():
    """Empty-frame construction and column assignment parity with pandas.

    The original ran the three construction checks twice, verbatim; the
    duplicate copy-paste block has been removed — the checks are
    side-effect free, so repeating them added no coverage.
    """
    # Index only: columns must be empty.
    df = pd.DataFrame(index=["a", "b"])
    df_is_empty(df)
    assert_index_equal(df.index, pd.Index(["a", "b"]))
    assert len(df.columns) == 0
    # Columns only: index must be empty.
    df = pd.DataFrame(columns=["a", "b"])
    df_is_empty(df)
    assert len(df.index) == 0
    assert_index_equal(df.columns, pd.Index(["a", "b"]))
    # Fully empty frame.
    df = pd.DataFrame()
    df_is_empty(df)
    assert len(df.index) == 0
    assert len(df.columns) == 0
    # Assigning into an empty frame must match pandas: list of ints.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = [1, 2, 3, 4, 5]
    pd_df["a"] = [1, 2, 3, 4, 5]
    df_equals(df, pd_df)
    # ...list of strings.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = list("ABCDEF")
    pd_df["a"] = list("ABCDEF")
    df_equals(df, pd_df)
    # ...Series.
    df = pd.DataFrame()
    pd_df = pandas.DataFrame()
    df["a"] = pd.Series([1, 2, 3, 4, 5])
    pd_df["a"] = pandas.Series([1, 2, 3, 4, 5])
    df_equals(df, pd_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(request, data):
    """``DataFrame.abs`` matches pandas, including raised exception types."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    try:
        expected = pdf.abs()
    except Exception as err:
        with pytest.raises(type(err)):
            md.abs()
    else:
        df_equals(md.abs(), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(data):
    """Column labels gain the prefix exactly as in pandas."""
    prefix = "TEST"
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.add_prefix(prefix).columns, pdf.add_prefix(prefix).columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(request, data, testfunc):
    """``applymap`` parity: bad-input rejection plus element-wise results."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    # A non-callable argument must be rejected.
    with pytest.raises(ValueError):
        x = 2
        md.applymap(x)
    try:
        expected = pdf.applymap(testfunc)
    except Exception as err:
        with pytest.raises(type(err)):
            md.applymap(testfunc)
    else:
        df_equals(md.applymap(testfunc), expected)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(request, data, testfunc):
    """``applymap`` parity restricted to the numeric fixtures."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    if name_contains(request.node.name, numeric_dfs):
        try:
            expected = pdf.applymap(testfunc)
        except Exception as err:
            with pytest.raises(type(err)):
                md.applymap(testfunc)
        else:
            df_equals(md.applymap(testfunc), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(data):
    """Column labels gain the suffix exactly as in pandas."""
    suffix = "TEST"
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.add_suffix(suffix).columns, pdf.add_suffix(suffix).columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(data):
    """Scalar ``.at`` access matches pandas for both reads and writes."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    first_col = md.columns[0]
    # Reads: directly and through a ``.loc`` row view.
    df_equals(md.at[0, first_col], pdf.at[0, first_col])
    df_equals(md.loc[0].at[first_col], pdf.loc[0].at[first_col])
    # Writes happen on copies so the originals stay untouched.
    md_copy, pdf_copy = md.copy(), pdf.copy()
    md_copy.at[1, first_col] = md.at[0, first_col]
    pdf_copy.at[1, first_col] = pdf.at[0, first_col]
    df_equals(md_copy, pdf_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
    """Both axes (index and columns) must be element-wise equal to pandas."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    for modin_axis, pandas_axis in zip(md.axes, pdf.axes):
        assert np.array_equal(modin_axis, pandas_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(data):
    """``copy`` returns a new object; a deep copy shares partitions, a
    shallow copy shares data with the original."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841 -- kept for parity with sibling tests
    new_modin_df = modin_df.copy()
    assert new_modin_df is not modin_df
    # Partition objects are shared between the copy and the original on
    # partitioned backends (copy-on-write at the partition level).
    if get_current_backend() != "BaseOnPython":
        assert np.array_equal(
            new_modin_df._query_compiler._modin_frame._partitions,
            modin_df._query_compiler._modin_frame._partitions,
        )
    assert new_modin_df is not modin_df
    df_equals(new_modin_df, modin_df)
    # Shallow copy tests
    modin_df = pd.DataFrame(data)
    modin_df_cp = modin_df.copy(False)
    # Mutating the original must be visible through the shallow copy.
    modin_df[modin_df.columns[0]] = 0
    df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(data):
    """Per-column dtypes must agree with pandas."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.dtypes, pdf.dtypes)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(data, key):
    """``get`` parity with and without an explicit default value."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.get(key), pdf.get(key))
    df_equals(md.get(key, default="default"), pdf.get(key, default="default"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
    "drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(request, data, dummy_na, drop_first):
    """Top-level ``get_dummies`` parity, including raised exception types."""
    md = pd.DataFrame(data)
    pdf = pandas.DataFrame(data)
    kwargs = dict(dummy_na=dummy_na, drop_first=drop_first)
    try:
        expected = pandas.get_dummies(pdf, **kwargs)
    except Exception as err:
        with pytest.raises(type(err)):
            pd.get_dummies(md, **kwargs)
    else:
        df_equals(pd.get_dummies(md, **kwargs), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
    """Element-wise null mask must agree with pandas."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.isna(), pdf.isna())


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
    """``isnull`` is an alias of ``isna``; verify it independently."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md.isnull(), pdf.isnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(data):
    """``append`` parity with pandas for dicts, rows, lists and frame lists."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Appending a plain dict, with and without ignore_index.
    data_to_append = {"append_a": 2, "append_b": 1000}
    ignore_idx_values = [True, False]
    for ignore in ignore_idx_values:
        try:
            pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append(data_to_append, ignore_index=ignore)
        else:
            modin_result = modin_df.append(data_to_append, ignore_index=ignore)
            df_equals(modin_result, pandas_result)
    # Appending a single row (Series) taken from the frame itself.
    try:
        pandas_result = pandas_df.append(pandas_df.iloc[-1])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.append(modin_df.iloc[-1])
    else:
        modin_result = modin_df.append(modin_df.iloc[-1])
        df_equals(modin_result, pandas_result)
    # Appending a bare list of values (lands in a new column labeled 0).
    try:
        pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.append(list(modin_df.iloc[-1]))
    else:
        modin_result = modin_df.append(list(modin_df.iloc[-1]))
        # Pandas has bug where sort=False is ignored
        # (https://github.com/pandas-dev/pandas/issues/35092), but Modin
        # now does the right thing, so for now manually sort to workaround
        # this. Once the Pandas bug is fixed and Modin upgrades to that
        # Pandas release, this sort will cause the test to fail, and the
        # next three lines should be deleted.
        if get_current_backend() != "BaseOnPython":
            assert list(modin_result.columns) == list(modin_df.columns) + [0]
            modin_result = modin_result[[0] + sorted(modin_df.columns)]
        df_equals(modin_result, pandas_result)
    # Appending a list of frames, exercising both verify_integrity settings
    # (True must fail on the duplicated index).
    verify_integrity_values = [True, False]
    for verify_integrity in verify_integrity_values:
        try:
            pandas_result = pandas_df.append(
                [pandas_df, pandas_df], verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append([modin_df, modin_df], verify_integrity=verify_integrity)
        else:
            modin_result = modin_df.append(
                [modin_df, modin_df], verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)
        try:
            pandas_result = pandas_df.append(
                pandas_df, verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.append(modin_df, verify_integrity=verify_integrity)
        else:
            modin_result = modin_df.append(modin_df, verify_integrity=verify_integrity)
            df_equals(modin_result, pandas_result)
def test_astype():
    """``astype`` parity with pandas for scalar dtypes, dicts and bad input."""
    td = pandas.DataFrame(test_data["int_data"])[["col1", "index", "col3", "col4"]]
    modin_df = pd.DataFrame(td.values, index=td.index, columns=td.columns)
    expected_df = pandas.DataFrame(td.values, index=td.index, columns=td.columns)
    # Whole-frame casts to numeric, string and categorical dtypes.
    modin_df_casted = modin_df.astype(np.int32)
    expected_df_casted = expected_df.astype(np.int32)
    df_equals(modin_df_casted, expected_df_casted)
    modin_df_casted = modin_df.astype(np.float64)
    expected_df_casted = expected_df.astype(np.float64)
    df_equals(modin_df_casted, expected_df_casted)
    modin_df_casted = modin_df.astype(str)
    expected_df_casted = expected_df.astype(str)
    df_equals(modin_df_casted, expected_df_casted)
    modin_df_casted = modin_df.astype("category")
    expected_df_casted = expected_df.astype("category")
    df_equals(modin_df_casted, expected_df_casted)
    # Per-column dtype mapping.
    dtype_dict = {"col1": np.int32, "index": np.int64, "col3": str}
    modin_df_casted = modin_df.astype(dtype_dict)
    expected_df_casted = expected_df.astype(dtype_dict)
    df_equals(modin_df_casted, expected_df_casted)
    # Ignore lint because this is testing bad input
    # (duplicate keys collapse to the last value, str, in both libraries).
    bad_dtype_dict = {"index": np.int32, "index": np.int64, "index": str}  # noqa F601
    modin_df_casted = modin_df.astype(bad_dtype_dict)
    expected_df_casted = expected_df.astype(bad_dtype_dict)
    df_equals(modin_df_casted, expected_df_casted)
    # Casting an object column holding ints must still work.
    modin_df = pd.DataFrame(index=["row1"], columns=["col1"])
    modin_df["col1"]["row1"] = 11
    modin_df_casted = modin_df.astype(int)
    expected_df = pandas.DataFrame(index=["row1"], columns=["col1"])
    expected_df["col1"]["row1"] = 11
    expected_df_casted = expected_df.astype(int)
    df_equals(modin_df_casted, expected_df_casted)
    # Unknown column labels in the dtype mapping must raise.
    with pytest.raises(KeyError):
        modin_df.astype({"not_exists": np.uint8})
def _check_category_casts(modin_df, pandas_df):
    # Casting a single column and the whole frame to "category" must both
    # preserve values and dtypes relative to pandas.
    for dtype_arg in ({"col1": "category"}, "category"):
        modin_result = modin_df.astype(dtype_arg)
        pandas_result = pandas_df.astype(dtype_arg)
        df_equals(modin_result, pandas_result)
        assert modin_result.dtypes.equals(pandas_result.dtypes)


def test_astype_category():
    """Categorical casts on a small frame match pandas."""
    raw = {"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
    _check_category_casts(pd.DataFrame(raw), pandas.DataFrame(raw))


def test_astype_category_large():
    """Categorical casts on a frame spanning several partitions match pandas."""
    series_length = 10_000
    raw = {
        "col1": ["str{0}".format(i) for i in range(0, series_length)],
        "col2": [i for i in range(0, series_length)],
    }
    _check_category_casts(pd.DataFrame(raw), pandas.DataFrame(raw))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(request, data, axis):
    """``clip`` parity with pandas for scalar and per-label bounds.

    Uses ``randint(low, high + 1)`` instead of the long-deprecated
    ``RandomState.random_integers`` (removed from recent NumPy releases).
    ``random_integers(a, b)`` draws exactly the same values as
    ``randint(a, b + 1)``, so the generated bounds are unchanged.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    if name_contains(request.node.name, numeric_dfs):
        # Number of labels along the clipping axis.
        ind_len = (
            len(modin_df.index)
            if not pandas.DataFrame()._get_axis_number(axis)
            else len(modin_df.columns)
        )
        # set bounds: a sorted scalar pair plus one list of bounds per label
        lower, upper = np.sort(random_state.randint(RAND_LOW, RAND_HIGH + 1, 2))
        lower_list = random_state.randint(RAND_LOW, RAND_HIGH + 1, ind_len)
        upper_list = random_state.randint(RAND_LOW, RAND_HIGH + 1, ind_len)
        # test only upper scalar bound
        modin_result = modin_df.clip(None, upper, axis=axis)
        pandas_result = pandas_df.clip(None, upper, axis=axis)
        df_equals(modin_result, pandas_result)
        # test lower and upper scalar bound
        modin_result = modin_df.clip(lower, upper, axis=axis)
        pandas_result = pandas_df.clip(lower, upper, axis=axis)
        df_equals(modin_result, pandas_result)
        # test lower and upper list bound on each column
        modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
        pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
        df_equals(modin_result, pandas_result)
        # test only upper list bound on each column
        modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
        pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
        df_equals(modin_result, pandas_result)
        # A list-like bound without an axis is ambiguous and must raise.
        with pytest.raises(ValueError):
            modin_df.clip(lower=[1, 2, 3], axis=None)
def test_drop():
    """``drop`` parity with pandas: labels/axis forms, error handling,
    duplicate labels, inplace drops and MultiIndex levels."""
    frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
    simple = pandas.DataFrame(frame_data)
    modin_simple = pd.DataFrame(frame_data)
    # Dropping by column and by row, via both numeric and named axes.
    df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
    df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
    df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
    df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
    # Unknown labels raise by default.
    pytest.raises(ValueError, modin_simple.drop, 5)
    pytest.raises(ValueError, modin_simple.drop, "C", 1)
    pytest.raises(ValueError, modin_simple.drop, [1, 5])
    pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
    # errors = 'ignore'
    df_equals(modin_simple.drop(5, errors="ignore"), simple)
    df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
    df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
    df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
    # non-unique: dropping a duplicated label removes every occurrence.
    nu_df = pandas.DataFrame(
        zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
    )
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
    df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
    df_equals(modin_nu_df.drop([]), nu_df)
    nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
    nu_df.columns = list("abc")
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
    df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
    # inplace cache issue: an in-place drop by a computed index must not
    # serve stale cached data.
    frame_data = random_state.randn(10, 3)
    df = pandas.DataFrame(frame_data, columns=list("abc"))
    modin_df = pd.DataFrame(frame_data, columns=list("abc"))
    expected = df[~(df.b > 0)]
    modin_df.drop(labels=df[df.b > 0].index, inplace=True)
    df_equals(modin_df, expected)
    # Dropping by MultiIndex level currently warns (defaults to pandas).
    midx = pd.MultiIndex(
        levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    df = pd.DataFrame(
        index=midx,
        columns=["big", "small"],
        data=[
            [45, 30],
            [200, 100],
            [1.5, 1],
            [30, 20],
            [250, 150],
            [1.5, 0.8],
            [320, 250],
            [1, 0.8],
            [0.3, 0.2],
        ],
    )
    with pytest.warns(UserWarning):
        df.drop(index="length", level=1)
def test_drop_api_equivalence():
    """Equivalence of the labels/axis and index/columns drop APIs.

    ``drop("d", 1)`` passed ``axis`` positionally, which pandas deprecated
    (and pandas 2.0 removed: all ``drop`` arguments after ``labels`` are
    keyword-only); it is now passed as ``axis=1`` with identical behavior.
    """
    frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
    modin_df = pd.DataFrame(frame_data, index=["a", "b", "c"], columns=["d", "e", "f"])
    # Row drop: positional label vs. index= keyword.
    modin_df1 = modin_df.drop("a")
    modin_df2 = modin_df.drop(index="a")
    df_equals(modin_df1, modin_df2)
    # Column drop: axis=1 vs. columns= keyword.
    modin_df1 = modin_df.drop("d", axis=1)
    modin_df2 = modin_df.drop(columns="d")
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop(labels="e", axis=1)
    modin_df2 = modin_df.drop(columns="e")
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop(["a"], axis=0)
    modin_df2 = modin_df.drop(index=["a"])
    df_equals(modin_df1, modin_df2)
    # Combined row+column drop in one call vs. chained calls.
    modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
    modin_df2 = modin_df.drop(index=["a"], columns=["d"])
    df_equals(modin_df1, modin_df2)
    # Mixing labels/axis with index/columns keywords must raise.
    with pytest.raises(ValueError):
        modin_df.drop(labels="a", index="b")
    with pytest.raises(ValueError):
        modin_df.drop(labels="a", columns="b")
    with pytest.raises(ValueError):
        modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(data):
    """Dropping rows/columns from a transposed frame must match pandas."""
    md, pdf = pd.DataFrame(data), pandas.DataFrame(data)
    # Columns-only, index-only, then both at once.
    for kwargs in (
        {"columns": [0, 1, 2]},
        {"index": ["col3", "col1"]},
        {"columns": [0, 1, 2], "index": ["col3", "col1"]},
    ):
        df_equals(md.T.drop(**kwargs), pdf.T.drop(**kwargs))
def test_droplevel():
    """Dropping an index level and a column level must run without error."""
    frame = (
        pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
        .set_index([0, 1])
        .rename_axis(["a", "b"])
    )
    frame.columns = pd.MultiIndex.from_tuples(
        [("c", "e"), ("d", "f")], names=["level_1", "level_2"]
    )
    # Results are discarded; the calls only have to succeed.
    frame.droplevel("a")
    frame.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
    "subset",
    [None, "col1", "name", ("col1", "col3"), ["col1", "col3", "col7"]],
    ids=["None", "string", "name", "tuple", "list"],
)
def test_drop_duplicates(data, keep, subset):
    """``drop_duplicates`` parity with pandas, out-of-place and in-place.

    Some subsets name columns absent from some fixtures, so exception
    types are mirrored rather than assumed absent.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Out-of-place variant.
    try:
        pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset)
    else:
        df_equals(
            pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
            modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
        )
    # In-place variant (both return None on success).
    try:
        pandas_results = pandas_df.drop_duplicates(
            keep=keep, inplace=True, subset=subset
        )
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
    else:
        modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
        df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values():
    """Regression test: ``drop_duplicates`` after ``sort_values`` on a frame
    whose index has gaps (non-contiguous labels) must match pandas."""
    # Real-world-shaped fixture: note the index skips ranges (…15, 20… and
    # …27, 32…), which is the condition being exercised.
    data = {
        "columns": ["value", "time", "id"],
        "index": [
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15,
            20,
            21,
            22,
            23,
            24,
            25,
            26,
            27,
            32,
            33,
            34,
            35,
            36,
            37,
            38,
            39,
            40,
            41,
        ],
        "data": [
            ["3", 1279213398000.0, 88.0],
            ["3", 1279204682000.0, 88.0],
            ["0", 1245772835000.0, 448.0],
            ["0", 1270564258000.0, 32.0],
            ["0", 1267106669000.0, 118.0],
            ["7", 1300621123000.0, 5.0],
            ["0", 1251130752000.0, 957.0],
            ["0", 1311683506000.0, 62.0],
            ["9", 1283692698000.0, 89.0],
            ["9", 1270234253000.0, 64.0],
            ["0", 1285088818000.0, 50.0],
            ["0", 1218212725000.0, 695.0],
            ["2", 1383933968000.0, 348.0],
            ["0", 1368227625000.0, 257.0],
            ["1", 1454514093000.0, 446.0],
            ["1", 1428497427000.0, 134.0],
            ["1", 1459184936000.0, 568.0],
            ["1", 1502293302000.0, 599.0],
            ["1", 1491833358000.0, 829.0],
            ["1", 1485431534000.0, 806.0],
            ["8", 1351800505000.0, 101.0],
            ["0", 1357247721000.0, 916.0],
            ["0", 1335804423000.0, 370.0],
            ["24", 1327547726000.0, 720.0],
            ["0", 1332334140000.0, 415.0],
            ["0", 1309543100000.0, 30.0],
            ["18", 1309541141000.0, 30.0],
            ["0", 1298979435000.0, 48.0],
            ["14", 1276098160000.0, 59.0],
            ["0", 1233936302000.0, 109.0],
        ],
    }
    pandas_df = pandas.DataFrame(
        data["data"], index=data["index"], columns=data["columns"]
    )
    modin_df = pd.DataFrame(data["data"], index=data["index"], columns=data["columns"])
    # Sort first so that drop_duplicates' keep="first" picks the same rows.
    modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort():
    """``drop_duplicates`` after ``sort_values`` keeps the same rows as pandas."""
    records = [
        {"value": 1, "time": 2},
        {"value": 1, "time": 1},
        {"value": 2, "time": 1},
        {"value": 2, "time": 2},
    ]
    md, pdf = pd.DataFrame(records), pandas.DataFrame(records)
    df_equals(
        md.sort_values(["value", "time"]).drop_duplicates(["value"]),
        pdf.sort_values(["value", "time"]).drop_duplicates(["value"]),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(data, axis, how):
    """Invalid arguments must raise; valid axis/how combos match pandas."""
    md = pd.DataFrame(data)
    pdf = pandas.DataFrame(data)
    # Unknown ``how`` value.
    with pytest.raises(ValueError):
        md.dropna(axis=axis, how="invalid")
    # ``how`` and ``thresh`` cannot both be None.
    with pytest.raises(TypeError):
        md.dropna(axis=axis, how=None, thresh=None)
    # Unknown subset label.
    with pytest.raises(KeyError):
        md.dropna(axis=axis, subset=["NotExists"], how=how)
    df_equals(md.dropna(axis=axis, how=how), pdf.dropna(axis=axis, how=how))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(data):
    """In-place ``dropna`` variants must end up equal to pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Modin's in-place result is compared to pandas' out-of-place result --
    # this deliberately cross-checks the two call styles.
    pandas_result = pandas_df.dropna()
    modin_df.dropna(inplace=True)
    df_equals(modin_df, pandas_result)
    # thresh= path, both in-place.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(thresh=2, inplace=True)
    modin_df.dropna(thresh=2, inplace=True)
    df_equals(modin_df, pandas_df)
    # Column axis with how="any", both in-place.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(axis=1, how="any", inplace=True)
    modin_df.dropna(axis=1, how="any", inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(data):
    """Passing a list or tuple of axes must be rejected with TypeError."""
    md = pd.DataFrame(data)
    for axes in ([0, 1], (0, 1)):
        with pytest.raises(TypeError):
            md.dropna(how="all", axis=axes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(request, data):
    """``dropna`` with a ``subset`` of columns (axis=0) and of rows (axis=1)."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Empty frames have no labels to build a subset from, hence this guard.
    if "empty_data" not in request.node.name:
        # Column subset constrains which columns are inspected for NaNs.
        column_subset = modin_df.columns[0:2]
        df_equals(
            modin_df.dropna(how="all", subset=column_subset),
            pandas_df.dropna(how="all", subset=column_subset),
        )
        df_equals(
            modin_df.dropna(how="any", subset=column_subset),
            pandas_df.dropna(how="any", subset=column_subset),
        )
        # Row subset with axis=1 constrains which rows are inspected.
        row_subset = modin_df.index[0:2]
        df_equals(
            modin_df.dropna(how="all", axis=1, subset=row_subset),
            pandas_df.dropna(how="all", axis=1, subset=row_subset),
        )
        df_equals(
            modin_df.dropna(how="any", axis=1, subset=row_subset),
            pandas_df.dropna(how="any", axis=1, subset=row_subset),
        )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis,subset", [(0, list("EF")), (1, [4, 5])])
def test_dropna_subset_error(data, axis, subset):
    """A ``subset`` containing labels absent from the frame must fail like pandas."""
    # eval_general asserts that modin raises the same exception pandas does.
    eval_general(*create_test_dfs(data), lambda df: df.dropna(axis=axis, subset=subset))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("astype", ["category", "int32", "float"])
def test_insert_dtypes(data, astype):
    """Inserting a column cast to a given dtype must preserve that dtype."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    # NOTE(review): this silently passes instead of skipping when the first
    # column contains NaN and astype == "category" — presumably a known
    # categorical/NaN limitation; consider pytest.skip with a reason.
    if astype == "category" and pandas_df.iloc[:, 0].isnull().any():
        return
    eval_insert(
        modin_df,
        pandas_df,
        col="TypeSaver",
        value=lambda df: df.iloc[:, 0].astype(astype),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert_loc(data, loc):
    """``insert`` at every parametrized position (incl. invalid ones) matches pandas."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    # Re-insert the first column at position `loc`; eval_insert compares
    # results (or exceptions) between modin and pandas.
    value = modin_df.iloc[:, 0]
    eval_insert(modin_df, pandas_df, loc=loc, value=value)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_insert(data):
    """Exercise ``DataFrame.insert`` across valid and deliberately invalid inputs.

    Each ``eval_insert`` call compares the modin outcome (result or raised
    exception) against pandas for one scenario.
    """
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    # Insert a copy of an existing column under a new name.
    eval_insert(
        modin_df, pandas_df, col="Duplicate", value=lambda df: df[df.columns[0]]
    )
    # Broadcast a scalar into a new column.
    eval_insert(modin_df, pandas_df, col="Scalar", value=100)
    # Insert into a frame that has columns but no rows.
    eval_insert(
        pd.DataFrame(columns=list("ab")),
        pandas.DataFrame(columns=list("ab")),
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    # Insert into a frame that has rows but no columns.
    eval_insert(
        pd.DataFrame(index=modin_df.index),
        pandas.DataFrame(index=pandas_df.index),
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    # A single-column DataFrame (not a Series) as the inserted value.
    eval_insert(
        modin_df,
        pandas_df,
        col="DataFrame insert",
        value=lambda df: df[[df.columns[0]]],
    )
    # Value whose index is reversed relative to the target frame.
    eval_insert(
        modin_df,
        pandas_df,
        col="Different indices",
        value=lambda df: df[[df.columns[0]]].set_index(df.index[::-1]),
    )
    # Invalid: inserting a whole multi-column frame as one column.
    eval_insert(modin_df, pandas_df, col="Bad Column", value=lambda df: df)
    # Invalid: list value one element shorter than the index.
    eval_insert(
        modin_df,
        pandas_df,
        col="Too Short",
        value=lambda df: list(df[df.columns[0]])[:-1],
    )
    # Invalid: column name that already exists.
    eval_insert(
        modin_df,
        pandas_df,
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
    )
    # Invalid: loc far beyond the number of columns.
    eval_insert(
        modin_df,
        pandas_df,
        loc=lambda df: len(df.columns) + 100,
        col="Bad Loc",
        value=100,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(data):
    """``DataFrame.ndim`` must agree between modin and pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    assert md_frame.ndim == pd_frame.ndim
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
    """``DataFrame.notna`` must produce the same boolean mask as pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md_frame.notna(), pd_frame.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
    """``DataFrame.notnull`` (alias of ``notna``) must match pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    df_equals(md_frame.notnull(), pd_frame.notnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(data):
    """``DataFrame.round`` with default and explicit precision matches pandas."""
    md_frame = pd.DataFrame(data)
    pd_frame = pandas.DataFrame(data)
    # Default precision (0 decimals), then one explicit decimal place.
    df_equals(md_frame.round(), pd_frame.round())
    df_equals(md_frame.round(1), pd_frame.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(data, axis):
    """``set_axis`` out-of-place and in-place must match pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Normalize the parametrized axis ("index"/"columns"/0/1) to 0 or 1.
    x = pandas.DataFrame()._get_axis_number(axis)
    index = modin_df.columns if x else modin_df.index
    # New labels derived from the old ones so they stay unique.
    labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
    # Out-of-place: results compared directly.
    modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
    pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
    df_equals(modin_result, pandas_result)
    modin_df_copy = modin_df.copy()
    modin_df.set_axis(labels, axis=axis, inplace=True)
    # In-place call must actually mutate the frame: df_equals against the
    # pre-mutation copy is expected to FAIL (AssertionError == success here).
    try:
        df_equals(modin_df, modin_df_copy)
    except AssertionError:
        assert True
    else:
        assert False
    # And the mutated modin frame must match the pandas in-place result.
    pandas_df.set_axis(labels, axis=axis, inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys))
@pytest.mark.parametrize(
    "append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(request, data, drop, append):
    """``set_index`` (all drop/append combinations, in and out of place) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Needs at least one column to use as the new index; skip the empty fixture.
    if "empty_data" not in request.node.name:
        key = modin_df.columns[0]
        # Out-of-place comparison.
        modin_result = modin_df.set_index(key, drop=drop, append=append, inplace=False)
        pandas_result = pandas_df.set_index(
            key, drop=drop, append=append, inplace=False
        )
        df_equals(modin_result, pandas_result)
        modin_df_copy = modin_df.copy()
        modin_df.set_index(key, drop=drop, append=append, inplace=True)
        # In-place call must mutate the frame: equality with the pre-mutation
        # copy is expected to FAIL (AssertionError == success here).
        try:
            df_equals(modin_df, modin_df_copy)
        except AssertionError:
            assert True
        else:
            assert False
        # Mutated modin frame must match the pandas in-place result.
        pandas_df.set_index(key, drop=drop, append=append, inplace=True)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(data):
    """``DataFrame.shape`` must agree between modin and pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    assert md_frame.shape == pd_frame.shape
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(data):
    """``DataFrame.size`` must agree between modin and pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    assert md_frame.size == pd_frame.size
def test_squeeze():
    """``DataFrame.squeeze`` must match pandas for every dimensionality.

    Fixtures cover: multi-column frame (no squeeze), single column -> Series,
    single row -> Series, 1x1 numeric -> scalar, 1x1 string -> scalar, and
    finally squeezing a single row selected with ``iloc`` after ``set_index``.
    """
    frame_data = {
        "col1": [0, 1, 2, 3],
        "col2": [4, 5, 6, 7],
        "col3": [8, 9, 10, 11],
        "col4": [12, 13, 14, 15],
        "col5": [0, 0, 0, 0],
    }
    frame_data_2 = {"col1": [0, 1, 2, 3]}
    frame_data_3 = {
        "col1": [0],
        "col2": [4],
        "col3": [8],
        "col4": [12],
        "col5": [0],
    }
    frame_data_4 = {"col1": [2]}
    frame_data_5 = {"col1": ["string"]}
    # Multi-column, multi-row: squeeze is a no-op.
    pandas_df = pandas.DataFrame(frame_data).squeeze()
    modin_df = pd.DataFrame(frame_data).squeeze()
    df_equals(modin_df, pandas_df)
    # Single column: squeezes to a Series.
    pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
    modin_df_2 = pd.DataFrame(frame_data_2).squeeze()
    df_equals(modin_df_2, pandas_df_2)
    # Single row: squeezes to a Series.
    pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
    modin_df_3 = pd.DataFrame(frame_data_3).squeeze()
    df_equals(modin_df_3, pandas_df_3)
    # 1x1 numeric: squeezes to a scalar.
    pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
    modin_df_4 = pd.DataFrame(frame_data_4).squeeze()
    df_equals(modin_df_4, pandas_df_4)
    # 1x1 string: squeezes to a scalar.
    pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
    modin_df_5 = pd.DataFrame(frame_data_5).squeeze()
    df_equals(modin_df_5, pandas_df_5)
    # Row selection after transposing and re-indexing on a timestamp column.
    data = [
        [
            pd.Timestamp("2019-01-02"),
            pd.Timestamp("2019-01-03"),
            pd.Timestamp("2019-01-04"),
            pd.Timestamp("2019-01-05"),
        ],
        [1, 1, 1, 2],
    ]
    df = pd.DataFrame(data, index=["date", "value"]).T
    pf = pandas.DataFrame(data, index=["date", "value"]).T
    df.set_index("date", inplace=True)
    pf.set_index("date", inplace=True)
    df_equals(df.iloc[0], pf.iloc[0])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(data):
    """``T``/``transpose`` and operations on the transpose must match pandas."""
    md_frame, pd_frame = pd.DataFrame(data), pandas.DataFrame(data)
    # Both spellings of transposition.
    df_equals(md_frame.T, pd_frame.T)
    df_equals(md_frame.transpose(), pd_frame.transpose())
    # Operations applied on top of a transposed frame.
    df_equals(md_frame.T.dropna(), pd_frame.T.dropna())
    df_equals(md_frame.T.nunique(), pd_frame.T.nunique())
    df_equals(md_frame.T.notna(), pd_frame.T.notna())
@pytest.mark.parametrize(
    "data, other_data",
    [
        # Overlapping column plus an extra column in the updater.
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, 5, 6], "C": [7, 8, 9]}),
        # Updater longer than the target frame.
        (
            {"A": ["a", "b", "c"], "B": ["x", "y", "z"]},
            {"B": ["d", "e", "f", "g", "h", "i"]},
        ),
        # NaN in the updater must not overwrite existing values.
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, np.nan, 6]}),
    ],
)
def test_update(data, other_data):
    """``DataFrame.update`` (in place) must match pandas, including errors="raise"."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    other_modin_df, other_pandas_df = (
        pd.DataFrame(other_data),
        pandas.DataFrame(other_data),
    )
    modin_df.update(other_modin_df)
    pandas_df.update(other_pandas_df)
    df_equals(modin_df, pandas_df)
    # With errors="raise" an overlapping non-NA update must raise ValueError.
    with pytest.raises(ValueError):
        modin_df.update(other_modin_df, errors="raise")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(request, data):
    """Unary negation must either match pandas or raise the same exception type."""
    # NOTE(review): the `request` fixture is unused here.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = pandas_df.__neg__()
    except Exception as e:
        # pandas raised (e.g. non-numeric data): modin must raise the same type.
        with pytest.raises(type(e)):
            modin_df.__neg__()
    else:
        modin_result = modin_df.__neg__()
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(data):
    """Bitwise inversion (``~df``) must match pandas or raise the same exception."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = ~pandas_df
    except Exception as e:
        with pytest.raises(type(e)):
            # repr() forces the result to materialize — presumably needed so a
            # deferred modin error actually surfaces inside the raises block.
            repr(~modin_df)
    else:
        modin_result = ~modin_df
        df_equals(modin_result, pandas_result)
def test___hash__():
    """Hashing a DataFrame must emit a UserWarning; a TypeError is tolerated.

    DataFrames are mutable, so ``__hash__`` is expected to either warn (modin
    default-to-pandas path) or fail with TypeError, matching pandas.
    """
    data = test_data_values[0]
    with pytest.warns(UserWarning):
        try:
            pd.DataFrame(data).__hash__()
        except TypeError:
            # Unhashable is an acceptable outcome; only the warning is required.
            pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(request, data):
    """``del df[col]`` must remove the column exactly as pandas does."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Column deletion needs at least one column; skip the empty fixture.
    if "empty_data" not in request.node.name:
        # Delete the first column from fresh copies of both frames.
        key = pandas_df.columns[0]
        modin_df = modin_df.copy()
        pandas_df = pandas_df.copy()
        modin_df.__delitem__(key)
        pandas_df.__delitem__(key)
        df_equals(modin_df, pandas_df)
        # Then delete what is now the last remaining column.
        last_label = pandas_df.iloc[:, -1].name
        modin_df.__delitem__(last_label)
        pandas_df.__delitem__(last_label)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___nonzero__(data):
    """Truth value of a DataFrame is ambiguous and must raise ValueError."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Matches pandas behavior: bool(df) always raises.
    with pytest.raises(ValueError):
        modin_df.__nonzero__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(request, data):
    """``abs(df)`` must match pandas or raise the same exception type."""
    # NOTE(review): the `request` fixture is unused here.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = abs(pandas_df)
    except Exception as e:
        # pandas raised (e.g. non-numeric data): modin must raise the same type.
        with pytest.raises(type(e)):
            abs(modin_df)
    else:
        modin_result = abs(modin_df)
        df_equals(modin_result, pandas_result)
def test___round__():
    """``df.__round__()`` is expected to warn (default-to-pandas path in modin)."""
    data = test_data_values[0]
    with pytest.warns(UserWarning):
        pd.DataFrame(data).__round__()
| true | true |
f7317cf731b0cf356e1cea4ede7915e02f90b539 | 7,087 | py | Python | litex_boards/targets/alveo_u250.py | quiatvn/litex-boards | 70c32a978fb588b3144a9e3cf9a63562f5505b7f | [
"BSD-2-Clause"
] | null | null | null | litex_boards/targets/alveo_u250.py | quiatvn/litex-boards | 70c32a978fb588b3144a9e3cf9a63562f5505b7f | [
"BSD-2-Clause"
] | null | null | null | litex_boards/targets/alveo_u250.py | quiatvn/litex-boards | 70c32a978fb588b3144a9e3cf9a63562f5505b7f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Fei Gao <feig@princeton.edu>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2020 David Shah <dave@ds0.me>
# SPDX-License-Identifier: BSD-2-Clause
import argparse, os
from migen import *
from litex_boards.platforms import alveo_u250
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MTA18ASF2G72PZ
from litedram.phy import usddrphy
from litepcie.phy.usppciephy import USPPCIEPHY
from litepcie.core import LitePCIeEndpoint, LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMA
from litepcie.frontend.wishbone import LitePCIeWishboneBridge
from litepcie.software import generate_litepcie_software
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock/reset generation for the Alveo U250 target.

    Derives the system clock domains from the board's 300 MHz input clock:
    ``sys`` (sys_clk_freq), ``sys4x`` (4x, for the DDR4 PHY serialization),
    and ``clk500`` (500 MHz IDELAY reference).
    """
    def __init__(self, platform, sys_clk_freq):
        self.clock_domains.cd_sys    = ClockDomain()
        self.clock_domains.cd_sys4x  = ClockDomain(reset_less=True)
        self.clock_domains.cd_pll4x  = ClockDomain(reset_less=True)
        self.clock_domains.cd_clk500 = ClockDomain()
        # # #
        self.submodules.pll = pll = USMMCM(speedgrade=-2)
        self.comb += pll.reset.eq(0) # FIXME
        pll.register_clkin(platform.request("clk300", 0), 300e6)
        # pll4x is generated unbuffered; sys and sys4x are derived from it
        # below through BUFGCE_DIV/BUFGCE so they stay phase-aligned.
        pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
        pll.create_clkout(self.cd_clk500, 500e6, with_reset=False)
        self.specials += [
            Instance("BUFGCE_DIV", name="main_bufgce_div",
                p_BUFGCE_DIVIDE=4,
                i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
            Instance("BUFGCE", name="main_bufgce",
                i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
            # Hold clk500 in reset until the MMCM reports lock.
            AsyncResetSynchronizer(self.cd_clk500, ~pll.locked),
        ]
        # IDELAYCTRL calibrated from the 500 MHz reference (DDR4 I/O delays).
        self.submodules.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_clk500, cd_sys=self.cd_sys)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
    """LiteX SoC for the Xilinx Alveo U250: CRG, DDR4 (RDIMM) and optional PCIe."""
    def __init__(self, sys_clk_freq=int(125e6), with_pcie=False, **kwargs):
        platform = alveo_u250.Platform()
        # SoCCore ----------------------------------------------------------------------------------
        SoCCore.__init__(self, platform, sys_clk_freq,
            ident          = "LiteX SoC on Alveo U250",
            ident_version  = True,
            **kwargs)
        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)
        # DDR4 SDRAM (skipped when an integrated RAM is requested) ---------------------------------
        if not self.integrated_main_ram_size:
            self.submodules.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram"),
                memtype          = "DDR4",
                sys_clk_freq     = sys_clk_freq,
                iodelay_clk_freq = 500e6,
                cmd_latency      = 1,
                is_rdimm         = True)
            self.add_csr("ddrphy")
            self.add_sdram("sdram",
                phy                     = self.ddrphy,
                module                  = MTA18ASF2G72PZ(sys_clk_freq, "1:4"),
                origin                  = self.mem_map["main_ram"],
                size                    = kwargs.get("max_sdram_size", 0x40000000),
                l2_cache_size           = kwargs.get("l2_size", 8192),
                l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
                l2_cache_reverse        = True
            )
        # Firmware RAM (To ease initial LiteDRAM calibration support) ------------------------------
        self.add_ram("firmware_ram", 0x20000000, 0x8000)
        # PCIe -------------------------------------------------------------------------------------
        if with_pcie:
            # PHY
            self.submodules.pcie_phy = USPPCIEPHY(platform, platform.request("pcie_x4"),
                data_width = 128,
                bar0_size  = 0x20000)
            #self.pcie_phy.add_timing_constraints(platform) # FIXME
            platform.add_false_path_constraints(self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk)
            self.add_csr("pcie_phy")
            # Endpoint
            self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy, max_pending_requests=8)
            # Wishbone bridge (host access to the SoC CSR bus over PCIe)
            self.submodules.pcie_bridge = LitePCIeWishboneBridge(self.pcie_endpoint,
                base_address = self.mem_map["csr"])
            self.add_wb_master(self.pcie_bridge.wishbone)
            # DMA0 (loopback-capable, buffered)
            self.submodules.pcie_dma0 = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint,
                with_buffering = True, buffering_depth=1024,
                with_loopback  = True)
            self.add_csr("pcie_dma0")
            self.add_constant("DMA_CHANNELS", 1)
            # MSI (one IRQ line per DMA direction, wired below)
            self.submodules.pcie_msi = LitePCIeMSI()
            self.add_csr("pcie_msi")
            self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi)
            self.interrupts = {
                "PCIE_DMA0_WRITER":    self.pcie_dma0.writer.irq,
                "PCIE_DMA0_READER":    self.pcie_dma0.reader.irq,
            }
            for i, (k, v) in enumerate(sorted(self.interrupts.items())):
                self.comb += self.pcie_msi.irqs[i].eq(v)
                self.add_constant(k + "_INTERRUPT", i)
        # Leds -------------------------------------------------------------------------------------
        self.submodules.leds = LedChaser(
            pads         = platform.request_all("user_led"),
            sys_clk_freq = sys_clk_freq)
        self.add_csr("leds")
# Build --------------------------------------------------------------------------------------------
def main():
    """Command-line entry point: build/load the Alveo U250 bitstream.

    Flags: --build (run synthesis), --with-pcie (instantiate PCIe core),
    --driver (generate the LitePCIe host driver), --load (program the board).
    """
    parser = argparse.ArgumentParser(description="LiteX SoC on Alveo U250")
    parser.add_argument("--build",           action="store_true", help="Build bitstream")
    parser.add_argument("--with-pcie",       action="store_true", help="Enable PCIe support")
    parser.add_argument("--driver",          action="store_true", help="Generate PCIe driver")
    parser.add_argument("--load",            action="store_true", help="Load bitstream")
    builder_args(parser)
    soc_sdram_args(parser)
    args = parser.parse_args()
    # Enforce arguments (the LitePCIe bridge requires 32-bit CSR accesses)
    args.csr_data_width = 32
    soc = BaseSoC(with_pcie=args.with_pcie, **soc_sdram_argdict(args))
    builder = Builder(soc, **builder_argdict(args))
    builder.build(run=args.build)
    if args.driver:
        generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
    main()
| 41.934911 | 100 | 0.56272 |
import argparse, os
from migen import *
from litex_boards.platforms import alveo_u250
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MTA18ASF2G72PZ
from litedram.phy import usddrphy
from litepcie.phy.usppciephy import USPPCIEPHY
from litepcie.core import LitePCIeEndpoint, LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMA
from litepcie.frontend.wishbone import LitePCIeWishboneBridge
from litepcie.software import generate_litepcie_software
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_pll4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk500 = ClockDomain()
self.submodules.pll = pll = USMMCM(speedgrade=-2)
self.comb += pll.reset.eq(0)
pll.register_clkin(platform.request("clk300", 0), 300e6)
pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
pll.create_clkout(self.cd_clk500, 500e6, with_reset=False)
self.specials += [
Instance("BUFGCE_DIV", name="main_bufgce_div",
p_BUFGCE_DIVIDE=4,
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
Instance("BUFGCE", name="main_bufgce",
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
AsyncResetSynchronizer(self.cd_clk500, ~pll.locked),
]
self.submodules.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_clk500, cd_sys=self.cd_sys)
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(125e6), with_pcie=False, **kwargs):
platform = alveo_u250.Platform()
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Alveo U250",
ident_version = True,
**kwargs)
self.submodules.crg = _CRG(platform, sys_clk_freq)
if not self.integrated_main_ram_size:
self.submodules.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram"),
memtype = "DDR4",
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 500e6,
cmd_latency = 1,
is_rdimm = True)
self.add_csr("ddrphy")
self.add_sdram("sdram",
phy = self.ddrphy,
module = MTA18ASF2G72PZ(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
self.add_ram("firmware_ram", 0x20000000, 0x8000)
if with_pcie:
self.submodules.pcie_phy = USPPCIEPHY(platform, platform.request("pcie_x4"),
data_width = 128,
bar0_size = 0x20000)
platform.add_false_path_constraints(self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk)
self.add_csr("pcie_phy")
self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy, max_pending_requests=8)
self.submodules.pcie_bridge = LitePCIeWishboneBridge(self.pcie_endpoint,
base_address = self.mem_map["csr"])
self.add_wb_master(self.pcie_bridge.wishbone)
self.submodules.pcie_dma0 = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint,
with_buffering = True, buffering_depth=1024,
with_loopback = True)
self.add_csr("pcie_dma0")
self.add_constant("DMA_CHANNELS", 1)
self.submodules.pcie_msi = LitePCIeMSI()
self.add_csr("pcie_msi")
self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi)
self.interrupts = {
"PCIE_DMA0_WRITER": self.pcie_dma0.writer.irq,
"PCIE_DMA0_READER": self.pcie_dma0.reader.irq,
}
for i, (k, v) in enumerate(sorted(self.interrupts.items())):
self.comb += self.pcie_msi.irqs[i].eq(v)
self.add_constant(k + "_INTERRUPT", i)
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
self.add_csr("leds")
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Alveo U250")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--with-pcie", action="store_true", help="Enable PCIe support")
parser.add_argument("--driver", action="store_true", help="Generate PCIe driver")
parser.add_argument("--load", action="store_true", help="Load bitstream")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
args.csr_data_width = 32
soc = BaseSoC(with_pcie=args.with_pcie, **soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.driver:
generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
| true | true |
f7317da5336aa017fc94a0299d3bac8f2c5c34b4 | 7,740 | py | Python | facebook_insights/metrics.py | jaylynch/django-facebook-insights | b10f1662f2f346bea19bc84629a8079257c9d710 | [
"MIT"
] | null | null | null | facebook_insights/metrics.py | jaylynch/django-facebook-insights | b10f1662f2f346bea19bc84629a8079257c9d710 | [
"MIT"
] | null | null | null | facebook_insights/metrics.py | jaylynch/django-facebook-insights | b10f1662f2f346bea19bc84629a8079257c9d710 | [
"MIT"
] | 1 | 2019-05-30T06:23:47.000Z | 2019-05-30T06:23:47.000Z | """Tools to fetch and extract Facebook Insights metrics.
>>> graph_id = '1234567890'
>>> metrics = ['page_impressions', 'page_engaged_users']
>>> page_metrics = fetch_metrics(graph_id, metrics)
>>> page_impressions = page_metrics['page_impressions']
>>> page_impressions.values
{'day': [
{'end_time': '2016-11-15T08:00:00+0000', 'value': 0},
{'end_time': '2016-11-16T08:00:00+0000', 'value': 1},
{'end_time': '2016-11-17T08:00:00+0000', 'value': 2},
],
'week': [
{'end_time': '2016-11-15T08:00:00+0000', 'value': 10},
{'end_time': '2016-11-16T08:00:00+0000', 'value': 11},
{'end_time': '2016-11-17T08:00:00+0000', 'value': 12},
],
'days_28': [
{'end_time': '2016-11-15T08:00:00+0000', 'value': 100},
{'end_time': '2016-11-16T08:00:00+0000', 'value': 101},
{'end_time': '2016-11-17T08:00:00+0000', 'value': 102},
]
}
>>> page_impressions.get_value('day')
{'end_time': '2016-11-17T08:00:00+0000', 'value': 2}
>>> page_impressions.get_value('day', extract=True)
2
>>> page_impressions.get_value('week', index=0)
{'end_time': '2016-11-15T08:00:00+0000', 'value': 10}
>>> page_impressions.get_value('week', index=0, extract=True)
10
>>> page_impressions.get_all_values()
{'day': {'end_time': '2016-11-17T08:00:00+0000', 'value': 2},
 'week': {'end_time': '2016-11-17T08:00:00+0000', 'value': 12},
 'days_28': {'end_time': '2016-11-17T08:00:00+0000', 'value': 102}}
>>> page_impressions.get_all_values(extract=True)
{'day': 2, 'week': 12, 'days_28': 102}
>>> page_impressions.get_all_values(index=0, extract=True)
{'day': 0, 'week': 10, 'days_28': 100}
"""
import json
from django.conf import settings
from facebook import GraphAPI, GraphAPIError
from facebook_insights.exceptions import EmptyData, MetricsNotSpecified
__all__ = ['fetch_metrics', 'Metric']
access_token = settings.FACEBOOK_INSIGHTS_ACCESS_TOKEN
api_version = getattr(settings, 'FACEBOOK_INSIGHTS_API_VERSION', None)
graph_api = GraphAPI(access_token=access_token, version=api_version)
def fetch_metrics(graph_id, metrics, token=None):
    """Fetch Facebook Insights metrics for an object with a given id.

    Parameters
    ----------
    graph_id : str
        The Facebook ID of a Graph API object.
    metrics : iterable of str
        The object's metrics to fetch (e.g. 'page_engaged_users').
    token : str, optional
        A Facebook Graph API access token used for this call only.
        When omitted (or equal to the module-wide token), the module-level
        ``graph_api`` client is reused.

    Returns
    -------
    dict
        A dictionary of mappings between metric names and instances
        of class 'Metric'.  Metrics for which Facebook returned no data
        are silently omitted from the result.

    Raises
    ------
    MetricsNotSpecified
        If `metrics` is empty.
    GraphAPIError
        If any sub-request of the batch reports an error.
    """
    if not metrics:
        raise MetricsNotSpecified('Specify metrics you want to fetch.')
    # One GET sub-request per metric, sent as a single Graph API batch call.
    batch = [
        {
            'method': 'GET',
            'relative_url': '{}/insights/{}/'.format(graph_id, metric),
        }
        for metric in metrics
    ]
    # Use a per-call client when a different token is supplied, instead of
    # rebinding the module-level ``graph_api`` (the former '##TODON'T##'
    # global mutation): a caller's token no longer leaks into subsequent
    # calls made by other callers.
    if token and token != graph_api.access_token:
        api = GraphAPI(access_token=token, version=api_version)
    else:
        api = graph_api
    batch_response = api.put_object(
        parent_object='/',
        connection_name='',
        batch=json.dumps(batch),
    )
    extracted_metrics = {}
    for response in batch_response:
        body = json.loads(response['body'])
        # (nevimov/2016-11-09): Currently facebook-sdk is not
        # able to catch errors in responses to batch requests, so
        # we have to take care of those ourselves.
        if 'error' in body:
            raise GraphAPIError(body)
        data = body['data']
        if not data:
            # Some metrics can legitimately come back empty; raising
            # EmptyData here would abort the whole batch, so skip them.
            continue
        # Regroup the reported records: {period: values}.  All records of a
        # single sub-request share the same metric name.
        rearranged_values = {}
        for datum in data:
            name = datum['name']
            rearranged_values[datum['period']] = datum['values']
        extracted_metrics[name] = Metric(name, rearranged_values)
    return extracted_metrics
class Metric(object):
    """A single Facebook Insights metric with its per-period values.

    Parameters
    ----------
    name : str
        The metric's name (e.g. 'post_impressions' or 'page_engaged_users').
    values : dict of list of dict
        A mapping of periods ('day', 'week', 'days_28', 'lifetime') to the
        list of value records Facebook reported for that period, e.g.::

            # The format typical for post metrics
            {'lifetime': [{'value': 1000}]}

            # The format typical for page metrics
            {'day':     [{'end_time': '2016-11-15T08:00:00+0000', 'value': 0}, ...],
             'week':    [{'end_time': '2016-11-15T08:00:00+0000', 'value': 10}, ...],
             'days_28': [{'end_time': '2016-11-15T08:00:00+0000', 'value': 100}, ...]}

    Attributes
    ----------
    name : str
        The name of the metric.
    values : dict
        The per-period value records.
    """

    def __init__(self, name, values):
        self.name = name
        self.values = values

    def get_value(self, period=None, index=-1, extract=False):
        """Return one value record (or just the bare value) for a period.

        Parameters
        ----------
        period : {None, 'day', 'week', 'days_28', 'lifetime'}
            The period to look up.  May be omitted only when the metric has
            exactly one period (e.g. the post_impressions_* metrics); a
            TypeError is raised otherwise.
        index : int
            Facebook often reports values for 3 consecutive days; by default
            the most recent record is returned.  Pass an index in range
            0..2 (or -1..-3) to select another one.
        extract : bool
            When True, return only the number stored under 'value' instead
            of the whole record (which may also carry 'end_time').

        Returns
        -------
        dict or the bare value, depending on `extract`.
        """
        if not period:
            # Implicit period is only unambiguous for single-period metrics.
            if len(self.values) != 1:
                raise TypeError(
                    "Can't get a period. Argument 'period' can be omitted "
                    "only for metrics that have one period."
                )
            (period,) = self.values
        record = self.values[period][index]
        return record['value'] if extract else record

    def get_all_values(self, index=-1, extract=False):
        """Return a mapping of every period to its value.

        Parameters
        ----------
        `index` and `extract` have the same meaning as for get_value().

        Returns
        -------
        dict
            A mapping of periods to values.
        """
        return {
            period: self.get_value(period, index, extract)
            for period in self.values
        }
| 34.247788 | 79 | 0.589922 | import json
from django.conf import settings
from facebook import GraphAPI, GraphAPIError
from facebook_insights.exceptions import EmptyData, MetricsNotSpecified
__all__ = ['fetch_metrics', 'Metric']
access_token = settings.FACEBOOK_INSIGHTS_ACCESS_TOKEN
api_version = getattr(settings, 'FACEBOOK_INSIGHTS_API_VERSION', None)
graph_api = GraphAPI(access_token=access_token, version=api_version)
def fetch_metrics(graph_id, metrics, token=None):
if not metrics:
raise MetricsNotSpecified('Specify metrics you want to fetch.')
batch = []
for metric in metrics:
request_data = {
'method': 'GET',
'relative_url': '{}/insights/{}/'.format(graph_id, metric)
}
batch.append(request_data)
if token and (token != graph_api.access_token):
graph_api = GraphAPI(access_token=token, version=api_version)
batch_response = graph_api.put_object(
parent_object='/',
connection_name='',
batch=json.dumps(batch),
)
extracted_metrics = {}
for response in batch_response:
body = json.loads(response['body'])
# (nevimov/2016-11-09): Currently facebook-sdk is not
# able to catch errors in responses to batch requests, so
# we have to take care of those ourselves.
if 'error' in body:
raise GraphAPIError(body)
data = body['data']
if not data:
# We need a better middle ground for this but just
# raising exceptions doesn't work when some of a
continue
rearranged_values = {}
for datum in data:
name = datum['name']
period = datum['period']
rearranged_values[period] = datum['values']
extracted_metrics[name] = Metric(name, rearranged_values)
return extracted_metrics
class Metric(object):
def __init__(self, name, values):
self.name = name
self.values = values
def get_value(self, period=None, index=-1, extract=False):
values = self.values
if not period:
if len(values) == 1:
period = list(values.keys())[0]
else:
raise TypeError(
"Can't get a period. Argument 'period' can be omitted "
"only for metrics that have one period."
)
value = values[period][index]
if extract:
return value['value']
return value
def get_all_values(self, index=-1, extract=False):
all_values = {}
for period in self.values:
all_values[period] = self.get_value(period, index, extract)
return all_values
| true | true |
f7317dea433a8c808e75878075c74d8bf4391756 | 11,458 | py | Python | Lib/test/test_timeout.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | 1 | 2018-06-21T18:21:24.000Z | 2018-06-21T18:21:24.000Z | Lib/test/test_timeout.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_timeout.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | """Unit tests dla socket timeout feature."""
zaimportuj functools
zaimportuj unittest
z test zaimportuj support
# This requires the 'network' resource jako given on the regrtest command line.
skip_expected = nie support.is_resource_enabled('network')
zaimportuj time
zaimportuj errno
zaimportuj socket
@functools.lru_cache()
def resolve_address(host, port):
    """Resolve a (host, port) pair to an address.

    We must perform name resolution before timeout tests, otherwise it will be
    performed by connect().  Results are cached (lru_cache) so each host is
    resolved at most once per test run.
    """
    przy support.transient_internet(host):
        zwróć socket.getaddrinfo(host, port, socket.AF_INET,
                                     socket.SOCK_STREAM)[0][4]
klasa CreationTestCase(unittest.TestCase):
    """Test case for socket.gettimeout() and socket.settimeout()."""
    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def tearDown(self):
        self.sock.close()
    def testObjectCreation(self):
        # A freshly created socket must have no timeout set.
        self.assertEqual(self.sock.gettimeout(), Nic,
                         "timeout nie disabled by default")
    def testFloatReturnValue(self):
        # Test the return value of gettimeout() round-trips settimeout().
        self.sock.settimeout(7.345)
        self.assertEqual(self.sock.gettimeout(), 7.345)
        self.sock.settimeout(3)
        self.assertEqual(self.sock.gettimeout(), 3)
        self.sock.settimeout(Nic)
        self.assertEqual(self.sock.gettimeout(), Nic)
    def testReturnType(self):
        # gettimeout() always returns a float, even when an int was set.
        self.sock.settimeout(1)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))
        self.sock.settimeout(3.9)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))
    def testTypeCheck(self):
        # Type checking by settimeout(): numbers and None are accepted,
        # everything else raises TypeError.
        # NOTE(review): the duplicated 0/""/-1 lines below presumably stem
        # from removed Python 2 long-literal variants (0L, -1L, u"").
        self.sock.settimeout(0)
        self.sock.settimeout(0)
        self.sock.settimeout(0.0)
        self.sock.settimeout(Nic)
        self.assertRaises(TypeError, self.sock.settimeout, "")
        self.assertRaises(TypeError, self.sock.settimeout, "")
        self.assertRaises(TypeError, self.sock.settimeout, ())
        self.assertRaises(TypeError, self.sock.settimeout, [])
        self.assertRaises(TypeError, self.sock.settimeout, {})
        self.assertRaises(TypeError, self.sock.settimeout, 0j)
    def testRangeCheck(self):
        # Negative timeouts are rejected with ValueError.
        self.assertRaises(ValueError, self.sock.settimeout, -1)
        self.assertRaises(ValueError, self.sock.settimeout, -1)
        self.assertRaises(ValueError, self.sock.settimeout, -1.0)
    def testTimeoutThenBlocking(self):
        # settimeout() followed by setblocking(): blocking mode clears the
        # timeout (None); non-blocking mode reports a 0.0 timeout.
        self.sock.settimeout(10)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), Nic)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.settimeout(10)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), Nic)
    def testBlockingThenTimeout(self):
        # setblocking() followed by settimeout(): the timeout always wins.
        self.sock.setblocking(0)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
        self.sock.setblocking(1)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
klasa TimeoutTestCase(unittest.TestCase):
# There are a number of tests here trying to make sure that an operation
# doesn't take too much longer than expected. But competing machine
# activity makes it inevitable that such tests will fail at times.
# When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
# oraz Win98SE. Boosting it to 2.0 helped a lot, but isn't a real
# solution.
fuzz = 2.0
localhost = support.HOST
def setUp(self):
podnieś NotImplementedError()
tearDown = setUp
def _sock_operation(self, count, timeout, method, *args):
"""
Test the specified socket method.
The method jest run at most `count` times oraz must podnieś a socket.timeout
within `timeout` + self.fuzz seconds.
"""
self.sock.settimeout(timeout)
method = getattr(self.sock, method)
dla i w range(count):
t1 = time.time()
spróbuj:
method(*args)
wyjąwszy socket.timeout jako e:
delta = time.time() - t1
przerwij
inaczej:
self.fail('socket.timeout was nie podnieśd')
# These checks should account dla timing unprecision
self.assertLess(delta, timeout + self.fuzz)
self.assertGreater(delta, timeout - 1.0)
klasa TCPTimeoutTestCase(TimeoutTestCase):
"""TCP test case dla socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addr_remote = resolve_address('www.python.org.', 80)
def tearDown(self):
self.sock.close()
def testConnectTimeout(self):
# Testing connect timeout jest tricky: we need to have IP connectivity
# to a host that silently drops our packets. We can't simulate this
# z Python because it's a function of the underlying TCP/IP stack.
# So, the following Snakebite host has been defined:
blackhole = resolve_address('blackhole.snakebite.net', 56666)
# Blackhole has been configured to silently drop any incoming packets.
# No RSTs (dla TCP) albo ICMP UNREACH (dla UDP/ICMP) will be sent back
# to hosts that attempt to connect to this address: which jest exactly
# what we need to confidently test connect timeout.
# However, we want to prevent false positives. It's nie unreasonable
# to expect certain hosts may nie be able to reach the blackhole, due
# to firewalling albo general network configuration. In order to improve
# our confidence w testing the blackhole, a corresponding 'whitehole'
# has also been set up using one port higher:
whitehole = resolve_address('whitehole.snakebite.net', 56667)
# This address has been configured to immediately drop any incoming
# packets jako well, but it does it respectfully przy regards to the
# incoming protocol. RSTs are sent dla TCP packets, oraz ICMP UNREACH
# jest sent dla UDP/ICMP packets. This means our attempts to connect to
# it should be met immediately przy ECONNREFUSED. The test case has
# been structured around this premise: jeżeli we get an ECONNREFUSED from
# the whitehole, we proceed przy testing connect timeout against the
# blackhole. If we don't, we skip the test (przy a message about nie
# getting the required RST z the whitehole within the required
# timeframe).
# For the records, the whitehole/blackhole configuration has been set
# up using the 'pf' firewall (available on BSDs), using the following:
#
# ext_if="bge0"
#
# blackhole_ip="35.8.247.6"
# whitehole_ip="35.8.247.6"
# blackhole_port="56666"
# whitehole_port="56667"
#
# block zwróć w log quick on $ext_jeżeli proto { tcp udp } \
# z any to $whitehole_ip port $whitehole_port
# block drop w log quick on $ext_jeżeli proto { tcp udp } \
# z any to $blackhole_ip port $blackhole_port
#
skip = Prawda
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Use a timeout of 3 seconds. Why 3? Because it's more than 1, oraz
# less than 5. i.e. no particular reason. Feel free to tweak it if
# you feel a different value would be more appropriate.
timeout = 3
sock.settimeout(timeout)
spróbuj:
sock.connect((whitehole))
wyjąwszy socket.timeout:
dalej
wyjąwszy OSError jako err:
jeżeli err.errno == errno.ECONNREFUSED:
skip = Nieprawda
w_końcu:
sock.close()
usuń sock
jeżeli skip:
self.skipTest(
"We didn't receive a connection reset (RST) packet z "
"{}:{} within {} seconds, so we're unable to test connect "
"timeout against the corresponding {}:{} (which jest "
"configured to silently drop packets)."
.format(
whitehole[0],
whitehole[1],
timeout,
blackhole[0],
blackhole[1],
)
)
# All that hard work just to test jeżeli connect times out w 0.001s ;-)
self.addr_remote = blackhole
przy support.transient_internet(self.addr_remote[0]):
self._sock_operation(1, 0.001, 'connect', self.addr_remote)
def testRecvTimeout(self):
# Test recv() timeout
przy support.transient_internet(self.addr_remote[0]):
self.sock.connect(self.addr_remote)
self._sock_operation(1, 1.5, 'recv', 1024)
def testAcceptTimeout(self):
# Test accept() timeout
support.bind_port(self.sock, self.localhost)
self.sock.listen()
self._sock_operation(1, 1.5, 'accept')
def testSend(self):
# Test send() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data w order to bypass buffering w the TCP stack.
self._sock_operation(100, 1.5, 'send', b"X" * 200000)
def testSendto(self):
# Test sendto() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# The address argument jest ignored since we already connected.
self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
serv.getsockname())
def testSendall(self):
# Test sendall() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data w order to bypass buffering w the TCP stack.
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
klasa UDPTimeoutTestCase(TimeoutTestCase):
"""UDP test case dla socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.sock.close()
def testRecvfromTimeout(self):
# Test recvfrom() timeout
# Prevent "Address already w use" socket exceptions
support.bind_port(self.sock, self.localhost)
self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
support.requires('network')
support.run_unittest(
CreationTestCase,
TCPTimeoutTestCase,
UDPTimeoutTestCase,
)
jeżeli __name__ == "__main__":
test_main()
| 37.690789 | 84 | 0.629516 | """Unit tests dla socket timeout feature."""
zaimportuj functools
zaimportuj unittest
z test zaimportuj support
skip_expected = nie support.is_resource_enabled('network')
zaimportuj time
zaimportuj errno
zaimportuj socket
@functools.lru_cache()
def resolve_address(host, port):
"""Resolve an (host, port) to an address.
We must perform name resolution before timeout tests, otherwise it will be
performed by connect().
"""
przy support.transient_internet(host):
zwróć socket.getaddrinfo(host, port, socket.AF_INET,
socket.SOCK_STREAM)[0][4]
klasa CreationTestCase(unittest.TestCase):
"""Test case dla socket.gettimeout() oraz socket.settimeout()"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def testObjectCreation(self):
self.assertEqual(self.sock.gettimeout(), Nic,
"timeout nie disabled by default")
def testFloatReturnValue(self):
self.sock.settimeout(7.345)
self.assertEqual(self.sock.gettimeout(), 7.345)
self.sock.settimeout(3)
self.assertEqual(self.sock.gettimeout(), 3)
self.sock.settimeout(Nic)
self.assertEqual(self.sock.gettimeout(), Nic)
def testReturnType(self):
self.sock.settimeout(1)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
self.sock.settimeout(3.9)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
def testTypeCheck(self):
self.sock.settimeout(0)
self.sock.settimeout(0)
self.sock.settimeout(0.0)
self.sock.settimeout(Nic)
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, ())
self.assertRaises(TypeError, self.sock.settimeout, [])
self.assertRaises(TypeError, self.sock.settimeout, {})
self.assertRaises(TypeError, self.sock.settimeout, 0j)
def testRangeCheck(self):
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1.0)
def testTimeoutThenBlocking(self):
self.sock.settimeout(10)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), Nic)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.settimeout(10)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), Nic)
def testBlockingThenTimeout(self):
self.sock.setblocking(0)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
self.sock.setblocking(1)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
klasa TimeoutTestCase(unittest.TestCase):
# activity makes it inevitable that such tests will fail at times.
# When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
# oraz Win98SE. Boosting it to 2.0 helped a lot, but isn't a real
fuzz = 2.0
localhost = support.HOST
def setUp(self):
podnieś NotImplementedError()
tearDown = setUp
def _sock_operation(self, count, timeout, method, *args):
"""
Test the specified socket method.
The method jest run at most `count` times oraz must podnieś a socket.timeout
within `timeout` + self.fuzz seconds.
"""
self.sock.settimeout(timeout)
method = getattr(self.sock, method)
dla i w range(count):
t1 = time.time()
spróbuj:
method(*args)
wyjąwszy socket.timeout jako e:
delta = time.time() - t1
przerwij
inaczej:
self.fail('socket.timeout was nie podnieśd')
self.assertLess(delta, timeout + self.fuzz)
self.assertGreater(delta, timeout - 1.0)
klasa TCPTimeoutTestCase(TimeoutTestCase):
"""TCP test case dla socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addr_remote = resolve_address('www.python.org.', 80)
def tearDown(self):
self.sock.close()
def testConnectTimeout(self):
# z Python because it's a function of the underlying TCP/IP stack.
blackhole = resolve_address('blackhole.snakebite.net', 56666)
# to expect certain hosts may nie be able to reach the blackhole, due
# to firewalling albo general network configuration. In order to improve
# our confidence w testing the blackhole, a corresponding 'whitehole'
# has also been set up using one port higher:
whitehole = resolve_address('whitehole.snakebite.net', 56667)
# This address has been configured to immediately drop any incoming
# packets jako well, but it does it respectfully przy regards to the
# incoming protocol. RSTs are sent dla TCP packets, oraz ICMP UNREACH
# jest sent dla UDP/ICMP packets. This means our attempts to connect to
# it should be met immediately przy ECONNREFUSED. The test case has
# been structured around this premise: jeżeli we get an ECONNREFUSED from
# the whitehole, we proceed przy testing connect timeout against the
# blackhole. If we don't, we skip the test (przy a message about nie
skip = Prawda
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# less than 5. i.e. no particular reason. Feel free to tweak it if
# you feel a different value would be more appropriate.
timeout = 3
sock.settimeout(timeout)
spróbuj:
sock.connect((whitehole))
wyjąwszy socket.timeout:
dalej
wyjąwszy OSError jako err:
jeżeli err.errno == errno.ECONNREFUSED:
skip = Nieprawda
w_końcu:
sock.close()
usuń sock
jeżeli skip:
self.skipTest(
"We didn't receive a connection reset (RST) packet z "
"{}:{} within {} seconds, so we're unable to test connect "
"timeout against the corresponding {}:{} (which jest "
"configured to silently drop packets)."
.format(
whitehole[0],
whitehole[1],
timeout,
blackhole[0],
blackhole[1],
)
)
# All that hard work just to test jeżeli connect times out w 0.001s ;-)
self.addr_remote = blackhole
przy support.transient_internet(self.addr_remote[0]):
self._sock_operation(1, 0.001, 'connect', self.addr_remote)
def testRecvTimeout(self):
# Test recv() timeout
przy support.transient_internet(self.addr_remote[0]):
self.sock.connect(self.addr_remote)
self._sock_operation(1, 1.5, 'recv', 1024)
def testAcceptTimeout(self):
# Test accept() timeout
support.bind_port(self.sock, self.localhost)
self.sock.listen()
self._sock_operation(1, 1.5, 'accept')
def testSend(self):
# Test send() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data w order to bypass buffering w the TCP stack.
self._sock_operation(100, 1.5, 'send', b"X" * 200000)
def testSendto(self):
# Test sendto() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# The address argument jest ignored since we already connected.
self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
serv.getsockname())
def testSendall(self):
# Test sendall() timeout
przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv:
support.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data w order to bypass buffering w the TCP stack.
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
klasa UDPTimeoutTestCase(TimeoutTestCase):
"""UDP test case dla socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.sock.close()
def testRecvfromTimeout(self):
# Test recvfrom() timeout
# Prevent "Address already w use" socket exceptions
support.bind_port(self.sock, self.localhost)
self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
support.requires('network')
support.run_unittest(
CreationTestCase,
TCPTimeoutTestCase,
UDPTimeoutTestCase,
)
jeżeli __name__ == "__main__":
test_main()
| false | true |
f7317e2c185510c822951fefebfbee8e10479664 | 38,094 | py | Python | t3f/riemannian.py | aiboyko/t3f | 0361b80f36a06eb5aa5d536650eef9e006289139 | [
"MIT"
] | null | null | null | t3f/riemannian.py | aiboyko/t3f | 0361b80f36a06eb5aa5d536650eef9e006289139 | [
"MIT"
] | null | null | null | t3f/riemannian.py | aiboyko/t3f | 0361b80f36a06eb5aa5d536650eef9e006289139 | [
"MIT"
] | null | null | null | import tensorflow.compat.v1 as tf
from t3f.tensor_train import TensorTrain
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import shapes
from t3f import decompositions
def project_sum(what, where, weights=None):
"""Project sum of `what` TTs on the tangent space of `where` TT.
project_sum(what, x) = P_x(what)
project_sum(batch_what, x) = P_x(\sum_i batch_what[i])
project_sum(batch_what, x, weights) = P_x(\sum_j weights[j] * batch_what[j])
This function implements the algorithm from the paper [1], theorem 3.1.
[1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of
Tensor Trains.
Args:
what: TensorTrain or TensorTrainBatch. In the case of batch returns
projection of the sum of elements in the batch.
where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project
weights: python list or tf.Tensor of numbers or None, weights of the sum
Returns:
a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks()
Complexity:
O(d r_where^3 m) for orthogonalizing the TT-cores of where
+O(batch_size d r_what r_where n (r_what + r_where))
d is the number of TT-cores (what.ndims());
r_what is the largest TT-rank of what max(what.get_tt_rank())
r_where is the largest TT-rank of where
n is the size of the axis dimension of what and where e.g.
for a tensor of size 4 x 4 x 4, n is 4;
for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12
"""
# Always work with batch of TT objects for simplicity.
what = shapes.expand_batch_dim(what)
if weights is not None:
weights = tf.convert_to_tensor(weights, dtype=where.dtype)
if not isinstance(where, TensorTrain):
raise ValueError('The first argument should be a TensorTrain object, got '
'"%s".' % where)
if where.get_raw_shape() != what.get_raw_shape():
raise ValueError('The shapes of the tensor we want to project and of the '
'tensor on which tangent space we want to project should '
'match, got %s and %s.' %
(where.get_raw_shape(),
what.get_raw_shape()))
dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
what.dtype.is_compatible_with(where.dtype))
if not dtypes_compatible:
raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
(where.dtype,
what.dtype))
left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
where)
right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
left_tangent_space_tens, left_to_right=False)
ndims = where.ndims()
dtype = where.dtype
raw_shape = shapes.lazy_raw_shape(where)
batch_size = shapes.lazy_batch_size(what)
right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)
# For einsum notation.
mode_str = 'ij' if where.is_tt_matrix() else 'i'
right_rank_dim = where.right_tt_rank_dim
left_rank_dim = where.left_tt_rank_dim
if weights is not None:
weights_shape = weights.get_shape()
output_is_batch = len(weights_shape) > 1 and weights_shape[1] > 1
else:
output_is_batch = False
output_batch_str = 'o' if output_is_batch else ''
if output_is_batch:
right_rank_dim += 1
left_rank_dim += 1
output_batch_size = weights.get_shape()[1].value
# Prepare rhs vectors.
# rhs[core_idx] is of size
# batch_size x tensor_tt_ranks[core_idx] x tangent_tt_ranks[core_idx]
rhs = [None] * (ndims + 1)
rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype)
for core_idx in range(ndims - 1, 0, -1):
tens_core = what.tt_cores[core_idx]
right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str)
rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1],
right_tang_core)
# Prepare lhs vectors.
# lhs[core_idx] is of size
# batch_size x tangent_tt_ranks[core_idx] x tensor_tt_ranks[core_idx]
lhs = [None] * (ndims + 1)
lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype)
for core_idx in range(ndims - 1):
tens_core = what.tt_cores[core_idx]
left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str)
lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core,
tens_core)
# Left to right sweep.
res_cores_list = []
for core_idx in range(ndims):
tens_core = what.tt_cores[core_idx]
left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
if core_idx < ndims - 1:
einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str)
proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1])
if weights is None:
einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str)
proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])
else:
einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str, output_batch_str)
proj_core_s = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])
einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str)
proj_core = tf.einsum(einsum_str, weights, proj_core_s)
if core_idx == ndims - 1:
if weights is None:
einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str)
proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
else:
einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str, output_batch_str)
proj_core_s = tf.einsum(einsum_str, lhs[core_idx], tens_core)
einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str)
proj_core = tf.einsum(einsum_str, weights, proj_core_s)
if output_is_batch:
# Add batch dimension of size output_batch_size to left_tang_core and
# right_tang_core
extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
if where.is_tt_matrix():
extended_left_tang_core = tf.tile(extended_left_tang_core,
[output_batch_size, 1, 1, 1, 1])
extended_right_tang_core = tf.tile(extended_right_tang_core,
[output_batch_size, 1, 1, 1, 1])
else:
extended_left_tang_core = tf.tile(extended_left_tang_core,
[output_batch_size, 1, 1, 1])
extended_right_tang_core = tf.tile(extended_right_tang_core,
[output_batch_size, 1, 1, 1])
else:
extended_left_tang_core = left_tang_core
extended_right_tang_core = right_tang_core
if core_idx == 0:
res_core = tf.concat((proj_core, extended_left_tang_core),
axis=right_rank_dim)
elif core_idx == ndims - 1:
res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim)
else:
rank_1 = right_tangent_tt_ranks[core_idx]
rank_2 = left_tangent_tt_ranks[core_idx + 1]
if where.is_tt_matrix():
mode_size_n = raw_shape[0][core_idx]
mode_size_m = raw_shape[1][core_idx]
shape = [rank_1, mode_size_n, mode_size_m, rank_2]
else:
mode_size = raw_shape[0][core_idx]
shape = [rank_1, mode_size, rank_2]
if output_is_batch:
shape = [output_batch_size] + shape
zeros = tf.zeros(shape, dtype)
upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim)
lower = tf.concat((proj_core, extended_left_tang_core),
axis=right_rank_dim)
res_core = tf.concat((upper, lower), axis=left_rank_dim)
res_cores_list.append(res_core)
# TODO: TT-ranks.
if output_is_batch:
res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
batch_size=output_batch_size)
else:
res = TensorTrain(res_cores_list, where.get_raw_shape())
res.projection_on = where
return res
def project(what, where):
  """Project `what` TTs on the tangent space of `where` TT.

  project(what, x) = P_x(what)
  project(batch_what, x) = batch(P_x(batch_what[0]), ..., P_x(batch_what[N]))

  This function implements the algorithm from the paper [1], theorem 3.1.

  [1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of
    Tensor Trains.

  Args:
    what: TensorTrain or TensorTrainBatch. In the case of batch returns
      batch with projection of each individual tensor.
    where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project

  Returns:
    a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks()

  Complexity:
    O(d r_where^3 m) for orthogonalizing the TT-cores of where
    +O(batch_size d r_what r_where n (r_what + r_where))
    d is the number of TT-cores (what.ndims());
    r_what is the largest TT-rank of what max(what.get_tt_rank())
    r_where is the largest TT-rank of where
    n is the size of the axis dimension of what and where e.g.
      for a tensor of size 4 x 4 x 4, n is 4;
      for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12
  """
  # Validate the arguments: `where` must be a single TT object whose raw shape
  # matches `what` and whose dtype is compatible with it.
  if not isinstance(where, TensorTrain):
    raise ValueError('The first argument should be a TensorTrain object, got '
                     '"%s".' % where)

  if where.get_raw_shape() != what.get_raw_shape():
    raise ValueError('The shapes of the tensor we want to project and of the '
                     'tensor on which tangent space we want to project should '
                     'match, got %s and %s.' %
                     (where.get_raw_shape(),
                      what.get_raw_shape()))
  dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
                       what.dtype.is_compatible_with(where.dtype))
  if not dtypes_compatible:
    raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
                     (where.dtype,
                      what.dtype))

  # Left- and right-orthogonalized copies of `where`: their cores are the
  # orthonormal bases of the tangent space (theorem 3.1 in [1]).
  left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      where)
  right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      left_tangent_space_tens, left_to_right=False)

  ndims = where.ndims()
  dtype = where.dtype
  raw_shape = shapes.lazy_raw_shape(where)
  right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
  left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)

  # For einsum notation: a TT-matrix core has two mode indices per core ('ij'),
  # a TT-tensor core has one ('i').
  mode_str = 'ij' if where.is_tt_matrix() else 'i'
  # NOTE: the rank axes come from `what` (not `where`): for a batch input they
  # are already shifted by the leading batch axis.
  right_rank_dim = what.right_tt_rank_dim
  left_rank_dim = what.left_tt_rank_dim
  # Unlike project_sum, a batch input yields a batch of per-element
  # projections rather than a projection of the sum.
  output_is_batch = isinstance(what, TensorTrainBatch)
  if output_is_batch:
    output_batch_size = what.batch_size

  # Always work with batch of TT objects for simplicity.
  what = shapes.expand_batch_dim(what)
  batch_size = shapes.lazy_batch_size(what)

  # Prepare rhs vectors.
  # rhs[core_idx] is of size
  # batch_size x tensor_tt_ranks[core_idx] x tangent_tt_ranks[core_idx]
  # Right-to-left recursion contracting `what` with the right-orthogonal cores.
  rhs = [None] * (ndims + 1)
  rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1, 0, -1):
    tens_core = what.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str)
    rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1],
                              right_tang_core)

  # Prepare lhs vectors.
  # lhs[core_idx] is of size
  # batch_size x tangent_tt_ranks[core_idx] x tensor_tt_ranks[core_idx]
  # Symmetric left-to-right recursion with the left-orthogonal cores.
  lhs = [None] * (ndims + 1)
  lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str)
    lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core,
                                  tens_core)

  # Left to right sweep: build a projected "delta" core per position and embed
  # it into the block structure of a tangent-space element.
  res_cores_list = []
  for core_idx in range(ndims):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]

    if core_idx < ndims - 1:
      einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
      # Subtract the component already representable by the left basis (the
      # gauge condition that makes the projection unique).
      einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str)
      proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1])
      # Keep the batch axis 's' in the output iff the result is a batch;
      # otherwise sum it out inside the contraction.
      if output_is_batch:
        einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str)
      else:
        einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])

    if core_idx == ndims - 1:
      if output_is_batch:
        einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
      else:
        einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)

    if output_is_batch:
      # Add batch dimension of size output_batch_size to left_tang_core and
      # right_tang_core
      extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
      extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
      if where.is_tt_matrix():
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1, 1])
      else:
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1])
    else:
      extended_left_tang_core = left_tang_core
      extended_right_tang_core = right_tang_core

    # Assemble the tangent-space element cores:
    #   first core:  [P, L], last core: [R; P], middle: [[R, 0], [P, L]],
    # which is why the result has doubled TT-ranks.
    if core_idx == 0:
      res_core = tf.concat((proj_core, extended_left_tang_core),
                           axis=right_rank_dim)
    elif core_idx == ndims - 1:
      res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim)
    else:
      rank_1 = right_tangent_tt_ranks[core_idx]
      rank_2 = left_tangent_tt_ranks[core_idx + 1]
      if where.is_tt_matrix():
        mode_size_n = raw_shape[0][core_idx]
        mode_size_m = raw_shape[1][core_idx]
        shape = [rank_1, mode_size_n, mode_size_m, rank_2]
      else:
        mode_size = raw_shape[0][core_idx]
        shape = [rank_1, mode_size, rank_2]
      if output_is_batch:
        shape = [output_batch_size] + shape
      zeros = tf.zeros(shape, dtype)
      upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim)
      lower = tf.concat((proj_core, extended_left_tang_core),
                        axis=right_rank_dim)
      res_core = tf.concat((upper, lower), axis=left_rank_dim)
    res_cores_list.append(res_core)
  # TODO: TT-ranks.
  if output_is_batch:
    res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
                           batch_size=output_batch_size)
  else:
    res = TensorTrain(res_cores_list, where.get_raw_shape())

  # Remember which tangent space the result lives in; consumers (e.g.
  # pairwise_flat_inner_projected) rely on this attribute.
  res.projection_on = where
  return res
def project_matmul(what, where, matrix):
  """Project `matrix` * `what` TTs on the tangent space of `where` TT.

  project(what, x) = P_x(what)
  project(batch_what, x) = batch(P_x(batch_what[0]), ..., P_x(batch_what[N]))

  This function implements the algorithm from the paper [1], theorem 3.1.

  [1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of
    Tensor Trains.

  Args:
    what: TensorTrain or TensorTrainBatch. In the case of batch returns
      batch with projection of each individual tensor.
    where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project
    matrix: TensorTrain, TT-matrix to multiply by what

  Returns:
    a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks()

  Complexity:
    O(d r_where^3 m) for orthogonalizing the TT-cores of where
    +O(batch_size d R r_what r_where (n r_what + n m R + m r_where))
    d is the number of TT-cores (what.ndims());
    r_what is the largest TT-rank of what max(what.get_tt_rank())
    r_where is the largest TT-rank of where
    matrix is of TT-rank R and of raw-shape (m, m, ..., m) x (n, n, ..., n).
  """
  # Validate the arguments: `where` must be a single TT object whose raw shape
  # matches `what` and whose dtype is compatible with it.
  if not isinstance(where, TensorTrain):
    raise ValueError('The first argument should be a TensorTrain object, got '
                     '"%s".' % where)

  if where.get_raw_shape() != what.get_raw_shape():
    raise ValueError('The shapes of the tensor we want to project and of the '
                     'tensor on which tangent space we want to project should '
                     'match, got %s and %s.' %
                     (where.get_raw_shape(),
                      what.get_raw_shape()))
  dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
                       what.dtype.is_compatible_with(where.dtype))
  if not dtypes_compatible:
    raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
                     (where.dtype,
                      what.dtype))

  # Left- and right-orthogonalized copies of `where`: their cores are the
  # orthonormal bases of the tangent space (theorem 3.1 in [1]).
  left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      where)
  right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      left_tangent_space_tens, left_to_right=False)

  ndims = where.ndims()
  dtype = where.dtype
  raw_shape = shapes.lazy_raw_shape(where)
  batch_size = shapes.lazy_batch_size(what)
  right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
  left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)

  # For einsum notation.
  right_rank_dim = what.right_tt_rank_dim
  left_rank_dim = what.left_tt_rank_dim
  output_is_batch = isinstance(what, TensorTrainBatch)
  if output_is_batch:
    output_batch_size = what.batch_size

  # Always work with batch of TT objects for simplicity.
  what = shapes.expand_batch_dim(what)

  # Prepare rhs vectors.
  # rhs[core_idx] is of size
  #   batch_size x tensor_tt_ranks[core_idx] x matrix_tt_ranks[core_idx] x tangent_tt_ranks[core_idx]
  # Right-to-left recursion; the matrix cores are contracted in on the fly so
  # that matrix * what is never materialized as a separate TT object.
  rhs = [None] * (ndims + 1)
  rhs[ndims] = tf.ones((batch_size, 1, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1, 0, -1):
    tens_core = what.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    rhs[core_idx] = tf.einsum('bije,cikf,sdef,sajkd->sabc', matrix_core,
                              right_tang_core, rhs[core_idx + 1], tens_core)
  # Prepare lhs vectors.
  # lhs[core_idx] is of size
  #   batch_size x tangent_tt_ranks[core_idx] x matrix_tt_ranks[core_idx] x tensor_tt_ranks[core_idx]
  # Symmetric left-to-right recursion with the left-orthogonal cores.
  lhs = [None] * (ndims + 1)
  lhs[0] = tf.ones((batch_size, 1, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    # TODO: brutforce order of indices in lhs??
    lhs[core_idx + 1] = tf.einsum('bije,aikd,sabc,scjkf->sdef', matrix_core,
                                  left_tang_core, lhs[core_idx], tens_core)

  # Left to right sweep: build a projected "delta" core per position and embed
  # it into the block structure of a tangent-space element.
  res_cores_list = []
  for core_idx in range(ndims):
    tens_core = what.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]

    if core_idx < ndims - 1:
      proj_core = tf.einsum('scjke,sabc,bijd->saikde', tens_core,
                            lhs[core_idx], matrix_core)
      # Subtract the component already representable by the left basis (the
      # gauge condition that makes the projection unique).
      proj_core -= tf.einsum('aikb,sbcd->saikcd', left_tang_core,
                             lhs[core_idx + 1])
      proj_core = tf.einsum('saikcb,sbcd->saikd', proj_core, rhs[core_idx + 1])

    if core_idx == ndims - 1:
      # d and e dimensions take 1 value, since its the last rank.
      # To make the result shape (?, ?, ?, 1), we are summing d and leaving e,
      # but we could have done the opposite -- sum e and leave d.
      proj_core = tf.einsum('sabc,bijd,scjke->saike', lhs[core_idx], matrix_core,
                            tens_core)

    if output_is_batch:
      # Add batch dimension of size output_batch_size to left_tang_core and
      # right_tang_core
      extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
      extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
      extended_left_tang_core = tf.tile(extended_left_tang_core,
                                        [output_batch_size, 1, 1, 1, 1])
      extended_right_tang_core = tf.tile(extended_right_tang_core,
                                         [output_batch_size, 1, 1, 1, 1])
    else:
      extended_left_tang_core = left_tang_core
      extended_right_tang_core = right_tang_core

    # Assemble the tangent-space element cores:
    #   first core:  [P, L], last core: [R; P], middle: [[R, 0], [P, L]],
    # which is why the result has doubled TT-ranks.
    if core_idx == 0:
      res_core = tf.concat((proj_core, extended_left_tang_core),
                           axis=right_rank_dim)
    elif core_idx == ndims - 1:
      res_core = tf.concat((extended_right_tang_core, proj_core),
                           axis=left_rank_dim)
    else:
      rank_1 = right_tangent_tt_ranks[core_idx]
      rank_2 = left_tangent_tt_ranks[core_idx + 1]
      mode_size_n = raw_shape[0][core_idx]
      mode_size_m = raw_shape[1][core_idx]
      shape = [rank_1, mode_size_n, mode_size_m, rank_2]
      if output_is_batch:
        shape = [output_batch_size] + shape
      zeros = tf.zeros(shape, dtype)
      upper = tf.concat((extended_right_tang_core, zeros),
                        axis=right_rank_dim)
      lower = tf.concat((proj_core, extended_left_tang_core),
                        axis=right_rank_dim)
      res_core = tf.concat((upper, lower), axis=left_rank_dim)
    res_cores_list.append(res_core)

  # TODO: TT-ranks.
  if output_is_batch:
    res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
                           batch_size=output_batch_size)
  else:
    res = TensorTrain(res_cores_list, where.get_raw_shape())

  # Remember which tangent space the result lives in; consumers (e.g.
  # pairwise_flat_inner_projected) rely on this attribute.
  res.projection_on = where
  return res
def pairwise_flat_inner_projected(projected_tt_vectors_1,
                                  projected_tt_vectors_2):
  """Scalar products between two batches of TTs from the same tangent space.

  res[i, j] = t3f.flat_inner(projected_tt_vectors_1[i], projected_tt_vectors_2[j]).

  pairwise_flat_inner_projected(projected_tt_vectors_1, projected_tt_vectors_2)
  is equivalent to
    pairwise_flat_inner(projected_tt_vectors_1, projected_tt_vectors_2)
  , but works only on objects from the same tangent space and is much faster
  than general pairwise_flat_inner.

  Args:
    projected_tt_vectors_1: TensorTrainBatch of tensors projected on the same
      tangent space as projected_tt_vectors_2.
    projected_tt_vectors_2: TensorTrainBatch.

  Returns:
    tf.tensor with the scalar product matrix.

  Raises:
    ValueError: if either argument is not a projection, or if the two
      arguments are projections on different TT-objects.

  Complexity:
    O(batch_size^2 d r^2 n), where
    d is the number of TT-cores (projected_tt_vectors_1.ndims());
    r is the largest TT-rank max(projected_tt_vectors_1.get_tt_rank())
      (i.e. 2 * {the TT-rank of the object we projected vectors onto}.
    and n is the size of the axis dimension, e.g.
      for a tensor of size 4 x 4 x 4, n is 4;
      for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12.
  """
  if not hasattr(projected_tt_vectors_1, 'projection_on') or \
      not hasattr(projected_tt_vectors_2, 'projection_on'):
    raise ValueError('Both arguments should be projections on the tangent '
                     'space of some other TT-object. All projection* functions '
                     'leave .projection_on field in the resulting TT-object '
                     'which is not present in the arguments you\'ve provided')
  # Identity (not mathematical equality) of the base point is required: the
  # fast formulas below assume both batches share the same U/V cores.
  if projected_tt_vectors_1.projection_on != projected_tt_vectors_2.projection_on:
    raise ValueError('Both arguments should be projections on the tangent '
                     'space of the same TT-object. The provided arguments are '
                     'projections on different TT-objects (%s and %s). Or at '
                     'least the pointers are different.' %
                     (projected_tt_vectors_1.projection_on,
                      projected_tt_vectors_2.projection_on))
  # Always work with batches of objects for simplicity.
  projected_tt_vectors_1 = shapes.expand_batch_dim(projected_tt_vectors_1)
  projected_tt_vectors_2 = shapes.expand_batch_dim(projected_tt_vectors_2)

  ndims = projected_tt_vectors_1.ndims()
  tt_ranks = shapes.lazy_tt_ranks(projected_tt_vectors_1)

  # Each internal TT-rank of a projection is doubled: the lower-left quadrant
  # of every middle core holds the delta (dP) block. Only the delta blocks
  # contribute to the scalar product thanks to the gauge conditions, so we
  # slice them out and accumulate core-wise einsum contractions.
  if projected_tt_vectors_1.is_tt_matrix():
    right_size = tt_ranks[1] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[0]
    curr_core_2 = projected_tt_vectors_2.tt_cores[0]
    curr_du_1 = curr_core_1[:, :, :, :, :right_size]
    curr_du_2 = curr_core_2[:, :, :, :, :right_size]
    res = tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)
    for core_idx in range(1, ndims):
      left_size = tt_ranks[core_idx] // 2
      right_size = tt_ranks[core_idx + 1] // 2
      curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx]
      curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx]
      curr_du_1 = curr_core_1[:, left_size:, :, :, :right_size]
      curr_du_2 = curr_core_2[:, left_size:, :, :, :right_size]
      res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)

    # Last core: the delta block occupies the lower half of the left rank dim.
    left_size = tt_ranks[-2] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[-1]
    curr_core_2 = projected_tt_vectors_2.tt_cores[-1]
    curr_du_1 = curr_core_1[:, left_size:, :, :, :]
    curr_du_2 = curr_core_2[:, left_size:, :, :, :]
    res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)
  else:
    # Working with TT-tensor, not TT-matrix.
    right_size = tt_ranks[1] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[0]
    curr_core_2 = projected_tt_vectors_2.tt_cores[0]
    curr_du_1 = curr_core_1[:, :, :, :right_size]
    curr_du_2 = curr_core_2[:, :, :, :right_size]
    res = tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)
    for core_idx in range(1, ndims):
      left_size = tt_ranks[core_idx] // 2
      right_size = tt_ranks[core_idx + 1] // 2
      curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx]
      curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx]
      curr_du_1 = curr_core_1[:, left_size:, :, :right_size]
      curr_du_2 = curr_core_2[:, left_size:, :, :right_size]
      res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)

    left_size = tt_ranks[-2] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[-1]
    curr_core_2 = projected_tt_vectors_2.tt_cores[-1]
    curr_du_1 = curr_core_1[:, left_size:, :, :]
    curr_du_2 = curr_core_2[:, left_size:, :, :]
    res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)
  return res
def add_n_projected(tt_objects, coef=None):
  """Adds all input TT-objects that are projections on the same tangent space.

  add_projected((a, b)) is equivalent add(a, b) for a and b that are from the
  same tangent space, but doesn't increase the TT-ranks.

  Args:
    tt_objects: a list of TT-objects that are projections on the same tangent
      space.
    coef: a list of numbers or anything else convertible to tf.Tensor.
      If provided, computes weighted sum. The size of this array should be
        len(tt_objects) x tt_objects[0].batch_size

  Returns:
    TT-objects representing the sum of the tt_objects (weighted sum if coef is
    provided). The TT-rank of the result equals to the TT-ranks of the
    arguments.

  Raises:
    ValueError: if any input is not a projection, or if inputs are projections
      on different TT-objects.
  """
  for tt in tt_objects:
    if not hasattr(tt, 'projection_on'):
      raise ValueError('Both arguments should be projections on the tangent '
                       'space of some other TT-object. All projection* functions '
                       'leave .projection_on field in the resulting TT-object '
                       'which is not present in the argument you\'ve provided.')

  projection_on = tt_objects[0].projection_on
  for tt in tt_objects[1:]:
    if tt.projection_on != projection_on:
      raise ValueError('All tt_objects should be projections on the tangent '
                       'space of the same TT-object. The provided arguments are '
                       'projections on different TT-objects (%s and %s). Or at '
                       'least the pointers are different.' % (tt.projection_on,
                                                              projection_on))
  if coef is not None:
    coef = tf.convert_to_tensor(coef, dtype=tt_objects[0].dtype)
    if coef.get_shape().ndims > 1:
      # In batch case we will need to multiply each core by this coefficients
      # along the first axis. To do it need to reshape the coefs to match
      # the TT-cores number of dimensions.
      some_core = tt_objects[0].tt_cores[0]
      dim_array = [1] * (some_core.get_shape().ndims + 1)
      dim_array[0] = coef.get_shape()[0].value
      dim_array[1] = coef.get_shape()[1].value
      coef = tf.reshape(coef, dim_array)

  ndims = tt_objects[0].ndims()
  tt_ranks = shapes.lazy_tt_ranks(tt_objects[0])
  left_rank_dim = tt_objects[0].left_tt_rank_dim
  right_rank_dim = tt_objects[0].right_tt_rank_dim
  res_cores = []

  def slice_tt_core(tt_core, left_idx, right_idx):
    # Slice a core along its two TT-rank dimensions, keeping all other
    # dimensions intact.
    num_tt_core_dims = len(tt_core.get_shape())
    idx = [slice(None)] * num_tt_core_dims
    idx[left_rank_dim] = left_idx
    idx[right_rank_dim] = right_idx
    return tt_core[idx]

  # First core: only the left (delta) half of the right rank dim is summed;
  # the right half (the shared U-core block) is taken from the first object.
  right_half_rank = tt_ranks[1] // 2
  left_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[0], slice(None),
                              slice(0, right_half_rank))
    if coef is not None:
      curr_core *= coef[obj_idx]
    left_chunks.append(curr_core)
  left_part = tf.add_n(left_chunks)
  first_obj_core = tt_objects[0].tt_cores[0]
  right_part = slice_tt_core(first_obj_core, slice(None),
                             slice(right_half_rank, None))
  first_core = tf.concat((left_part, right_part), axis=right_rank_dim)
  res_cores.append(first_core)

  for core_idx in range(1, ndims - 1):
    first_obj_core = tt_objects[0].tt_cores[core_idx]
    left_half_rank = tt_ranks[core_idx] // 2
    right_half_rank = tt_ranks[core_idx + 1] // 2

    # NOTE(review): `tt` here is the leftover variable from the previous
    # `for obj_idx, tt in enumerate(tt_objects)` loop, i.e. the LAST object.
    # Presumably the non-delta blocks of all objects coincide (same tangent
    # space), so any object works — confirm, and consider using
    # tt_objects[0] explicitly for clarity.
    upper_part = slice_tt_core(tt.tt_cores[core_idx], slice(0, left_half_rank),
                               slice(None))
    lower_right_part = slice_tt_core(first_obj_core,
                                     slice(left_half_rank, None),
                                     slice(right_half_rank, None))

    # Only the lower-left quadrant (the delta block) is actually summed.
    lower_left_chunks = []
    for obj_idx, tt in enumerate(tt_objects):
      curr_core = slice_tt_core(tt.tt_cores[core_idx],
                                slice(left_half_rank, None),
                                slice(0, right_half_rank))
      if coef is not None:
        curr_core *= coef[obj_idx]
      lower_left_chunks.append(curr_core)
    lower_left_part = tf.add_n(lower_left_chunks)
    lower_part = tf.concat((lower_left_part, lower_right_part),
                           axis=right_rank_dim)
    curr_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
    res_cores.append(curr_core)

  # Last core: the delta block is the lower half of the left rank dim.
  left_half_rank = tt_ranks[ndims - 1] // 2
  # NOTE(review): same leftover-`tt` pattern as above — confirm intent.
  upper_part = slice_tt_core(tt.tt_cores[-1], slice(0, left_half_rank),
                             slice(None))
  lower_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[-1], slice(left_half_rank, None),
                              slice(None))
    if coef is not None:
      curr_core *= coef[obj_idx]
    lower_chunks.append(curr_core)
  lower_part = tf.add_n(lower_chunks)
  last_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
  res_cores.append(last_core)

  raw_shape = tt_objects[0].get_raw_shape()
  static_tt_ranks = tt_objects[0].get_tt_ranks()
  # NOTE(review): if tt_objects[0] is neither TensorTrain nor
  # TensorTrainBatch, `res` would be unbound here — verify callers.
  if isinstance(tt_objects[0], TensorTrain):
    res = TensorTrain(res_cores, raw_shape, static_tt_ranks)
  elif isinstance(tt_objects[0], TensorTrainBatch):
    res = TensorTrainBatch(res_cores, raw_shape, static_tt_ranks,
                           tt_objects[0].batch_size)
  # Maintain the projection_on property.
  res.projection_on = tt_objects[0].projection_on
  return res
def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
  """Convert an element of the tangent space to deltas representation.

  Tangent space elements (outputs of t3f.project) look like:
    dP1 V2 ... Vd + U1 dP2 V3 ... Vd + ... + U1 ... Ud-1 dPd.
  This function takes as input an element of the tangent space and converts
  it to the list of deltas [dP1, ..., dPd].

  Args:
    tt: `TensorTrain` or `TensorTrainBatch` that is a result of t3f.project,
      t3f.project_matmul, or other similar functions.
    name: string, name of the Op.

  Returns:
    A list of delta-cores (tf.Tensors).

  Raises:
    ValueError: if `tt` is not a projection (has no `projection_on` field or
      has odd internal TT-ranks).
  """
  if not hasattr(tt, 'projection_on') or tt.projection_on is None:
    raise ValueError('tt argument is supposed to be a projection, but it '
                     'lacks projection_on field')
  num_dims = tt.ndims()
  left_tt_rank_dim = tt.left_tt_rank_dim
  right_tt_rank_dim = tt.right_tt_rank_dim
  deltas = [None] * num_dims
  tt_ranks = shapes.lazy_tt_ranks(tt)
  for i in range(1, num_dims - 1):
    # Every internal rank of a projection is doubled (upper/lower blocks),
    # so an odd rank means tt cannot be a tangent-space element.
    # (Equivalent to the original `int(r / 2) != r / 2` test, but idiomatic.)
    if tt_ranks[i] % 2 != 0:
      raise ValueError('tt argument is supposed to be a projection, but its '
                       'ranks are not even.')
  with tf.name_scope(name, values=tt.tt_cores):
    for i in range(1, num_dims - 1):
      r1, r2 = tt_ranks[i], tt_ranks[i + 1]
      curr_core = tt.tt_cores[i]
      slc = [slice(None)] * len(curr_core.shape)
      # The delta block is the lower-left quadrant of the two rank dims.
      slc[left_tt_rank_dim] = slice(r1 // 2, None)
      slc[right_tt_rank_dim] = slice(0, r2 // 2)
      deltas[i] = curr_core[slc]
    # First core: delta occupies the left half of the right rank dim.
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[right_tt_rank_dim] = slice(0, tt_ranks[1] // 2)
    deltas[0] = tt.tt_cores[0][slc]
    # Last core: delta occupies the lower half of the left rank dim.
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[left_tt_rank_dim] = slice(tt_ranks[-2] // 2, None)
    deltas[num_dims - 1] = tt.tt_cores[num_dims - 1][slc]
  return deltas
def deltas_to_tangent_space(deltas, tt, left=None, right=None,
                            name='t3f_deltas_to_tangent_space'):
  """Converts deltas representation of tangent space vector to TT object.

  Takes as input a list of [dP1, ..., dPd] and returns
    dP1 V2 ... Vd + U1 dP2 V3 ... Vd + ... + U1 ... Ud-1 dPd.

  This function is hard to use correctly because deltas should obey the
  so called gauge conditions. If they don't, the function will silently return
  incorrect result. This is why this function is not imported in __init__.

  Args:
    deltas: a list of deltas (essentially TT-cores) obeying the gauge
      conditions.
    tt: `TensorTrain` object on which the tangent space tensor represented by
      deltas is projected.
    left: t3f.orthogonalize_tt_cores(tt). If you have it already computed, you
      may pass it as argument to avoid recomputing.
    right: t3f.orthogonalize_tt_cores(left, left_to_right=False). If you have
      it already computed, you may pass it as argument to avoid recomputing.
    name: string, name of the Op.

  Returns:
    `TensorTrain` object constructed from deltas, that is from the tangent
    space at point `tt`.
  """
  cores = []
  dtype = tt.dtype
  num_dims = tt.ndims()
  # TODO: add cache instead of manually passing precomputed stuff?
  input_tensors = list(tt.tt_cores) + list(deltas)
  if left is not None:
    input_tensors += list(left.tt_cores)
  if right is not None:
    input_tensors += list(right.tt_cores)
  with tf.name_scope(name, values=input_tensors):
    if left is None:
      left = decompositions.orthogonalize_tt_cores(tt)
    if right is None:
      right = decompositions.orthogonalize_tt_cores(left, left_to_right=False)
    left_tangent_tt_ranks = shapes.lazy_tt_ranks(left)
    # Fix: previously read from `left` (copy-paste). Orthogonalization
    # preserves TT-ranks so the values coincide, but read them from `right`
    # for correctness and consistency with the project* functions above.
    right_tangent_tt_ranks = shapes.lazy_tt_ranks(right)
    raw_shape = shapes.lazy_raw_shape(left)
    right_rank_dim = left.right_tt_rank_dim
    left_rank_dim = left.left_tt_rank_dim
    # Deltas with one extra leading dimension indicate a batch of tangent
    # vectors; shift the rank dims accordingly.
    is_batch_case = len(deltas[0].shape) > len(tt.tt_cores[0].shape)
    if is_batch_case:
      right_rank_dim += 1
      left_rank_dim += 1
      batch_size = deltas[0].shape.as_list()[0]
    for i in range(num_dims):
      left_tt_core = left.tt_cores[i]
      right_tt_core = right.tt_cores[i]
      if is_batch_case:
        tile = [1] * len(left_tt_core.shape)
        tile = [batch_size] + tile
        left_tt_core = tf.tile(left_tt_core[None, ...], tile)
        right_tt_core = tf.tile(right_tt_core[None, ...], tile)

      if i == 0:
        tangent_core = tf.concat((deltas[i], left_tt_core),
                                 axis=right_rank_dim)
      elif i == num_dims - 1:
        tangent_core = tf.concat((right_tt_core, deltas[i]),
                                 axis=left_rank_dim)
      else:
        # Middle cores are block matrices [[V, 0], [dP, U]] w.r.t. the two
        # rank dimensions.
        rank_1 = right_tangent_tt_ranks[i]
        rank_2 = left_tangent_tt_ranks[i + 1]
        if tt.is_tt_matrix():
          mode_size_n = raw_shape[0][i]
          mode_size_m = raw_shape[1][i]
          shape = [rank_1, mode_size_n, mode_size_m, rank_2]
        else:
          mode_size_n = raw_shape[0][i]
          shape = [rank_1, mode_size_n, rank_2]
        if is_batch_case:
          shape = [batch_size] + shape
        zeros = tf.zeros(shape, dtype=dtype)
        upper = tf.concat((right_tt_core, zeros), axis=right_rank_dim)
        lower = tf.concat((deltas[i], left_tt_core), axis=right_rank_dim)
        tangent_core = tf.concat((upper, lower), axis=left_rank_dim)
      cores.append(tangent_core)
    if is_batch_case:
      tangent = TensorTrainBatch(cores, batch_size=batch_size)
    else:
      tangent = TensorTrain(cores)
  tangent.projection_on = tt
  return tangent
| 42.898649 | 101 | 0.664908 | import tensorflow.compat.v1 as tf
from t3f.tensor_train import TensorTrain
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import shapes
from t3f import decompositions
def project_sum(what, where, weights=None):
  """Projects (weighted sum of) TT-object(s) on the tangent space of `where`.

  Orthogonalizes `where` to build the tangent-space cores (U left, V right),
  then contracts `what` against them with einsums to form the projection
  cores [[V, 0], [dP, U]].

  Args:
    what: `TensorTrain` or `TensorTrainBatch` to project; must have the same
      raw shape and a compatible dtype as `where`.
    where: `TensorTrain` whose tangent space we project on.
    weights: optional, convertible to tf.Tensor; if given, the projections of
      the batch elements are combined with these weights. A 2-D weights
      tensor with second dimension > 1 produces a batch output.

  Returns:
    `TensorTrain` (or `TensorTrainBatch` in the batch-output case) with the
    `projection_on` attribute set to `where`.

  Raises:
    ValueError: if `where` is not a TensorTrain, or shapes/dtypes mismatch.
  """
  # Always work with a batch internally; a single TT becomes a batch of 1.
  what = shapes.expand_batch_dim(what)
  if weights is not None:
    weights = tf.convert_to_tensor(weights, dtype=where.dtype)
  if not isinstance(where, TensorTrain):
    raise ValueError('The first argument should be a TensorTrain object, got '
                     '"%s".' % where)
  if where.get_raw_shape() != what.get_raw_shape():
    raise ValueError('The shapes of the tensor we want to project and of the '
                     'tensor on which tangent space we want to project should '
                     'match, got %s and %s.' %
                     (where.get_raw_shape(),
                      what.get_raw_shape()))
  dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
                       what.dtype.is_compatible_with(where.dtype))
  if not dtypes_compatible:
    raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
                     (where.dtype,
                      what.dtype))
  # Left- and right-orthogonalized copies of `where` give the U and V cores
  # of the tangent space.
  left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      where)
  right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      left_tangent_space_tens, left_to_right=False)
  ndims = where.ndims()
  dtype = where.dtype
  raw_shape = shapes.lazy_raw_shape(where)
  batch_size = shapes.lazy_batch_size(what)
  right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
  left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)
  # 'ij' for TT-matrices (two mode indices per core), 'i' for TT-tensors.
  mode_str = 'ij' if where.is_tt_matrix() else 'i'
  right_rank_dim = where.right_tt_rank_dim
  left_rank_dim = where.left_tt_rank_dim
  if weights is not None:
    weights_shape = weights.get_shape()
    output_is_batch = len(weights_shape) > 1 and weights_shape[1] > 1
  else:
    output_is_batch = False
  output_batch_str = 'o' if output_is_batch else ''
  if output_is_batch:
    # Batch output adds a leading dimension, shifting the rank dims.
    right_rank_dim += 1
    left_rank_dim += 1
    output_batch_size = weights.get_shape()[1].value
  # rhs[k]: contraction of cores k..d-1 of `what` with the right (V) cores.
  rhs = [None] * (ndims + 1)
  rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1, 0, -1):
    tens_core = what.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str)
    rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1],
                              right_tang_core)
  # lhs[k]: contraction of cores 0..k-1 of `what` with the left (U) cores.
  lhs = [None] * (ndims + 1)
  lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str)
    lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core,
                                  tens_core)
  res_cores_list = []
  for core_idx in range(ndims):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    if core_idx < ndims - 1:
      # Delta core dP_k: project, subtract the component already captured by
      # the left cores (gauge condition), then contract with rhs.
      einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
      einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str)
      proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1])
      if weights is None:
        einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str)
        proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])
      else:
        einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str, output_batch_str)
        proj_core_s = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])
        # Weighted sum over the batch dimension s.
        einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str)
        proj_core = tf.einsum(einsum_str, weights, proj_core_s)
    if core_idx == ndims - 1:
      if weights is None:
        einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str)
        proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
      else:
        einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str, output_batch_str)
        proj_core_s = tf.einsum(einsum_str, lhs[core_idx], tens_core)
        einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str)
        proj_core = tf.einsum(einsum_str, weights, proj_core_s)
    if output_is_batch:
      # Tile the U/V cores along the new output batch dimension.
      extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
      extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
      if where.is_tt_matrix():
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1, 1])
      else:
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1])
    else:
      extended_left_tang_core = left_tang_core
      extended_right_tang_core = right_tang_core
    # Assemble the block core: first/last cores are 1x2 / 2x1 blocks; middle
    # cores are the 2x2 block matrix [[V, 0], [dP, U]].
    if core_idx == 0:
      res_core = tf.concat((proj_core, extended_left_tang_core),
                           axis=right_rank_dim)
    elif core_idx == ndims - 1:
      res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim)
    else:
      rank_1 = right_tangent_tt_ranks[core_idx]
      rank_2 = left_tangent_tt_ranks[core_idx + 1]
      if where.is_tt_matrix():
        mode_size_n = raw_shape[0][core_idx]
        mode_size_m = raw_shape[1][core_idx]
        shape = [rank_1, mode_size_n, mode_size_m, rank_2]
      else:
        mode_size = raw_shape[0][core_idx]
        shape = [rank_1, mode_size, rank_2]
      if output_is_batch:
        shape = [output_batch_size] + shape
      zeros = tf.zeros(shape, dtype)
      upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim)
      lower = tf.concat((proj_core, extended_left_tang_core),
                        axis=right_rank_dim)
      res_core = tf.concat((upper, lower), axis=left_rank_dim)
    res_cores_list.append(res_core)
  if output_is_batch:
    res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
                           batch_size=output_batch_size)
  else:
    res = TensorTrain(res_cores_list, where.get_raw_shape())
  # Remember the base point so downstream functions can verify compatibility.
  res.projection_on = where
  return res
def project(what, where):
  """Projects TT-object(s) `what` on the tangent space of TT-object `where`.

  Orthogonalizes `where` to obtain the left (U) and right (V) tangent-space
  cores, then builds projection cores of block form [[V, 0], [dP, U]].

  Args:
    what: `TensorTrain` or `TensorTrainBatch` to project; must have the same
      raw shape and a compatible dtype as `where`. A batch input produces a
      batch output.
    where: `TensorTrain` whose tangent space we project on.

  Returns:
    `TensorTrain` (or `TensorTrainBatch` if `what` was a batch) with the
    `projection_on` attribute set to `where`.

  Raises:
    ValueError: if `where` is not a TensorTrain, or shapes/dtypes mismatch.
  """
  if not isinstance(where, TensorTrain):
    raise ValueError('The first argument should be a TensorTrain object, got '
                     '"%s".' % where)
  if where.get_raw_shape() != what.get_raw_shape():
    raise ValueError('The shapes of the tensor we want to project and of the '
                     'tensor on which tangent space we want to project should '
                     'match, got %s and %s.' %
                     (where.get_raw_shape(),
                      what.get_raw_shape()))
  dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
                       what.dtype.is_compatible_with(where.dtype))
  if not dtypes_compatible:
    raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
                     (where.dtype,
                      what.dtype))
  # U cores come from left-to-right orthogonalization, V cores from the
  # subsequent right-to-left orthogonalization.
  left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      where)
  right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      left_tangent_space_tens, left_to_right=False)
  ndims = where.ndims()
  dtype = where.dtype
  raw_shape = shapes.lazy_raw_shape(where)
  right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
  left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)
  # 'ij' for TT-matrices (two mode indices per core), 'i' for TT-tensors.
  mode_str = 'ij' if where.is_tt_matrix() else 'i'
  right_rank_dim = what.right_tt_rank_dim
  left_rank_dim = what.left_tt_rank_dim
  output_is_batch = isinstance(what, TensorTrainBatch)
  if output_is_batch:
    output_batch_size = what.batch_size
  # Work with a batch internally even for a single TT input.
  what = shapes.expand_batch_dim(what)
  batch_size = shapes.lazy_batch_size(what)
  # rhs[k]: contraction of cores k..d-1 of `what` with the V cores.
  rhs = [None] * (ndims + 1)
  rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1, 0, -1):
    tens_core = what.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str)
    rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1],
                              right_tang_core)
  # lhs[k]: contraction of cores 0..k-1 of `what` with the U cores.
  lhs = [None] * (ndims + 1)
  lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str)
    lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core,
                                  tens_core)
  res_cores_list = []
  for core_idx in range(ndims):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    if core_idx < ndims - 1:
      # Delta core dP_k: subtract the part already captured by the U cores
      # (gauge condition), then contract with the right partial product.
      einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
      einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str)
      proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1])
      if output_is_batch:
        einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str)
      else:
        einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1])
    if core_idx == ndims - 1:
      if output_is_batch:
        einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str)
      else:
        einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str)
      proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core)
    if output_is_batch:
      # Tile the U/V cores along the batch dimension of the output.
      extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
      extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
      if where.is_tt_matrix():
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1, 1])
      else:
        extended_left_tang_core = tf.tile(extended_left_tang_core,
                                          [output_batch_size, 1, 1, 1])
        extended_right_tang_core = tf.tile(extended_right_tang_core,
                                           [output_batch_size, 1, 1, 1])
    else:
      extended_left_tang_core = left_tang_core
      extended_right_tang_core = right_tang_core
    # Assemble block cores: [dP, U] first, [V; dP] last, [[V, 0], [dP, U]]
    # in the middle.
    if core_idx == 0:
      res_core = tf.concat((proj_core, extended_left_tang_core),
                           axis=right_rank_dim)
    elif core_idx == ndims - 1:
      res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim)
    else:
      rank_1 = right_tangent_tt_ranks[core_idx]
      rank_2 = left_tangent_tt_ranks[core_idx + 1]
      if where.is_tt_matrix():
        mode_size_n = raw_shape[0][core_idx]
        mode_size_m = raw_shape[1][core_idx]
        shape = [rank_1, mode_size_n, mode_size_m, rank_2]
      else:
        mode_size = raw_shape[0][core_idx]
        shape = [rank_1, mode_size, rank_2]
      if output_is_batch:
        shape = [output_batch_size] + shape
      zeros = tf.zeros(shape, dtype)
      upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim)
      lower = tf.concat((proj_core, extended_left_tang_core),
                        axis=right_rank_dim)
      res_core = tf.concat((upper, lower), axis=left_rank_dim)
    res_cores_list.append(res_core)
  if output_is_batch:
    res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
                           batch_size=output_batch_size)
  else:
    res = TensorTrain(res_cores_list, where.get_raw_shape())
  # Remember the base point so downstream functions can verify compatibility.
  res.projection_on = where
  return res
def project_matmul(what, where, matrix):
  """Projects a TT-matrix-times-TT product on the tangent space of `where`.

  Computes (approximately) the projection of `matrix @ what` on the tangent
  space of `where` without materializing the full product, by contracting
  `matrix` cores into the lhs/rhs partial products.

  Args:
    what: `TensorTrain` or `TensorTrainBatch` (TT-matrix form; cores are
      contracted with 5-index einsums) to be multiplied and projected; must
      match `where` in raw shape and have a compatible dtype.
    where: `TensorTrain` whose tangent space we project on.
    matrix: TT-matrix whose cores are contracted with `what`.

  Returns:
    `TensorTrain` (or `TensorTrainBatch` if `what` was a batch) with the
    `projection_on` attribute set to `where`.

  Raises:
    ValueError: if `where` is not a TensorTrain, or shapes/dtypes mismatch.
  """
  if not isinstance(where, TensorTrain):
    raise ValueError('The first argument should be a TensorTrain object, got '
                     '"%s".' % where)
  if where.get_raw_shape() != what.get_raw_shape():
    raise ValueError('The shapes of the tensor we want to project and of the '
                     'tensor on which tangent space we want to project should '
                     'match, got %s and %s.' %
                     (where.get_raw_shape(),
                      what.get_raw_shape()))
  dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or
                       what.dtype.is_compatible_with(where.dtype))
  if not dtypes_compatible:
    raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' %
                     (where.dtype,
                      what.dtype))
  # U cores from left-to-right orthogonalization, V cores from the follow-up
  # right-to-left orthogonalization.
  left_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      where)
  right_tangent_space_tens = decompositions.orthogonalize_tt_cores(
      left_tangent_space_tens, left_to_right=False)
  ndims = where.ndims()
  dtype = where.dtype
  raw_shape = shapes.lazy_raw_shape(where)
  batch_size = shapes.lazy_batch_size(what)
  right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens)
  left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens)
  right_rank_dim = what.right_tt_rank_dim
  left_rank_dim = what.left_tt_rank_dim
  output_is_batch = isinstance(what, TensorTrainBatch)
  if output_is_batch:
    output_batch_size = what.batch_size
  # Work with a batch internally even for a single TT input.
  what = shapes.expand_batch_dim(what)
  # rhs[k]: contraction of cores k..d-1 of matrix, V and `what`; the extra
  # rank index (vs. project) accounts for the matrix TT-ranks.
  rhs = [None] * (ndims + 1)
  rhs[ndims] = tf.ones((batch_size, 1, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1, 0, -1):
    tens_core = what.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    rhs[core_idx] = tf.einsum('bije,cikf,sdef,sajkd->sabc', matrix_core,
                              right_tang_core, rhs[core_idx + 1], tens_core)
  # lhs[k]: contraction of cores 0..k-1 of matrix, U and `what`.
  lhs = [None] * (ndims + 1)
  lhs[0] = tf.ones((batch_size, 1, 1, 1), dtype=dtype)
  for core_idx in range(ndims - 1):
    tens_core = what.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    lhs[core_idx + 1] = tf.einsum('bije,aikd,sabc,scjkf->sdef', matrix_core,
                                  left_tang_core, lhs[core_idx], tens_core)
  res_cores_list = []
  for core_idx in range(ndims):
    tens_core = what.tt_cores[core_idx]
    matrix_core = matrix.tt_cores[core_idx]
    left_tang_core = left_tangent_space_tens.tt_cores[core_idx]
    right_tang_core = right_tangent_space_tens.tt_cores[core_idx]
    if core_idx < ndims - 1:
      # Delta core dP_k with the matrix contraction folded in; the subtraction
      # enforces the gauge condition against the U cores.
      proj_core = tf.einsum('scjke,sabc,bijd->saikde', tens_core,
                            lhs[core_idx], matrix_core)
      proj_core -= tf.einsum('aikb,sbcd->saikcd', left_tang_core,
                             lhs[core_idx + 1])
      proj_core = tf.einsum('saikcb,sbcd->saikd', proj_core, rhs[core_idx + 1])
    if core_idx == ndims - 1:
      # d and e dimensions take 1 value, since its the last rank.
      # To make the result shape (?, ?, ?, 1), we are summing d and leaving e,
      # but we could have done the opposite -- sum e and leave d.
      proj_core = tf.einsum('sabc,bijd,scjke->saike', lhs[core_idx], matrix_core,
                            tens_core)
    if output_is_batch:
      # Add batch dimension of size output_batch_size to left_tang_core and
      # right_tang_core
      extended_left_tang_core = tf.expand_dims(left_tang_core, 0)
      extended_right_tang_core = tf.expand_dims(right_tang_core, 0)
      extended_left_tang_core = tf.tile(extended_left_tang_core,
                                        [output_batch_size, 1, 1, 1, 1])
      extended_right_tang_core = tf.tile(extended_right_tang_core,
                                         [output_batch_size, 1, 1, 1, 1])
    else:
      extended_left_tang_core = left_tang_core
      extended_right_tang_core = right_tang_core
    # Assemble block cores: [dP, U] first, [V; dP] last, [[V, 0], [dP, U]]
    # in the middle.
    if core_idx == 0:
      res_core = tf.concat((proj_core, extended_left_tang_core),
                           axis=right_rank_dim)
    elif core_idx == ndims - 1:
      res_core = tf.concat((extended_right_tang_core, proj_core),
                           axis=left_rank_dim)
    else:
      rank_1 = right_tangent_tt_ranks[core_idx]
      rank_2 = left_tangent_tt_ranks[core_idx + 1]
      mode_size_n = raw_shape[0][core_idx]
      mode_size_m = raw_shape[1][core_idx]
      shape = [rank_1, mode_size_n, mode_size_m, rank_2]
      if output_is_batch:
        shape = [output_batch_size] + shape
      zeros = tf.zeros(shape, dtype)
      upper = tf.concat((extended_right_tang_core, zeros),
                        axis=right_rank_dim)
      lower = tf.concat((proj_core, extended_left_tang_core),
                        axis=right_rank_dim)
      res_core = tf.concat((upper, lower), axis=left_rank_dim)
    res_cores_list.append(res_core)
  # TODO: TT-ranks.
  if output_is_batch:
    res = TensorTrainBatch(res_cores_list, where.get_raw_shape(),
                           batch_size=output_batch_size)
  else:
    res = TensorTrain(res_cores_list, where.get_raw_shape())
  # Remember the base point so downstream functions can verify compatibility.
  res.projection_on = where
  return res
def pairwise_flat_inner_projected(projected_tt_vectors_1,
                                  projected_tt_vectors_2):
  """Scalar products between two batches of TTs from the same tangent space.

  res[i, j] = flat_inner(projected_tt_vectors_1[i], projected_tt_vectors_2[j]),
  computed fast by exploiting the block structure of tangent-space elements:
  only the delta blocks (lower-left quadrants of the doubled TT-ranks)
  contribute to the scalar product.

  Args:
    projected_tt_vectors_1: TensorTrainBatch of tensors projected on the same
      tangent space as projected_tt_vectors_2.
    projected_tt_vectors_2: TensorTrainBatch.

  Returns:
    tf.tensor with the scalar product matrix.

  Raises:
    ValueError: if either argument is not a projection, or the two arguments
      are projections on different TT-objects.
  """
  if not hasattr(projected_tt_vectors_1, 'projection_on') or \
      not hasattr(projected_tt_vectors_2, 'projection_on'):
    raise ValueError('Both arguments should be projections on the tangent '
                     'space of some other TT-object. All projection* functions '
                     'leave .projection_on field in the resulting TT-object '
                     'which is not present in the arguments you\'ve provided')
  if projected_tt_vectors_1.projection_on != projected_tt_vectors_2.projection_on:
    raise ValueError('Both arguments should be projections on the tangent '
                     'space of the same TT-object. The provided arguments are '
                     'projections on different TT-objects (%s and %s). Or at '
                     'least the pointers are different.' %
                     (projected_tt_vectors_1.projection_on,
                      projected_tt_vectors_2.projection_on))
  # Always work with batches of objects for simplicity.
  projected_tt_vectors_1 = shapes.expand_batch_dim(projected_tt_vectors_1)
  projected_tt_vectors_2 = shapes.expand_batch_dim(projected_tt_vectors_2)
  ndims = projected_tt_vectors_1.ndims()
  tt_ranks = shapes.lazy_tt_ranks(projected_tt_vectors_1)
  if projected_tt_vectors_1.is_tt_matrix():
    # First core: delta is the left half of the right rank dim.
    right_size = tt_ranks[1] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[0]
    curr_core_2 = projected_tt_vectors_2.tt_cores[0]
    curr_du_1 = curr_core_1[:, :, :, :, :right_size]
    curr_du_2 = curr_core_2[:, :, :, :, :right_size]
    res = tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)
    for core_idx in range(1, ndims):
      left_size = tt_ranks[core_idx] // 2
      right_size = tt_ranks[core_idx + 1] // 2
      curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx]
      curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx]
      curr_du_1 = curr_core_1[:, left_size:, :, :, :right_size]
      curr_du_2 = curr_core_2[:, left_size:, :, :, :right_size]
      res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)
    # Last core: delta is the lower half of the left rank dim.
    left_size = tt_ranks[-2] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[-1]
    curr_core_2 = projected_tt_vectors_2.tt_cores[-1]
    curr_du_1 = curr_core_1[:, left_size:, :, :, :]
    curr_du_2 = curr_core_2[:, left_size:, :, :, :]
    res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2)
  else:
    # Working with TT-tensor, not TT-matrix.
    right_size = tt_ranks[1] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[0]
    curr_core_2 = projected_tt_vectors_2.tt_cores[0]
    curr_du_1 = curr_core_1[:, :, :, :right_size]
    curr_du_2 = curr_core_2[:, :, :, :right_size]
    res = tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)
    for core_idx in range(1, ndims):
      left_size = tt_ranks[core_idx] // 2
      right_size = tt_ranks[core_idx + 1] // 2
      curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx]
      curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx]
      curr_du_1 = curr_core_1[:, left_size:, :, :right_size]
      curr_du_2 = curr_core_2[:, left_size:, :, :right_size]
      res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)
    left_size = tt_ranks[-2] // 2
    curr_core_1 = projected_tt_vectors_1.tt_cores[-1]
    curr_core_2 = projected_tt_vectors_2.tt_cores[-1]
    curr_du_1 = curr_core_1[:, left_size:, :, :]
    curr_du_2 = curr_core_2[:, left_size:, :, :]
    res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2)
  return res
def add_n_projected(tt_objects, coef=None):
  """Add several TT-objects that are projections on the same tangent space.

  Exploits the block structure of tangent-space TT-cores: the "upper"/"right"
  blocks are shared by all summands (they come from the orthogonalized
  cores of the object projected on), so only the delta blocks need to be
  summed.  As a result the TT-ranks of the sum do not grow.

  Args:
    tt_objects: list of TT-objects, each carrying a `projection_on`
      attribute that points at the same TT-object.
    coef: optional per-summand coefficients; may be a 1-D list/tensor
      (one weight per summand) or a 2-D tensor -- presumably
      (num_summands, batch_size) for batch inputs; TODO confirm.

  Returns:
    A `TensorTrain` or `TensorTrainBatch` in the same tangent space, with
    its `projection_on` attribute set.

  Raises:
    ValueError: if any argument lacks `projection_on`, or the arguments are
      projections onto different TT-objects.
  """
  # All inputs must have been produced by a project* function, which
  # attaches the `projection_on` attribute.
  for tt in tt_objects:
    if not hasattr(tt, 'projection_on'):
      raise ValueError('Both arguments should be projections on the tangent '
                       'space of some other TT-object. All projection* functions '
                       'leave .projection_on field in the resulting TT-object '
                       'which is not present in the argument you\'ve provided.')
  projection_on = tt_objects[0].projection_on
  for tt in tt_objects[1:]:
    if tt.projection_on != projection_on:
      raise ValueError('All tt_objects should be projections on the tangent '
                       'space of the same TT-object. The provided arguments are '
                       'projections on different TT-objects (%s and %s). Or at '
                       'least the pointers are different.' % (tt.projection_on,
                                                              projection_on))
  if coef is not None:
    coef = tf.convert_to_tensor(coef, dtype=tt_objects[0].dtype)
    if coef.get_shape().ndims > 1:
      # Reshape 2-D coefficients so they broadcast against TT-cores that
      # carry an extra leading batch axis.
      some_core = tt_objects[0].tt_cores[0]
      dim_array = [1] * (some_core.get_shape().ndims + 1)
      dim_array[0] = coef.get_shape()[0].value
      dim_array[1] = coef.get_shape()[1].value
      coef = tf.reshape(coef, dim_array)
  ndims = tt_objects[0].ndims()
  tt_ranks = shapes.lazy_tt_ranks(tt_objects[0])
  left_rank_dim = tt_objects[0].left_tt_rank_dim
  right_rank_dim = tt_objects[0].right_tt_rank_dim
  res_cores = []
  def slice_tt_core(tt_core, left_idx, right_idx):
    # Slice a TT-core along its left/right rank dimensions only.
    num_tt_core_dims = len(tt_core.get_shape())
    idx = [slice(None)] * num_tt_core_dims
    idx[left_rank_dim] = left_idx
    idx[right_rank_dim] = right_idx
    return tt_core[idx]
  # First core: left half-blocks (the deltas) are summed across objects,
  # the right half-block is shared and taken from the first object.
  right_half_rank = tt_ranks[1] // 2
  left_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[0], slice(None),
                              slice(0, right_half_rank))
    if coef is not None:
      curr_core *= coef[obj_idx]
    left_chunks.append(curr_core)
  left_part = tf.add_n(left_chunks)
  first_obj_core = tt_objects[0].tt_cores[0]
  right_part = slice_tt_core(first_obj_core, slice(None),
                             slice(right_half_rank, None))
  first_core = tf.concat((left_part, right_part), axis=right_rank_dim)
  res_cores.append(first_core)
  # Middle cores: 2x2 block structure; only the lower-left block differs
  # between summands, so it is the only one that gets summed.
  for core_idx in range(1, ndims - 1):
    first_obj_core = tt_objects[0].tt_cores[core_idx]
    left_half_rank = tt_ranks[core_idx] // 2
    right_half_rank = tt_ranks[core_idx + 1] // 2
    # NOTE(review): `tt` here is the loop variable left over from the
    # previous enumerate loop; the upper block is identical across all
    # projections on the same object, so any summand works -- confirm.
    upper_part = slice_tt_core(tt.tt_cores[core_idx], slice(0, left_half_rank),
                               slice(None))
    lower_right_part = slice_tt_core(first_obj_core,
                                     slice(left_half_rank, None),
                                     slice(right_half_rank, None))
    lower_left_chunks = []
    for obj_idx, tt in enumerate(tt_objects):
      curr_core = slice_tt_core(tt.tt_cores[core_idx],
                                slice(left_half_rank, None),
                                slice(0, right_half_rank))
      if coef is not None:
        curr_core *= coef[obj_idx]
      lower_left_chunks.append(curr_core)
    lower_left_part = tf.add_n(lower_left_chunks)
    lower_part = tf.concat((lower_left_part, lower_right_part),
                           axis=right_rank_dim)
    curr_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
    res_cores.append(curr_core)
  # Last core: upper block shared, lower (delta) blocks summed.
  left_half_rank = tt_ranks[ndims - 1] // 2
  upper_part = slice_tt_core(tt.tt_cores[-1], slice(0, left_half_rank),
                             slice(None))
  lower_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[-1], slice(left_half_rank, None),
                              slice(None))
    if coef is not None:
      curr_core *= coef[obj_idx]
    lower_chunks.append(curr_core)
  lower_part = tf.add_n(lower_chunks)
  last_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
  res_cores.append(last_core)
  raw_shape = tt_objects[0].get_raw_shape()
  static_tt_ranks = tt_objects[0].get_tt_ranks()
  if isinstance(tt_objects[0], TensorTrain):
    res = TensorTrain(res_cores, raw_shape, static_tt_ranks)
  elif isinstance(tt_objects[0], TensorTrainBatch):
    res = TensorTrainBatch(res_cores, raw_shape, static_tt_ranks,
                           tt_objects[0].batch_size)
  # The sum lives in the same tangent space as the inputs.
  res.projection_on = tt_objects[0].projection_on
  return res
def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
  """Convert a tangent-space element into its list of delta cores.

  A tangent-space element produced by a project* function stores, in each
  middle TT-core, a block structure along the rank dimensions; the deltas
  are the lower-left blocks (second half of the left rank, first half of
  the right rank), plus the left half of the first core and the lower half
  of the last core.

  Args:
    tt: a TT-object carrying a non-None `projection_on` attribute.
    name: string, name of the TensorFlow name scope.

  Returns:
    A list with one delta core per TT-dimension.

  Raises:
    ValueError: if `tt` is not a projection or any middle TT-rank is odd.
  """
  if not hasattr(tt, 'projection_on') or tt.projection_on is None:
    raise ValueError('tt argument is supposed to be a projection, but it '
                     'lacks projection_on field')
  num_dims = tt.ndims()
  left_tt_rank_dim = tt.left_tt_rank_dim
  right_tt_rank_dim = tt.right_tt_rank_dim
  deltas = [None] * num_dims
  tt_ranks = shapes.lazy_tt_ranks(tt)
  # Middle ranks of a projection are always even: each is the concatenation
  # of two equal half-rank blocks.
  for i in range(1, num_dims - 1):
    if int(tt_ranks[i] / 2) != tt_ranks[i] / 2:
      raise ValueError('tt argument is supposed to be a projection, but its '
                       'ranks are not even.')
  with tf.name_scope(name, values=tt.tt_cores):
    for i in range(1, num_dims - 1):
      r1, r2 = tt_ranks[i], tt_ranks[i + 1]
      curr_core = tt.tt_cores[i]
      slc = [slice(None)] * len(curr_core.shape)
      # Lower-left block: rows [r1/2, r1), columns [0, r2/2).
      slc[left_tt_rank_dim] = slice(int(r1 / 2), None)
      slc[right_tt_rank_dim] = slice(0, int(r2 / 2))
      deltas[i] = curr_core[slc]
    # First core: keep only the left half of the right rank dimension.
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[right_tt_rank_dim] = slice(0, int(tt_ranks[1] / 2))
    deltas[0] = tt.tt_cores[0][slc]
    # Last core: keep only the lower half of the left rank dimension.
    # NOTE(review): the slice length is taken from the *first* core's
    # shape; all cores of one TT-object have the same number of axes, so
    # this is presumably an intentional shorthand -- confirm.
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[left_tt_rank_dim] = slice(int(tt_ranks[-2] / 2), None)
    deltas[num_dims - 1] = tt.tt_cores[num_dims - 1][slc]
  return deltas
def deltas_to_tangent_space(deltas, tt, left=None, right=None,
                            name='t3f_deltas_to_tangent_space'):
  """Assemble a tangent-space element of `tt` from the given delta cores.

  Inverse of `tangent_space_to_deltas`: builds TT-cores with the 2x2 block
  structure [[right_core, 0], [delta, left_core]] (the first/last cores use
  a single row/column of blocks) and tags the result with `projection_on`.

  Args:
    deltas: list of delta cores, one per TT-dimension; each may carry an
      extra leading batch axis, in which case a `TensorTrainBatch` is built.
    tt: the TT-object whose tangent space the result lies in.
    left: optional left-orthogonalized version of `tt` (computed if None).
    right: optional right-orthogonalized version of `tt` (computed if None).
    name: string, name of the TensorFlow name scope.

  Returns:
    A `TensorTrain` (or `TensorTrainBatch` for batched deltas) with
    `.projection_on` set to `tt`.
  """
  cores = []
  dtype = tt.dtype
  num_dims = tt.ndims()
  input_tensors = list(tt.tt_cores) + list(deltas)
  if left is not None:
    input_tensors += list(left.tt_cores)
  if right is not None:
    input_tensors += list(right.tt_cores)
  with tf.name_scope(name, values=input_tensors):
    if left is None:
      left = decompositions.orthogonalize_tt_cores(tt)
    if right is None:
      right = decompositions.orthogonalize_tt_cores(left, left_to_right=False)
    left_tangent_tt_ranks = shapes.lazy_tt_ranks(left)
    # NOTE(review): also computed from `left`; orthogonalization preserves
    # TT-ranks so the two coincide, but `right` was presumably intended --
    # confirm against upstream.
    right_tangent_tt_ranks = shapes.lazy_tt_ranks(left)
    raw_shape = shapes.lazy_raw_shape(left)
    right_rank_dim = left.right_tt_rank_dim
    left_rank_dim = left.left_tt_rank_dim
    # Batched deltas carry one extra leading axis compared to the TT-cores.
    is_batch_case = len(deltas[0].shape) > len(tt.tt_cores[0].shape)
    if is_batch_case:
      right_rank_dim += 1
      left_rank_dim += 1
      batch_size = deltas[0].shape.as_list()[0]
    for i in range(num_dims):
      left_tt_core = left.tt_cores[i]
      right_tt_core = right.tt_cores[i]
      if is_batch_case:
        # Replicate the (shared) orthogonal cores across the batch axis.
        tile = [1] * len(left_tt_core.shape)
        tile = [batch_size] + tile
        left_tt_core = tf.tile(left_tt_core[None, ...], tile)
        right_tt_core = tf.tile(right_tt_core[None, ...], tile)
      if i == 0:
        # First core: single row of blocks [delta, left_core].
        tangent_core = tf.concat((deltas[i], left_tt_core),
                                 axis=right_rank_dim)
      elif i == num_dims - 1:
        # Last core: single column of blocks [right_core; delta].
        tangent_core = tf.concat((right_tt_core, deltas[i]),
                                 axis=left_rank_dim)
      else:
        # Middle cores: 2x2 block matrix [[right, 0], [delta, left]].
        rank_1 = right_tangent_tt_ranks[i]
        rank_2 = left_tangent_tt_ranks[i + 1]
        if tt.is_tt_matrix():
          mode_size_n = raw_shape[0][i]
          mode_size_m = raw_shape[1][i]
          shape = [rank_1, mode_size_n, mode_size_m, rank_2]
        else:
          mode_size_n = raw_shape[0][i]
          shape = [rank_1, mode_size_n, rank_2]
        if is_batch_case:
          shape = [batch_size] + shape
        zeros = tf.zeros(shape, dtype=dtype)
        upper = tf.concat((right_tt_core, zeros), axis=right_rank_dim)
        lower = tf.concat((deltas[i], left_tt_core), axis=right_rank_dim)
        tangent_core = tf.concat((upper, lower), axis=left_rank_dim)
      cores.append(tangent_core)
    if is_batch_case:
      tangent = TensorTrainBatch(cores, batch_size=batch_size)
    else:
      tangent = TensorTrain(cores)
    # Mark the result as a tangent-space element of `tt`.
    tangent.projection_on = tt
    return tangent
| true | true |
f7317ee14c32df99d8957b13095c5f0979815548 | 11,872 | py | Python | intake_esm/cat.py | agstephens/intake-esm | 25ead83497d025c37a80abdbefee9b286934308b | [
"Apache-2.0"
] | null | null | null | intake_esm/cat.py | agstephens/intake-esm | 25ead83497d025c37a80abdbefee9b286934308b | [
"Apache-2.0"
] | null | null | null | intake_esm/cat.py | agstephens/intake-esm | 25ead83497d025c37a80abdbefee9b286934308b | [
"Apache-2.0"
] | null | null | null | import enum
import json
import os
import pathlib
import typing
import fsspec
import pandas as pd
import pydantic
import tlz
from ._search import search, search_apply_require_all_on
class AggregationType(str, enum.Enum):
    """Ways a catalog attribute can be aggregated when combining assets."""
    join_new = 'join_new'
    join_existing = 'join_existing'
    union = 'union'
    # Pydantic model config (this enum is used as a field type on the
    # pydantic models below).
    class Config:
        validate_all = True
        validate_assignment = True
class DataFormat(str, enum.Enum):
    """Storage formats supported for catalog assets."""
    netcdf = 'netcdf'
    zarr = 'zarr'
    # Pydantic model config (this enum is used as a field type below).
    class Config:
        validate_all = True
        validate_assignment = True
class Attribute(pydantic.BaseModel):
    """A catalog attribute: the name of a dataframe column and, optionally,
    the controlled vocabulary its values come from."""
    column_name: pydantic.StrictStr
    vocabulary: pydantic.StrictStr = ''
    class Config:
        validate_all = True
        validate_assignment = True
class Assets(pydantic.BaseModel):
    """Description of where and how the data assets are stored."""
    # Column of the catalog table holding the asset path/URL.
    column_name: pydantic.StrictStr
    # Data format of the assets (mutually exclusive with format_column_name).
    format: DataFormat
    # Column holding a per-row data format, when formats vary across assets.
    format_column_name: typing.Optional[pydantic.StrictStr]
    class Config:
        validate_all = True
        validate_assignment = True
    @pydantic.root_validator
    def _validate_data_format(cls, values):
        """Reject models that set both `format` and `format_column_name`."""
        data_format, format_column_name = values.get('format'), values.get('format_column_name')
        if data_format is not None and format_column_name is not None:
            raise ValueError('Cannot set both format and format_column_name')
        return values
class Aggregation(pydantic.BaseModel):
    """One aggregation rule: how to combine assets along a given attribute."""
    type: AggregationType
    attribute_name: pydantic.StrictStr
    # Extra keyword arguments forwarded to the aggregation operation.
    options: typing.Optional[typing.Dict] = {}
    class Config:
        validate_all = True
        validate_assignment = True
class AggregationControl(pydantic.BaseModel):
    """Controls how catalog assets are grouped and aggregated into datasets."""
    # Column naming the physical variable of each asset.
    variable_column_name: pydantic.StrictStr
    # Columns whose values identify a dataset (the groupby key).
    groupby_attrs: typing.List[pydantic.StrictStr]
    aggregations: typing.List[Aggregation] = []
    class Config:
        validate_all = True
        validate_assignment = True
class ESMCatalogModel(pydantic.BaseModel):
    """
    Pydantic model for the ESM data catalog defined in https://git.io/JBWoW

    Wraps the JSON catalog description plus a pandas dataframe (`_df`)
    holding the actual asset table.
    """
    esmcat_version: pydantic.StrictStr
    id: str
    attributes: typing.List[Attribute]
    assets: Assets
    aggregation_control: AggregationControl
    # Exactly one of catalog_dict / catalog_file may be set (see validator).
    catalog_dict: typing.Optional[typing.List[typing.Dict]] = None
    catalog_file: pydantic.StrictStr = None
    description: pydantic.StrictStr = None
    title: pydantic.StrictStr = None
    # Backing dataframe; populated by from_dict()/load(), not by pydantic.
    _df: typing.Optional[typing.Any] = pydantic.PrivateAttr()
    class Config:
        validate_all = True
        validate_assignment = True
    @pydantic.root_validator
    def validate_catalog(cls, values):
        """Reject catalogs that define both an inline table and a file path."""
        catalog_dict, catalog_file = values.get('catalog_dict'), values.get('catalog_file')
        if catalog_dict is not None and catalog_file is not None:
            raise ValueError('catalog_dict and catalog_file cannot be set at the same time')
        return values
    @classmethod
    def from_dict(cls, data: typing.Dict) -> 'ESMCatalogModel':
        """Build a catalog from a dict with 'esmcat' (spec) and 'df' (table)."""
        esmcat = data['esmcat']
        df = data['df']
        cat = cls.parse_obj(esmcat)
        cat._df = df
        return cat
    def save(self, name: str, *, directory: str = None, catalog_type: str = 'dict') -> None:
        """
        Save the catalog to a file.
        Parameters
        -----------
        name: str
            The name of the file to save the catalog to.
        directory: str
            The directory to save the catalog to. If None, use the current directory
        catalog_type: str
            The type of catalog to save. Whether to save the catalog table as a dictionary
            in the JSON file or as a separate CSV file. Valid options are 'dict' and 'file'.
        Notes
        -----
        Large catalogs can result in large JSON files. To keep the JSON file size manageable, call with
        `catalog_type='file'` to save catalog as a separate CSV file.
        """
        if catalog_type not in {'file', 'dict'}:
            raise ValueError(
                f'catalog_type must be either "dict" or "file". Received catalog_type={catalog_type}'
            )
        csv_file_name = pathlib.Path(f'{name}.csv.gz')
        json_file_name = pathlib.Path(f'{name}.json')
        if directory:
            directory = pathlib.Path(directory)
            directory.mkdir(parents=True, exist_ok=True)
            csv_file_name = directory / csv_file_name
            json_file_name = directory / json_file_name
        data = self.dict().copy()
        # The table is re-attached below in the requested form, so drop both.
        for key in {'catalog_dict', 'catalog_file'}:
            data.pop(key, None)
        data['id'] = name
        if catalog_type == 'file':
            data['catalog_file'] = str(csv_file_name)
            self.df.to_csv(csv_file_name, compression='gzip', index=False)
        else:
            data['catalog_dict'] = self.df.to_dict(orient='records')
        with open(json_file_name, 'w') as outfile:
            json.dump(data, outfile, indent=2)
        print(f'Successfully wrote ESM collection json file to: {json_file_name}')
    @classmethod
    def load(
        cls,
        json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
        storage_options: typing.Dict[str, typing.Any] = None,
        read_csv_kwargs: typing.Dict[str, typing.Any] = None,
    ) -> 'ESMCatalogModel':
        """
        Loads the catalog from a file.

        `storage_options` is forwarded to fsspec/pandas for remote access;
        `read_csv_kwargs` is forwarded to `pandas.read_csv`.
        """
        storage_options = storage_options if storage_options is not None else {}
        read_csv_kwargs = read_csv_kwargs or {}
        _mapper = fsspec.get_mapper(json_file, **storage_options)
        with fsspec.open(json_file, **storage_options) as fobj:
            cat = cls.parse_raw(fobj.read())
            if cat.catalog_file:
                # Resolve the CSV path relative to the JSON file if it does
                # not exist as given.
                if _mapper.fs.exists(cat.catalog_file):
                    csv_path = cat.catalog_file
                else:
                    csv_path = f'{os.path.dirname(_mapper.root)}/{cat.catalog_file}'
                cat.catalog_file = csv_path
                df = pd.read_csv(
                    cat.catalog_file,
                    storage_options=storage_options,
                    **read_csv_kwargs,
                )
            else:
                df = pd.DataFrame(cat.catalog_dict)
            cat._df = df
            cat._cast_agg_columns_with_iterables()
            return cat
    @property
    def columns_with_iterables(self) -> typing.Set[str]:
        """Return a set of columns that have iterables."""
        if self._df.empty:
            return set()
        # Sample a few rows (with replacement, so frames smaller than 20
        # rows are fine) and look for list/tuple/set cells.
        has_iterables = (
            self._df.sample(20, replace=True)
            .applymap(type)
            .isin([list, tuple, set])
            .any()
            .to_dict()
        )
        return {column for column, check in has_iterables.items() if check}
    @property
    def has_multiple_variable_assets(self) -> bool:
        """Return True if the catalog has multiple variable assets."""
        return self.aggregation_control.variable_column_name in self.columns_with_iterables
    @property
    def df(self) -> pd.DataFrame:
        """Return the dataframe."""
        return self._df
    @df.setter
    def df(self, value: pd.DataFrame) -> None:
        self._df = value
    def _cast_agg_columns_with_iterables(self) -> None:
        """Cast all agg_columns with iterables to tuple values so as
        to avoid hashing issues (e.g. TypeError: unhashable type: 'list')
        """
        columns = list(
            self.columns_with_iterables.intersection(
                set(map(lambda agg: agg.attribute_name, self.aggregation_control.aggregations))
            )
        )
        if columns:
            self._df[columns] = self._df[columns].apply(tuple)
    @property
    def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
        """Group the table by the groupby attributes, or return it as-is when
        grouping would be degenerate (no attrs, or attrs == all columns)."""
        if self.aggregation_control.groupby_attrs and set(
            self.aggregation_control.groupby_attrs
        ) != set(self.df.columns):
            return self.df.groupby(self.aggregation_control.groupby_attrs)
        return self.df
    def _construct_group_keys(
        self, sep: str = '.'
    ) -> typing.Dict[str, typing.Union[str, typing.Tuple[str]]]:
        """Map human-readable, `sep`-joined key strings to internal group keys."""
        grouped = self.grouped
        if isinstance(grouped, pd.core.groupby.generic.DataFrameGroupBy):
            internal_keys = grouped.groups.keys()
            public_keys = map(
                lambda key: key if isinstance(key, str) else sep.join(str(value) for value in key),
                internal_keys,
            )
        else:
            # Ungrouped case: one key per row, joined from all column values.
            internal_keys = grouped.index
            public_keys = (
                grouped[grouped.columns.tolist()]
                .apply(lambda row: sep.join(str(v) for v in row), axis=1)
                .tolist()
            )
        return dict(zip(public_keys, internal_keys))
    def _unique(self) -> typing.Dict:
        """Return {column: list of unique values}, flattening iterable cells."""
        def _find_unique(series):
            values = series.dropna()
            if series.name in self.columns_with_iterables:
                values = tlz.concat(values)
            return list(tlz.unique(values))
        data = self.df[self.df.columns]
        if data.empty:
            return {col: [] for col in self.df.columns}
        else:
            return data.apply(_find_unique, result_type='reduce').to_dict()
    def unique(self) -> pd.Series:
        """Return a series of the unique values per column."""
        return pd.Series(self._unique())
    def nunique(self) -> pd.Series:
        """Return a series of the number of unique values per column."""
        return pd.Series(tlz.valmap(len, self._unique()))
    def search(
        self,
        *,
        query: typing.Union['QueryModel', typing.Dict[str, typing.Any]],
        require_all_on: typing.Union[str, typing.List[str]] = None,
    ) -> 'ESMCatalogModel':
        """
        Search for entries in the catalog.
        Parameters
        ----------
        query: dict, optional
            A dictionary of query parameters to execute against the dataframe.
        require_all_on : list, str, optional
            A dataframe column or a list of dataframe columns across
            which all entries must satisfy the query criteria.
            If None, return entries that fulfill any of the criteria specified
            in the query, by default None.
        """
        if not isinstance(query, QueryModel):
            _query = QueryModel(
                query=query, require_all_on=require_all_on, columns=self.df.columns.tolist()
            )
        else:
            _query = query
        results = search(
            df=self.df, query=_query.query, columns_with_iterables=self.columns_with_iterables
        )
        if _query.require_all_on is not None and not results.empty:
            results = search_apply_require_all_on(
                df=results, query=_query.query, require_all_on=_query.require_all_on
            )
        return results
class QueryModel(pydantic.BaseModel):
    """Validated representation of a catalog search query."""
    # Mapping of column name -> value or list of values to match.
    query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]]
    # Names of the columns available in the catalog dataframe.
    columns: typing.List[str]
    # Column(s) across which all query criteria must be satisfied.
    require_all_on: typing.Union[str, typing.List[typing.Any]] = None
    class Config:
        validate_all = True
        validate_assignment = True
    @pydantic.root_validator(pre=False)
    def validate_query(cls, values):
        """Check column names and normalize scalar query values to lists."""
        query = values.get('query', {})
        columns = values.get('columns')
        require_all_on = values.get('require_all_on', [])
        if query:
            for key in query:
                if key not in columns:
                    raise ValueError(f'Column {key} not in columns {columns}')
        # Allow a single column name to be passed as a bare string.
        if isinstance(require_all_on, str):
            values['require_all_on'] = [require_all_on]
        if require_all_on is not None:
            for key in values['require_all_on']:
                if key not in columns:
                    raise ValueError(f'Column {key} not in columns {columns}')
        _query = query.copy()
        # Wrap scalar values so downstream search code can assume lists.
        for key, value in _query.items():
            if isinstance(value, (str, int, float, bool)):
                _query[key] = [value]
        values['query'] = _query
        return values
| 33.254902 | 103 | 0.614976 | import enum
import json
import os
import pathlib
import typing
import fsspec
import pandas as pd
import pydantic
import tlz
from ._search import search, search_apply_require_all_on
class AggregationType(str, enum.Enum):
join_new = 'join_new'
join_existing = 'join_existing'
union = 'union'
class Config:
validate_all = True
validate_assignment = True
class DataFormat(str, enum.Enum):
netcdf = 'netcdf'
zarr = 'zarr'
class Config:
validate_all = True
validate_assignment = True
class Attribute(pydantic.BaseModel):
column_name: pydantic.StrictStr
vocabulary: pydantic.StrictStr = ''
class Config:
validate_all = True
validate_assignment = True
class Assets(pydantic.BaseModel):
column_name: pydantic.StrictStr
format: DataFormat
format_column_name: typing.Optional[pydantic.StrictStr]
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator
def _validate_data_format(cls, values):
data_format, format_column_name = values.get('format'), values.get('format_column_name')
if data_format is not None and format_column_name is not None:
raise ValueError('Cannot set both format and format_column_name')
return values
class Aggregation(pydantic.BaseModel):
type: AggregationType
attribute_name: pydantic.StrictStr
options: typing.Optional[typing.Dict] = {}
class Config:
validate_all = True
validate_assignment = True
class AggregationControl(pydantic.BaseModel):
variable_column_name: pydantic.StrictStr
groupby_attrs: typing.List[pydantic.StrictStr]
aggregations: typing.List[Aggregation] = []
class Config:
validate_all = True
validate_assignment = True
class ESMCatalogModel(pydantic.BaseModel):
esmcat_version: pydantic.StrictStr
id: str
attributes: typing.List[Attribute]
assets: Assets
aggregation_control: AggregationControl
catalog_dict: typing.Optional[typing.List[typing.Dict]] = None
catalog_file: pydantic.StrictStr = None
description: pydantic.StrictStr = None
title: pydantic.StrictStr = None
_df: typing.Optional[typing.Any] = pydantic.PrivateAttr()
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator
def validate_catalog(cls, values):
catalog_dict, catalog_file = values.get('catalog_dict'), values.get('catalog_file')
if catalog_dict is not None and catalog_file is not None:
raise ValueError('catalog_dict and catalog_file cannot be set at the same time')
return values
@classmethod
def from_dict(cls, data: typing.Dict) -> 'ESMCatalogModel':
esmcat = data['esmcat']
df = data['df']
cat = cls.parse_obj(esmcat)
cat._df = df
return cat
def save(self, name: str, *, directory: str = None, catalog_type: str = 'dict') -> None:
if catalog_type not in {'file', 'dict'}:
raise ValueError(
f'catalog_type must be either "dict" or "file". Received catalog_type={catalog_type}'
)
csv_file_name = pathlib.Path(f'{name}.csv.gz')
json_file_name = pathlib.Path(f'{name}.json')
if directory:
directory = pathlib.Path(directory)
directory.mkdir(parents=True, exist_ok=True)
csv_file_name = directory / csv_file_name
json_file_name = directory / json_file_name
data = self.dict().copy()
for key in {'catalog_dict', 'catalog_file'}:
data.pop(key, None)
data['id'] = name
if catalog_type == 'file':
data['catalog_file'] = str(csv_file_name)
self.df.to_csv(csv_file_name, compression='gzip', index=False)
else:
data['catalog_dict'] = self.df.to_dict(orient='records')
with open(json_file_name, 'w') as outfile:
json.dump(data, outfile, indent=2)
print(f'Successfully wrote ESM collection json file to: {json_file_name}')
@classmethod
def load(
cls,
json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
storage_options: typing.Dict[str, typing.Any] = None,
read_csv_kwargs: typing.Dict[str, typing.Any] = None,
) -> 'ESMCatalogModel':
storage_options = storage_options if storage_options is not None else {}
read_csv_kwargs = read_csv_kwargs or {}
_mapper = fsspec.get_mapper(json_file, **storage_options)
with fsspec.open(json_file, **storage_options) as fobj:
cat = cls.parse_raw(fobj.read())
if cat.catalog_file:
if _mapper.fs.exists(cat.catalog_file):
csv_path = cat.catalog_file
else:
csv_path = f'{os.path.dirname(_mapper.root)}/{cat.catalog_file}'
cat.catalog_file = csv_path
df = pd.read_csv(
cat.catalog_file,
storage_options=storage_options,
**read_csv_kwargs,
)
else:
df = pd.DataFrame(cat.catalog_dict)
cat._df = df
cat._cast_agg_columns_with_iterables()
return cat
@property
def columns_with_iterables(self) -> typing.Set[str]:
if self._df.empty:
return set()
has_iterables = (
self._df.sample(20, replace=True)
.applymap(type)
.isin([list, tuple, set])
.any()
.to_dict()
)
return {column for column, check in has_iterables.items() if check}
@property
def has_multiple_variable_assets(self) -> bool:
return self.aggregation_control.variable_column_name in self.columns_with_iterables
@property
def df(self) -> pd.DataFrame:
return self._df
@df.setter
def df(self, value: pd.DataFrame) -> None:
self._df = value
def _cast_agg_columns_with_iterables(self) -> None:
columns = list(
self.columns_with_iterables.intersection(
set(map(lambda agg: agg.attribute_name, self.aggregation_control.aggregations))
)
)
if columns:
self._df[columns] = self._df[columns].apply(tuple)
@property
def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
if self.aggregation_control.groupby_attrs and set(
self.aggregation_control.groupby_attrs
) != set(self.df.columns):
return self.df.groupby(self.aggregation_control.groupby_attrs)
return self.df
def _construct_group_keys(
self, sep: str = '.'
) -> typing.Dict[str, typing.Union[str, typing.Tuple[str]]]:
grouped = self.grouped
if isinstance(grouped, pd.core.groupby.generic.DataFrameGroupBy):
internal_keys = grouped.groups.keys()
public_keys = map(
lambda key: key if isinstance(key, str) else sep.join(str(value) for value in key),
internal_keys,
)
else:
internal_keys = grouped.index
public_keys = (
grouped[grouped.columns.tolist()]
.apply(lambda row: sep.join(str(v) for v in row), axis=1)
.tolist()
)
return dict(zip(public_keys, internal_keys))
def _unique(self) -> typing.Dict:
def _find_unique(series):
values = series.dropna()
if series.name in self.columns_with_iterables:
values = tlz.concat(values)
return list(tlz.unique(values))
data = self.df[self.df.columns]
if data.empty:
return {col: [] for col in self.df.columns}
else:
return data.apply(_find_unique, result_type='reduce').to_dict()
def unique(self) -> pd.Series:
return pd.Series(self._unique())
def nunique(self) -> pd.Series:
return pd.Series(tlz.valmap(len, self._unique()))
def search(
self,
*,
query: typing.Union['QueryModel', typing.Dict[str, typing.Any]],
require_all_on: typing.Union[str, typing.List[str]] = None,
) -> 'ESMCatalogModel':
if not isinstance(query, QueryModel):
_query = QueryModel(
query=query, require_all_on=require_all_on, columns=self.df.columns.tolist()
)
else:
_query = query
results = search(
df=self.df, query=_query.query, columns_with_iterables=self.columns_with_iterables
)
if _query.require_all_on is not None and not results.empty:
results = search_apply_require_all_on(
df=results, query=_query.query, require_all_on=_query.require_all_on
)
return results
class QueryModel(pydantic.BaseModel):
query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]]
columns: typing.List[str]
require_all_on: typing.Union[str, typing.List[typing.Any]] = None
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator(pre=False)
def validate_query(cls, values):
query = values.get('query', {})
columns = values.get('columns')
require_all_on = values.get('require_all_on', [])
if query:
for key in query:
if key not in columns:
raise ValueError(f'Column {key} not in columns {columns}')
if isinstance(require_all_on, str):
values['require_all_on'] = [require_all_on]
if require_all_on is not None:
for key in values['require_all_on']:
if key not in columns:
raise ValueError(f'Column {key} not in columns {columns}')
_query = query.copy()
for key, value in _query.items():
if isinstance(value, (str, int, float, bool)):
_query[key] = [value]
values['query'] = _query
return values
| true | true |
f7317f9433b31ade91e5efd2feb76b31b2af0dcf | 228 | py | Python | adc/tth.py | udoprog/python-adc | 6e3775a6fddd0c4a12211a237e2ae5f62a79fd31 | [
"BSD-3-Clause"
] | 1 | 2015-02-01T15:05:16.000Z | 2015-02-01T15:05:16.000Z | adc/tth.py | udoprog/python-adc | 6e3775a6fddd0c4a12211a237e2ae5f62a79fd31 | [
"BSD-3-Clause"
] | null | null | null | adc/tth.py | udoprog/python-adc | 6e3775a6fddd0c4a12211a237e2ae5f62a79fd31 | [
"BSD-3-Clause"
] | null | null | null | from merkletree import MerkleTree
from .hashing import TigerHash
class TigerTree(MerkleTree):
    """Merkle tree specialised to the Tiger hash (a TTH tree)."""
    # Leaf segment size in bytes.
    segment = 1024
    # Size in bytes of a single Tiger digest.
    hashsize = TigerHash.size
    @classmethod
    def _hash(klass, *chunks):
        """Hash the given chunks with Tiger and return the digest."""
        return TigerHash.digest(*chunks)
| 19 | 37 | 0.741228 | from merkletree import MerkleTree
from .hashing import TigerHash
class TigerTree(MerkleTree):
segment = 1024;
hashsize = TigerHash.size;
@classmethod
def _hash(klass, *chunks):
return TigerHash.digest(*chunks);
| true | true |
f7318003a4d1f4c23a5fc12ba056718d045c25e6 | 3,232 | py | Python | dataflows/processors/parallelize.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | null | null | null | dataflows/processors/parallelize.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | null | null | null | dataflows/processors/parallelize.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | null | null | null | import itertools
import os
import multiprocessing as mp
import threading
import queue
from ..helpers import ResourceMatcher
from .. import PackageWrapper, ResourceWrapper
def init_mp(num_processors, row_func, q_in, q_internal):
    """Start the worker pool and the fetcher thread.

    Spawns ``num_processors`` processes running :func:`work` over
    ``q_in``/``q_out`` and one thread running :func:`fetcher` that merges
    worker output into ``q_internal``.

    Returns a ``(processes, fetcher_thread)`` tuple for later teardown.
    """
    q_out = mp.Queue()
    workers = []
    for _ in range(num_processors):
        proc = mp.Process(target=work, args=(q_in, q_out, row_func))
        proc.start()
        workers.append(proc)
    fetch_thread = threading.Thread(target=fetcher, args=(q_out, q_internal, num_processors))
    fetch_thread.start()
    return (workers, fetch_thread)
def fini_mp(processes, t_fetch):
    """Shut down worker processes and join the fetcher thread.

    Each process gets up to 10 seconds to exit on its own.  Note that
    ``Process.join(timeout)`` returns silently when the timeout expires
    rather than raising, so the original ``except``-based kill path could
    never fire on a timeout; a still-running worker must be detected with
    ``is_alive()`` and killed explicitly.  ``Process.close()`` raises
    ``ValueError`` if the process is still running, so it is guarded too.

    Args:
        processes: worker ``multiprocessing.Process`` instances to reap.
        t_fetch: the fetcher ``threading.Thread`` to join afterwards.
    """
    for process in processes:
        process.join(timeout=10)
        if process.is_alive():
            # join() timed out: force-terminate the straggler, then give it
            # a moment to actually die so close() below does not raise.
            try:
                process.kill()
            except Exception:
                pass
            process.join(timeout=1)
        try:
            process.close()
        except ValueError:
            # Process is somehow still running; leak it rather than crash
            # the whole pipeline during teardown.
            pass
    t_fetch.join()
def producer(res, q_in, q_internal, num_processors, predicate):
    """Feed rows into the worker input queue, bypassing non-matching rows.

    Rows satisfying ``predicate`` go to ``q_in`` for parallel processing;
    all other rows are forwarded directly to ``q_internal``.  When the
    source is exhausted, one ``None`` sentinel per worker is enqueued on
    ``q_in``.  Returns 0 on success, 1 on failure (after signalling
    ``q_internal`` with a ``None``).
    """
    try:
        for record in res:
            destination = q_in if predicate(record) else q_internal
            destination.put(record)
        # One end-of-stream sentinel per worker process.
        for _ in range(num_processors):
            q_in.put(None)
    except Exception:
        # Propagate failure downstream as an end-of-stream marker.
        q_internal.put(None)
        return 1
    return 0
def fetcher(q_out, q_internal, num_processors):
    """Funnel processed rows from the workers into the internal queue.

    Each worker ends its stream with a ``None`` sentinel; once all
    ``num_processors`` sentinels have been seen, a single ``None`` is
    forwarded to ``q_internal`` to mark the end of the merged stream.
    """
    remaining = num_processors
    while remaining:
        item = q_out.get()
        if item is None:
            remaining -= 1
        else:
            q_internal.put(item)
    q_internal.put(None)
def work(q_in: mp.Queue, q_out: mp.Queue, row_func):
    """Worker-process main loop.

    Pulls rows from ``q_in`` until a ``None`` sentinel arrives, applies
    ``row_func`` to each row (failures are reported but do not stop the
    worker), and forwards every row to ``q_out``.  A final ``None`` on
    ``q_out`` signals that this worker is done.
    """
    pid = os.getpid()
    try:
        while True:
            item = q_in.get()
            if item is None:
                break
            try:
                row_func(item)
            except Exception as err:
                # Report the failure but keep the row flowing downstream.
                print(pid, 'FAILED TO RUN row_func {}\n'.format(err))
            q_out.put(item)
    except Exception:
        pass
    finally:
        # Always signal completion, even if the loop died unexpectedly.
        q_out.put(None)
def fork(res, row_func, num_processors, predicate):
    """Yield rows from *res*, processing predicate-matching rows in parallel.

    Rows are consumed lazily; as soon as one row matches *predicate*, that
    row is pushed back onto the stream and the whole remainder is handed to
    a producer thread feeding a pool of worker processes.  Processed rows
    are then yielded in arrival order.  If no row ever matches, rows are
    simply passed through unchanged.
    """
    predicate = predicate or (lambda x: True)
    for row in res:
        if predicate(row):
            # Push the matching row back in front of the remaining stream.
            res = itertools.chain([row], res)
            q_in = mp.Queue()
            q_internal = queue.Queue()
            # Producer thread routes rows to the workers (or straight
            # through to q_internal for non-matching rows).
            t_prod = threading.Thread(target=producer, args=(res, q_in, q_internal, num_processors, predicate))
            t_prod.start()
            processes, t_fetch = init_mp(num_processors, row_func, q_in, q_internal)
            # Re-emit rows until the fetcher signals end-of-stream (None).
            while True:
                row = q_internal.get()
                if row is None:
                    break
                yield row
            t_prod.join()
            fini_mp(processes, t_fetch)
            # NOTE(review): after this branch the producer thread has
            # exhausted the source iterator, so the outer for-loop ends --
            # presumably intentional; confirm.
        else:
            yield row
def parallelize(row_func, num_processors=None, resources=None, predicate=None):
    """Dataflows step: apply *row_func* to matching rows in worker processes.

    Args:
        row_func: callable invoked on each row in a worker process (for its
            side effects; the row object itself is forwarded downstream).
        num_processors: number of worker processes; defaults to twice the
            CPU count.
        resources: resource selector understood by ``ResourceMatcher``.
        predicate: optional row filter; non-matching rows bypass the pool.
    """
    num_processors = num_processors or 2*os.cpu_count()
    def func(package: PackageWrapper):
        # Re-emit the datapackage descriptor unchanged.
        yield package.pkg
        matcher = ResourceMatcher(resources, package.pkg)
        res: ResourceWrapper
        for res in package:
            if matcher.match(res.res.name):
                yield fork(res, row_func, num_processors, predicate)
            else:
                # Non-selected resources pass through untouched.
                yield res
    return func
| 26.933333 | 111 | 0.569926 | import itertools
import os
import multiprocessing as mp
import threading
import queue
from ..helpers import ResourceMatcher
from .. import PackageWrapper, ResourceWrapper
def init_mp(num_processors, row_func, q_in, q_internal):
q_out = mp.Queue()
processes = [mp.Process(target=work, args=(q_in, q_out, row_func)) for _ in range(num_processors)]
for process in processes:
process.start()
t_fetch = threading.Thread(target=fetcher, args=(q_out, q_internal, num_processors))
t_fetch.start()
return (processes, t_fetch)
def fini_mp(processes, t_fetch):
for process in processes:
try:
process.join(timeout=10)
except Exception:
try:
process.kill()
except Exception:
pass
finally:
process.close()
t_fetch.join()
def producer(res, q_in, q_internal, num_processors, predicate):
try:
for row in res:
if predicate(row):
q_in.put(row)
else:
q_internal.put(row)
for _ in range(num_processors):
q_in.put(None)
except Exception:
q_internal.put(None)
return 1
return 0
def fetcher(q_out, q_internal, num_processors):
expected_nones = num_processors
while True:
row = q_out.get()
if row is None:
expected_nones -= 1
if expected_nones == 0:
q_internal.put(None)
break
continue
q_internal.put(row)
def work(q_in: mp.Queue, q_out: mp.Queue, row_func):
pid = os.getpid()
try:
while True:
row = q_in.get()
if row is None:
break
try:
row_func(row)
except Exception as e:
print(pid, 'FAILED TO RUN row_func {}\n'.format(e))
pass
q_out.put(row)
except Exception:
pass
finally:
q_out.put(None)
def fork(res, row_func, num_processors, predicate):
predicate = predicate or (lambda x: True)
for row in res:
if predicate(row):
res = itertools.chain([row], res)
q_in = mp.Queue()
q_internal = queue.Queue()
t_prod = threading.Thread(target=producer, args=(res, q_in, q_internal, num_processors, predicate))
t_prod.start()
processes, t_fetch = init_mp(num_processors, row_func, q_in, q_internal)
while True:
row = q_internal.get()
if row is None:
break
yield row
t_prod.join()
fini_mp(processes, t_fetch)
else:
yield row
def parallelize(row_func, num_processors=None, resources=None, predicate=None):
num_processors = num_processors or 2*os.cpu_count()
def func(package: PackageWrapper):
yield package.pkg
matcher = ResourceMatcher(resources, package.pkg)
res: ResourceWrapper
for res in package:
if matcher.match(res.res.name):
yield fork(res, row_func, num_processors, predicate)
else:
yield res
return func
| true | true |
f73180d03d5ce10b4c3dc13aefb29b163648e360 | 3,306 | py | Python | data/p2DJ/New/program/qiskit/simulator/startQiskit336.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit336.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit336.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=18
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    """Build the bit-flip oracle |x>|y> -> |x>|y XOR f(x)> for boolean f.

    For every n-bit string on which f returns "1", a multi-controlled X is
    applied to the target qubit, with X-conjugation on the controls whose
    bit should be 0.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip controls that must match a 0 bit so the MCT fires on rep.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis flips.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Deutsch-Jozsa-style circuit plus extra numbered gates.

    Builds the uniform superposition, applies the oracle for f, un-computes
    the Hadamards, then appends the `# number=k` single-/two-qubit gates
    that distinguish this generated program variant.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.y(input_qubit[1]) # number=2
    prog.y(input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=3
    prog.rx(2.0860175219836226,input_qubit[1]) # number=7
    prog.x(input_qubit[0]) # number=5
    prog.x(input_qubit[0]) # number=6
    prog.h(input_qubit[0]) # number=10
    prog.cz(input_qubit[1],input_qubit[0]) # number=11
    prog.h(input_qubit[0]) # number=12
    prog.h(input_qubit[0]) # number=13
    prog.cz(input_qubit[1],input_qubit[0]) # number=14
    prog.h(input_qubit[0]) # number=15
    prog.x(input_qubit[1]) # number=16
    prog.x(input_qubit[1]) # number=17
    # circuit end
    return prog
if __name__ == '__main__':
    # Balanced test function: value is the last input bit.
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot = 2800
    backend = BasicAer.get_backend('qasm_simulator')

    # Transpile against the FakeVigo (5-qubit) backend model, then append
    # the generator's extra gates and measure everything.
    circuit1 = transpile(prog, FakeVigo())
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()

    # Use a context manager so the results file is always closed/flushed,
    # even if one of the prints raises.
    with open("../data/startQiskit336.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| 28.747826 | 82 | 0.625227 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
prog.x(target)
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1])
prog.h(target)
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
prog.y(input_qubit[1])
prog.y(input_qubit[1])
prog.y(input_qubit[1])
prog.rx(2.0860175219836226,input_qubit[1])
prog.x(input_qubit[0])
prog.x(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[1])
prog.x(input_qubit[1])
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit336.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f73180ffb7b8ac7ff84ef16f39b5d76cd4a45949 | 255 | py | Python | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the test project: each entry wires a path to a
# slick-reporting class-based report view.
urlpatterns = [
    path('report1/', views.MonthlyProductSales.as_view(), name='report1'),
    path('product_crosstab_client/', views.ProductClientSalesMatrix.as_view(), name='product_crosstab_client'),
]
| 31.875 | 111 | 0.756863 | from django.urls import path
from . import views
urlpatterns = [
path('report1/', views.MonthlyProductSales.as_view(), name='report1'),
path('product_crosstab_client/', views.ProductClientSalesMatrix.as_view(), name='product_crosstab_client'),
]
| true | true |
f7318134d4c39845139626c088e97f1dd313f92c | 4,575 | py | Python | minecraft_dynmap_timemachine/dynmap.py | paul-eff/minecraft-dynmap-timemachine | ee902fe6600023a3de2d3b71969738016914a03a | [
"MIT"
] | 61 | 2015-05-22T17:30:09.000Z | 2022-02-26T20:22:15.000Z | minecraft_dynmap_timemachine/dynmap.py | paul-eff/minecraft-dynmap-timemachine | ee902fe6600023a3de2d3b71969738016914a03a | [
"MIT"
] | 10 | 2017-07-18T18:34:42.000Z | 2022-03-09T01:49:13.000Z | minecraft_dynmap_timemachine/dynmap.py | paul-eff/minecraft-dynmap-timemachine | ee902fe6600023a3de2d3b71969738016914a03a | [
"MIT"
] | 18 | 2017-12-28T10:44:54.000Z | 2022-02-26T01:33:05.000Z | # import urllib
import json
import time
import math
import re
from . import simple_downloader
class MapException(Exception):
    """Error raised for problems with a particular DynMap map.

    The offending map object is stored on the ``map`` attribute so
    callers can inspect which map triggered the failure.
    """

    def __init__(self, map_obj, *args, **kwargs):
        self.map = map_obj
        super(MapException, self).__init__(*args, **kwargs)
class DynMap(object):
    """Client for a Minecraft DynMap web server.

    Lazily downloads and parses the server configuration: first
    ``standalone/config.js`` (for the URL templates), then the world
    configuration it points at.  Declared worlds are exposed as
    ``World`` objects keyed by name via :attr:`worlds`.
    """

    def __init__(self, url):
        # super(DynMap, self).__init__(*args, **kwargs)
        # Normalize to no trailing slash; paths are joined with '/' later.
        self.url = url.rstrip('/')
        #self._server_addres = server_addres
        #self._cache_dir = cache_dir
        self._config = None        # parsed world configuration (lazy)
        self._config_urls = None   # parsed config.js url templates (lazy)
        self._worlds = {}          # world name -> World
        #self.urls # force init dynmap urls from server or from property
        self._init()

    def _init(self):
        # Build a World wrapper for every world declared in the config.
        # Note: accessing self.config here triggers the first download.
        for c in self.config['worlds']:
            # print(c)
            w = World(c)
            self._worlds[w.name] = w

    def _download_config(self):
        """Download the configuration of all worlds and their maps."""
        # The {timestamp} placeholder in the URL template acts as a
        # cache-buster; substitute the current epoch second.
        rel_path = self.urls['configuration'].replace('{timestamp}', str(int(time.time())))
        return simple_downloader.download(self.url + '/' + rel_path)

    def _download_config_urls(self):
        """Download the raw DynMap ``standalone/config.js`` source."""
        return simple_downloader.download(self.url + '/' + 'standalone/config.js')

    @staticmethod
    def parse_config_urls_string(jsonlike_str):
        """Extract the ``url : {...};`` object from config.js as a dict.

        The object is JavaScript, not JSON: keys are bare identifiers and
        strings are single-quoted.  Quote the keys and swap the quote
        style so ``json.loads`` can parse it.
        """
        m = re.search('url \: (.+)};', jsonlike_str, re.DOTALL)
        #return json.loads(m.group(1))
        pattern = r"([a-zA-Z_][a-zA-Z_0-9]*)\s*\:"
        repl = lambda match: '"{}":'.format(match.group(1))
        json_str = re.sub(pattern, repl, m.group(1))
        #print json_str
        return json.loads(json_str.replace('\'', '"'))

    @property
    def urls(self):
        """URL templates from config.js; downloaded and cached on first use."""
        if not self._config_urls:
            # if self._config_urls_json:
            #     self._config_urls = json.loads(self._config_urls_json)
            # else:
            self._config_urls = self.parse_config_urls_string(self._download_config_urls())
            # self._config_urls_json = json.dumps(self._config_urls)
            #self.save()
        return self._config_urls

    @property
    def config(self):
        """Parsed world configuration; downloaded and cached on first use."""
        if not self._config:
            # if self._config_json:
            #     self._config = json.loads(self._config_json)
            # else:
            self._config = json.loads(self._download_config())
            # self._config_json = json.dumps(self._config)
        return self._config

    @property
    def worlds(self):
        """Dict mapping world name -> World object."""
        return self._worlds
class World(object):
    """One world from the DynMap configuration.

    Wraps the raw configuration dict and exposes the world's rendered
    maps as ``Map`` objects keyed by map name.
    """

    def __init__(self, world_config):
        self._config = world_config
        # Wrap each declared map; keep them keyed by map name.
        self._maps = {}
        for map_cfg in world_config['maps']:
            wrapped = Map(map_cfg, self.name)
            self._maps[wrapped.name] = wrapped

    @property
    def name(self):
        """Internal world name from the configuration."""
        return self._config['name']

    @property
    def title(self):
        """Human-readable world title."""
        return self._config['title']

    @property
    def maps(self):
        """Dict mapping map name -> Map object."""
        return self._maps
class Map(object):
    """One rendered map of a world (e.g. an isometric or top-down view).

    Thin accessor over the raw map configuration dict.  The upstream
    perspective/shader whitelists and their validation were disabled;
    the configuration values are exposed as-is.
    """

    def __init__(self, map_config, world):
        self._config = map_config
        self._world = world

    def image_url(self, t_loc):
        """Relative URL of the tile image at tile location ``t_loc``.

        ``t_loc`` must provide ``x``, ``y`` and ``zoom`` attributes.
        Tiles are grouped into 32x32 regions; zoom level k prefixes the
        file name with k 'z' characters.
        """
        zoom_prefix = '' if t_loc.zoom == 0 else 'z' * t_loc.zoom + '_'
        region_x = math.floor(t_loc.x / 32.0)
        region_y = math.floor(t_loc.y / 32.0)
        return '/tiles/%s/%s/%d_%d/%s%d_%d.png' % (
            self._world, self.prefix, region_x, region_y,
            zoom_prefix, t_loc.x, t_loc.y)

    @property
    def perspective(self):
        return self._config['perspective']

    @property
    def shader(self):
        return self._config['shader']

    @property
    def name(self):
        return self._config['name']

    @property
    def title(self):
        return self._config['title']

    @property
    def worldtomap(self):
        return self._config['worldtomap']
@property
def prefix(self):
return self._config['prefix'] | 28.773585 | 136 | 0.599344 |
import json
import time
import math
import re
from . import simple_downloader
class MapException(Exception):
def __init__(self, map_obj, *args, **kwargs):
super(MapException, self).__init__(*args, **kwargs)
self.map = map_obj
class DynMap(object):
def __init__(self, url):
self.url = url.rstrip('/')
self._config = None
self._config_urls = None
self._worlds = {}
or c in self.config['worlds']:
w = World(c)
self._worlds[w.name] = w
def _download_config(self):
rel_path = self.urls['configuration'].replace('{timestamp}', str(int(time.time())))
return simple_downloader.download(self.url + '/' + rel_path)
def _download_config_urls(self):
return simple_downloader.download(self.url + '/' + 'standalone/config.js')
@staticmethod
def parse_config_urls_string(jsonlike_str):
m = re.search('url \: (.+)};', jsonlike_str, re.DOTALL)
pattern = r"([a-zA-Z_][a-zA-Z_0-9]*)\s*\:"
repl = lambda match: '"{}":'.format(match.group(1))
json_str = re.sub(pattern, repl, m.group(1))
return json.loads(json_str.replace('\'', '"'))
@property
def urls(self):
if not self._config_urls:
# if self._config_urls_json:
# self._config_urls = json.loads(self._config_urls_json)
# else:
self._config_urls = self.parse_config_urls_string(self._download_config_urls())
# self._config_urls_json = json.dumps(self._config_urls)
#self.save()
return self._config_urls
@property
def config(self):
if not self._config:
# if self._config_json:
# self._config = json.loads(self._config_json)
# else:
self._config = json.loads(self._download_config())
# self._config_json = json.dumps(self._config)
return self._config
@property
def worlds(self):
return self._worlds
class World(object):
def __init__(self, world_config):
self._config = world_config
self._maps = {}
self._init()
def _init(self):
for c in self._config['maps']:
m = Map(c, self.name)
self._maps[m.name] = m
@property
def name(self):
return self._config['name']
@property
def title(self):
return self._config['title']
@property
def maps(self):
return self._maps
class Map(object):
# PERSPECTIVES = ['iso_SE_30_hires', 'iso_SE_30_lowres', 'iso_SE_60_hires', 'iso_SE_60_lowres', 'iso_S_90_hires', 'iso_S_90_lowres']
# SHADERS = ['stdtexture', 'cave']
def __init__(self, map_config, world):
self._config = map_config
self._world = world
# if not Map.is_known_perspective(self.perspective):
# raise MapException(self, 'Unknown perspective "%s"' % self.perspective)
# if not Map.is_known_shader(self.shader):
# raise MapException(self, 'Unknown shader "%s"' % self.shader)
# @staticmethod
# def is_known_perspective(type_name):
# return type_name in Map.PERSPECTIVES
#
# @staticmethod
# def is_known_shader(shader_name):
# return shader_name in Map.SHADERS
def image_url(self, t_loc):
zoom = t_loc.zoom
chunk_x = math.floor(t_loc.x / 32.0)
chunk_y = math.floor(t_loc.y / 32.0)
dashes = ('' if zoom == 0 else ('z' * zoom) + '_')
image_url = '/tiles/%s/%s/%d_%d/%s%d_%d.png' % (self._world, self.prefix, chunk_x, chunk_y, dashes, t_loc.x, t_loc.y)
return image_url
@property
def perspective(self):
return self._config['perspective']
@property
def shader(self):
return self._config['shader']
@property
def name(self):
return self._config['name']
@property
def title(self):
return self._config['title']
@property
def worldtomap(self):
return self._config['worldtomap']
@property
def prefix(self):
return self._config['prefix'] | true | true |
f731817c0c67a924ce2ccfc634deaf68c68b5c57 | 54,791 | py | Python | numpy/polynomial/hermite.py | ivanov/numpy | 6d2665626e40f346bb5af8d780579f5a429ff9ba | [
"BSD-3-Clause"
] | null | null | null | numpy/polynomial/hermite.py | ivanov/numpy | 6d2665626e40f346bb5af8d780579f5a429ff9ba | [
"BSD-3-Clause"
] | null | null | null | numpy/polynomial/hermite.py | ivanov/numpy | 6d2665626e40f346bb5af8d780579f5a429ff9ba | [
"BSD-3-Clause"
] | null | null | null | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
import warnings
from .polytemplate import polytemplate
__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline',
'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow',
'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm',
'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots',
'Hermite', 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d',
'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss',
'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol):
    """
    poly2herm(pol)

    Convert a polynomial to a Hermite series.

    Convert an array representing the coefficients of a polynomial
    (relative to the "standard" basis) ordered from lowest degree to
    highest, to an array of the coefficients of the equivalent Hermite
    series, ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herm2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import poly2herm
    >>> poly2herm(np.arange(4))
    array([ 1.   ,  2.75 ,  0.5  ,  0.375])

    """
    [pol] = pu.as_series([pol])
    res = 0
    # Horner's scheme in the Hermite basis: fold the power-series
    # coefficients from highest degree down, multiplying by x each step.
    for coef in pol[::-1]:
        res = hermadd(hermmulx(res), coef)
    return res
def herm2poly(c) :
    """
    Convert a Hermite series to a polynomial.

    Convert an array representing the coefficients of a Hermite series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2herm

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    The conversion runs the Hermite recurrence
    ``H_{k+1}(x) = 2*x*H_k(x) - 2*k*H_{k-1}(x)`` backwards (a
    Clenshaw-style reduction) entirely in the power basis.

    Examples
    --------
    >>> from numpy.polynomial.hermite import herm2poly
    >>> herm2poly([ 1.   ,  2.75 ,  0.5  ,  0.375])
    array([ 0.,  1.,  2.,  3.])

    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        return c
    if n == 2:
        # H_1(x) = 2x, so the degree-1 coefficient doubles.
        c[1] *= 2
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1) :
            tmp = c0
            # Unwind the recurrence: absorb c1 into the two lower terms.
            c0 = polysub(c[i - 2], c1*(2*(i - 1)))
            c1 = polyadd(tmp, polymulx(c1)*2)
        return polyadd(c0, polymulx(c1)*2)
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other coefficient types, such as Decimal.
# (hermx is the exception: H_1(x) = 2x, so x = H_1/2 needs 1/2.)
#

# Hermite default domain.
hermdomain = np.array([-1,1])

# Hermite coefficients representing zero.
hermzero = np.array([0])

# Hermite coefficients representing one.
hermone = np.array([1])

# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.  Since ``H_1(x) = 2*x``, the degree-1
        coefficient is ``scl/2``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermline, hermval
    >>> hermval(0,hermline(3, 2))
    3.0
    >>> hermval(1,hermline(3, 2))
    5.0

    """
    if scl == 0:
        # Degenerate case: a constant series.
        return np.array([off])
    return np.array([off, scl/2])
def hermfromroots(roots):
    """
    Generate a Hermite series with given roots.

    Returns the coefficients, in Hermite form, of the monic-product
    polynomial ``p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n)`` where
    the ``r_n`` are the values in `roots`.  A root of multiplicity n must
    appear n times; order is irrelevant.  Note that the leading Hermite
    coefficient is generally not 1.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  Real if all roots are real, complex
        otherwise (even if every coefficient value is real).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, chebfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfromroots, hermval
    >>> coef = hermfromroots((-1, 0, 1))
    >>> hermval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = hermfromroots((-1j, 1j))
    >>> hermval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])

    """
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # One linear factor (x - r) per root, then multiply them pairwise so
    # the intermediate products stay balanced in degree.
    factors = [hermline(-r, 1) for r in roots]
    while len(factors) > 1:
        half, odd = divmod(len(factors), 2)
        merged = [hermmul(factors[i], factors[i + half]) for i in range(half)]
        if odd:
            merged[0] = hermmul(merged[0], factors[-1])
        factors = merged
    return factors[0]
def hermadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments are
    sequences of coefficients ordered from lowest order term to highest,
    i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermsub, hermmul, hermdiv, hermpow

    Notes
    -----
    The sum of two Hermite series is again a Hermite series, so addition
    is simply component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermadd
    >>> hermadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # Add the shorter array into the leading part of the longer one.
    if len(c2) >= len(c1):
        c2[:c1.size] += c1
        result = c2
    else:
        c1[:c2.size] += c2
        result = c1
    return pu.trimseq(result)
def hermsub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermadd, hermmul, hermdiv, hermpow

    Notes
    -----
    The difference of two Hermite series is again a Hermite series, so
    subtraction is simply component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermsub
    >>> hermsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        diff = c1
    else:
        # Negate the longer array, then add c1 into its leading part.
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def hermmulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

        xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmulx
    >>> hermmulx([1, 2, 3])
    array([ 2. ,  6.5,  1. ,  1.5])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series maps to itself; no degree bump needed.
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.empty(len(c) + 1, dtype=c.dtype)
    out[0] = c[0]*0
    out[1] = c[0]/2
    for k in range(1, len(c)):
        # x*H_k contributes H_{k+1}/2 and k*H_{k-1}.
        out[k + 1] = c[k]/2
        out[k - 1] += c[k]*k
    return out
def hermmul(c1, c2):
    """
    Multiply one Hermite series by another.

    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.

    See Also
    --------
    hermadd, hermsub, hermdiv, hermpow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Hermite polynomial basis set.  Thus, to express
    the product as a Hermite series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.

    The implementation is a Clenshaw-like reduction built on the
    recurrence ``H_{k+1}(x) = 2*x*H_k(x) - 2*k*H_{k-1}(x)``, evaluated
    with the shorter series as the "argument" and the longer as the
    coefficient array.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmul
    >>> hermmul([1, 2, 3], [0, 1, 2])
    array([ 52.,  29.,  52.,   7.,   6.])

    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])

    # Evaluate the shorter series c at "x = xs" (the longer one), so the
    # expensive recurrence runs for as few steps as possible.
    if len(c1) > len(c2):
        c = c2
        xs = c1
    else:
        c = c1
        xs = c2

    if len(c) == 1:
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else :
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        # Backward Clenshaw recurrence; c0/c1 accumulate the partial
        # sums attached to H_{nd-2} and H_{nd-1} respectively.
        for i in range(3, len(c) + 1) :
            tmp = c0
            nd = nd - 1
            c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
            c1 = hermadd(tmp, hermmulx(c1)*2)
    # Final combination: result = c0*H_0 + c1*H_1 = c0 + 2*x*c1.
    return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
    """
    Divide one Hermite series by another.

    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.

    Raises
    ------
    ZeroDivisionError
        If the leading coefficient of `c2` is zero.

    See Also
    --------
    hermadd, hermsub, hermmul, hermpow

    Notes
    -----
    In general, the (polynomial) division of one Hermite series by another
    results in quotient and remainder terms that are not in the Hermite
    polynomial basis set.  Thus, to express these results as a Hermite
    series, it is necessary to "reproject" the results onto the Hermite
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermdiv
    >>> hermdiv([ 52.,  29.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> hermdiv([ 54.,  31.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 2.,  2.]))
    >>> hermdiv([ 53.,  30.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  1.]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0 :
        raise ZeroDivisionError()

    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2 :
        # Divisor degree exceeds dividend degree: quotient is zero.
        return c1[:1]*0, c1
    elif lc2 == 1 :
        # Dividing by a constant: scale and leave zero remainder.
        return c1/c2[-1], c1[:1]*0
    else :
        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
        rem = c1
        # Long division from the highest quotient degree down.  At each
        # step, [0]*i + [1] is the basis series H_i, so p = H_i * c2 and
        # q is the factor that cancels the remainder's leading term.
        for i in range(lc1 - lc2, - 1, -1):
            p = hermmul([0]*i + [1], c2)
            q = rem[-1]/p[-1]
            rem = rem[:-1] - q*p[:-1]
            quo[i] = q
        return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16):
    """Raise a Hermite series to a power.

    Returns the Hermite series `c` raised to the power `pow`.  The
    argument `c` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed.  This is mainly to limit growth of the
        series to unmanageable size.  Default is 16

    Returns
    -------
    coef : ndarray
        Hermite series of power.

    Raises
    ------
    ValueError
        If `pow` is negative or non-integral, or exceeds `maxpower`.

    See Also
    --------
    hermadd, hermsub, hermmul, hermdiv

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermpow
    >>> hermpow([1, 2, 3], 2)
    array([ 81.,  52.,  82.,  12.,   9.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeated multiplication.  Binary exponentiation would save a few
    # hermmul calls, but series growth dominates the cost anyway.
    result = c
    for _ in range(power - 1):
        result = hermmul(result, c)
    return result
def hermder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Hermite series.

    Returns the Hermite series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl`
    (the scaling factor is for use in a linear change of variable).  The
    argument `c` is an array of coefficients from low to high degree along
    each axis, e.g., [1,2,3] represents the series
    ``1*H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents
    ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
    2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients.  If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Hermite series of the derivative.

    See Also
    --------
    hermint

    Notes
    -----
    In general, the result of differentiating a Hermite series does not
    resemble the same operation on a power series.  Thus the result of
    this function may be "unintuitive," albeit correct; see Examples
    section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermder
    >>> hermder([ 1. ,  0.5,  0.5,  0.5])
    array([ 1.,  2.,  3.])
    >>> hermder([-0.5,  1./2.,  1./8.,  1./12.,  1./16.], m=2)
    array([ 1.,  2.,  3.])

    """
    c = np.array(c, ndmin=1, copy=1)
    # Promote boolean/integer coefficient arrays to double so the
    # in-place arithmetic below behaves.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)

    cnt = int(m)
    iaxis = int(axis)
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    # Bring the working axis to the front for easy indexing.
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # Differentiating past the degree yields the zero series.
        c = c[:1]*0
    else:
        for _ in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # d/dx H_j(x) = 2*j*H_{j-1}(x)
            for j in range(n, 0, -1):
                der[j - 1] = (2*j)*c[j]
            c = der
    return np.rollaxis(c, 0, iaxis + 1)
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite series.

    Integrates the coefficient array `c` a total of `m` times from `lbnd`
    along `axis`.  After each integration the series is multiplied by
    `scl` and the matching constant from `k` is added so that the result
    takes that constant's value at `lbnd`.

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients, ordered from low to high
        degree along each axis.
    m : int, optional
        Order of integration, must be non-negative. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s), one per integration; missing entries are
        taken as zero.  A scalar may be given when ``m == 1``.
    lbnd : scalar, optional
        Lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Multiplier applied after each integration. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0)

    Returns
    -------
    S : ndarray
        Hermite series coefficients of the integral.

    Raises
    ------
    ValueError
        If `m` or `axis` is not an integer, ``m < 0``, too many
        integration constants are supplied, or `axis` is out of range.

    See Also
    --------
    hermder
    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote boolean/integer coefficients to float
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]

    cnt, iaxis = int(m), int(axis)
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c

    # Bring the integration axis to the front and zero-pad the constants.
    c = np.rollaxis(c, iaxis)
    k = list(k) + [0]*(cnt - len(k))
    for const in k:
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # integrating the zero series just adds the constant
            c[0] += const
        else:
            # integral of H_j is H_{j+1}/(2*(j+1))
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]*0
            tmp[1] = c[0]/2
            for j in range(1, n):
                tmp[j + 1] = c[j]/(2*(j + 1))
            # pick the constant term so the value at lbnd equals `const`
            tmp[0] += const - hermval(lbnd, tmp)
            c = tmp
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermval(x, c, tensor=True):
    """
    Evaluate a Hermite series at points x.

    Computes ``c[0]*H_0(x) + c[1]*H_1(x) + ... + c[n]*H_n(x)`` using
    Clenshaw recursion.  Lists and tuples given for `x` are converted to
    ndarrays; any other `x` is treated as a scalar-like object that must
    support addition and multiplication with the coefficients.

    Parameters
    ----------
    x : array_like, compatible object
        Points of evaluation.
    c : array_like
        Coefficients ordered from low to high degree.  If `c` is
        multidimensional the remaining indices enumerate multiple series.
    tensor : boolean, optional
        If True (default) the coefficient array is broadcast against every
        element of `x`; if False, `x` is broadcast over the trailing
        dimensions of `c`.  Default is True.

    Returns
    -------
    values : ndarray, algebra_like
        Shape is ``c.shape[1:] + x.shape`` when `tensor` is true and `x`
        is an ndarray.

    See Also
    --------
    hermval2d, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    Trailing zero coefficients are still used in the evaluation.
    """
    c = np.array(c, ndmin=1, copy=0)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote boolean/integer coefficients to float
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # append singleton axes so coefficients broadcast against x
        c = c.reshape(c.shape + (1,)*x.ndim)

    x2 = x*2
    nterms = len(c)
    if nterms == 1:
        b0, b1 = c[0], 0
    elif nterms == 2:
        b0, b1 = c[0], c[1]
    else:
        # Clenshaw recursion with H_{n+1} = 2x*H_n - 2n*H_{n-1}
        deg = nterms
        b0 = c[-2]
        b1 = c[-1]
        for i in range(3, nterms + 1):
            b0, b1, deg = c[-i] - b1*(2*(deg - 2)), b0 + b1*x2, deg - 1
    return b0 + b1*x2
def hermval2d(x, y, c):
    """
    Evaluate a 2-D Hermite series at points (x, y).

    Returns ``p(x,y) = sum_{i,j} c[i,j] * H_i(x) * H_j(y)`` evaluated
    pointwise, so `x` and `y` must be broadcast-compatible after
    conversion.  If `c` is 1-D a one is implicitly appended to its shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Evaluation points; lists/tuples become ndarrays, other
        non-ndarray inputs are treated as scalars.
    c : array_like
        Coefficient array with the term of multi-degree i,j in ``c[i,j]``.

    Returns
    -------
    values : ndarray, compatible object
        Values of the series at the paired points.

    Raises
    ------
    ValueError
        If `x` and `y` cannot be combined into a single array.

    See Also
    --------
    hermval, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only conversion failures should be translated.
        raise ValueError('x, y are incompatible')
    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    return c
def hermgrid2d(x, y, c):
    """
    Evaluate a 2-D Hermite series on the Cartesian product of x and y.

    Returns ``p(a,b) = sum_{i,j} c[i,j] * H_i(a) * H_j(b)`` for every
    pair ``(a, b)`` with ``a`` drawn from `x` and ``b`` from `y`.  In the
    result, `x` varies in the first dimension and `y` in the second.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Grid coordinates; lists/tuples are converted to ndarrays, other
        non-ndarray inputs are treated as scalars.
    c : array_like
        Coefficient array with the term of degree i,j in ``c[i,j]``.

    Returns
    -------
    values : ndarray, compatible object
        Series values on the Cartesian product grid.

    See Also
    --------
    hermval, hermval2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Evaluating one axis at a time builds the outer-product grid.
    return hermval(y, hermval(x, c))
def hermval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series at points (x, y, z).

    Returns ``p(x,y,z) = sum_{i,j,k} c[i,j,k] * H_i(x) * H_j(y) * H_k(z)``
    evaluated pointwise, so `x`, `y` and `z` must be broadcast-compatible
    after conversion.  If `c` has fewer than 3 dimensions, ones are
    implicitly appended to its shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        Evaluation points; lists/tuples become ndarrays, other
        non-ndarray inputs are treated as scalars.
    c : array_like
        Coefficient array with the term of multi-degree i,j,k in
        ``c[i,j,k]``.

    Returns
    -------
    values : ndarray, compatible object
        Values of the series at the point triples.

    Raises
    ------
    ValueError
        If `x`, `y` and `z` cannot be combined into a single array.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only conversion failures should be translated.
        raise ValueError('x, y, z are incompatible')
    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    c = hermval(z, c, tensor=False)
    return c
def hermgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.

    Returns ``p(a,b,c) = sum_{i,j,k} c[i,j,k] * H_i(a) * H_j(b) * H_k(c)``
    for every triple formed by taking ``a`` from `x`, ``b`` from `y`, and
    ``c`` from `z`.  The grid dimensions follow the order of `x`, `y`, `z`.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        Grid coordinates; lists/tuples are converted to ndarrays, other
        non-ndarray inputs are treated as scalars.
    c : array_like
        Coefficient array with the term of degree i,j,k in ``c[i,j,k]``.

    Returns
    -------
    values : ndarray, compatible object
        Series values on the Cartesian product grid.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Evaluating one axis at a time builds the outer-product grid.
    return hermval(z, hermval(y, hermval(x, c)))
def hermvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Column ``i`` of the result holds ``H_i(x)`` for ``0 <= i <= deg``, so
    ``np.dot(hermvander(x, n), c)`` equals ``hermval(x, c)`` for a
    coefficient vector `c` of length ``n + 1``.

    Parameters
    ----------
    x : array_like
        Sample points; converted to a 1-D float (or complex) array.
    deg : int
        Degree of the resulting matrix; must be a non-negative integer.

    Returns
    -------
    vander : ndarray
        Matrix of shape ``x.shape + (deg + 1,)`` with the same dtype as
        the converted `x`.
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    x = np.array(x, copy=0, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    # Fill rows by the forward recurrence H_i = 2x*H_{i-1} - 2(i-1)*H_{i-2}.
    v[0] = x*0 + 1
    if ideg > 0:
        twox = x*2
        v[1] = twox
        for i in range(2, ideg + 1):
            v[i] = v[i-1]*twox - v[i-2]*(2*(i - 1))
    # Move the degree axis to the end.
    return np.rollaxis(v, 0, v.ndim)
def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees for 2-D sample points.

    Entry ``V[..., (deg[1]+1)*i + j]`` equals ``H_i(x) * H_j(y)``, so
    ``np.dot(V, c.flat)`` matches ``hermval2d(x, y, c)`` for a 2-D
    coefficient array `c` of shape ``(deg[0]+1, deg[1]+1)``.

    Parameters
    ----------
    x, y : array_like
        Point coordinates, all of the same shape; converted to float or
        complex.  Scalars become 1-D arrays.
    deg : list of ints
        Maximum degrees of the form ``[x_deg, y_deg]``.

    Returns
    -------
    vander2d : ndarray
        Matrix of shape ``x.shape + ((deg[0]+1)*(deg[1]+1),)``.

    See Also
    --------
    hermvander, hermvander3d, hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    ok = [i == d and i >= 0 for i, d in zip(ideg, deg)]
    if ok != [1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg

    x, y = np.array((x, y), copy=0) + 0.0
    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    # Outer product over the two degree axes, then flatten them into one.
    v = vx[..., None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees for 3-D sample points.

    With degrees ``l, m, n``, entry
    ``V[..., (m+1)*(n+1)*i + (n+1)*j + k]`` equals
    ``H_i(x)*H_j(y)*H_k(z)``, so ``np.dot(V, c.flat)`` matches
    ``hermval3d(x, y, z, c)`` for a 3-D coefficient array `c` of shape
    ``(l+1, m+1, n+1)``.

    Parameters
    ----------
    x, y, z : array_like
        Point coordinates, all of the same shape; converted to float or
        complex.  Scalars become 1-D arrays.
    deg : list of ints
        Maximum degrees of the form ``[x_deg, y_deg, z_deg]``.

    Returns
    -------
    vander3d : ndarray
        Matrix of shape ``x.shape + ((l+1)*(m+1)*(n+1),)``.

    See Also
    --------
    hermvander, hermvander2d, hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    ok = [i == d and i >= 0 for i, d in zip(ideg, deg)]
    if ok != [1, 1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg

    x, y, z = np.array((x, y, z), copy=0) + 0.0
    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    vz = hermvander(z, degz)
    # Outer product over the three degree axes, flattened into one.
    v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Returns the coefficients of the Hermite series of degree `deg` that
    minimizes the (optionally weighted) squared error at the sample
    points.  A 2-D `y` fits one series per column.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative cutoff for small singular values; defaults to
        ``len(x)*eps`` for the float type of `x`.
    full : bool, optional
        When True, diagnostic information from the singular value
        decomposition is also returned.
    w : array_like, shape (M,), optional
        Weights applied to the sample points.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Hermite coefficients ordered from low to high degree.
    [residuals, rank, singular_values, rcond] : present when `full` = True
        Diagnostics from `numpy.linalg.lstsq`.

    Warns
    -----
    RankWarning
        If the coefficient matrix is rank deficient and `full` is False.

    See Also
    --------
    hermval : Evaluates a Hermite series.
    hermvander : Vandermonde matrix of Hermite series.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # Validate the sample data.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # Set up the least squares matrices in transposed form.
    van = hermvander(x, deg).T
    obs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # Apply the weights out of place; in-place ops can clash with NA.
        van = van * w
        obs = obs * w

    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Normalize the design-matrix columns to improve conditioning.
    if issubclass(van.dtype.type, np.complexfloating):
        colnorm = np.sqrt((np.square(van.real) + np.square(van.imag)).sum(1))
    else:
        colnorm = np.sqrt(np.square(van).sum(1))
    colnorm[colnorm == 0] = 1

    # Solve, then undo the column scaling on the coefficients.
    c, resids, rank, s = la.lstsq(van.T/colnorm, obs.T, rcond)
    c = (c.T/colnorm).T

    # Warn on rank reduction: the fit may be poorly determined.
    if rank != order and not full:
        warnings.warn("The fit may be poorly conditioned", pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    return c
def hermcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an Hermite basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Raises
    ------
    ValueError
        If the series has degree less than 1.

    Notes
    -----
    .. versionadded::1.7.0
    """
    # NOTE: removed unused local ``accprod = np.multiply.accumulate``,
    # which was assigned but never referenced.
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # degree 1: the single "root" -c0/(2*c1) as a 0-d array
        return np.array(-.5*c[0]/c[1])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Cumulative scaling that symmetrizes the off-diagonal entries.
    scl = np.hstack((1., np.sqrt(2.*np.arange(1, n))))
    scl = np.multiply.accumulate(scl)
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = np.sqrt(.5*np.arange(1, n))
    bot[...] = top
    # Fold the (rescaled) lower coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
    return mat
def hermroots(c):
    """
    Compute the roots of a Hermite series.

    The roots are obtained as the eigenvalues of the scaled companion
    matrix of the series.  Roots far from the origin, or with
    multiplicity greater than 1, may carry larger numerical error; such
    estimates can be polished with a few Newton iterations.

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients, low to high degree.

    Returns
    -------
    out : ndarray
        Sorted roots of the series; real if all roots are real, otherwise
        complex.

    See Also
    --------
    polyroots, legroots, lagroots, chebroots, hermeroots
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    deg = len(c) - 1
    if deg < 1:
        # a constant series has no roots
        return np.array([], dtype=c.dtype)
    if deg == 1:
        # linear: c0*H_0 + c1*H_1 = 0 with H_1 = 2x
        return np.array([-.5*c[0]/c[1]])

    roots = la.eigvals(hermcompanion(c))
    roots.sort()
    return roots
def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval
    :math:`[-\\inf, \\inf]` with the weight function
    :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Raises
    ------
    ValueError
        If `deg` is not a positive integer.

    Notes
    -----
    .. versionadded::1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # BUG FIX: the old message said "non-negative", but deg == 0 is
        # rejected too -- the degree must be strictly positive.
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = hermcompanion(c)
    x = la.eigvals(m)
    x.sort()

    # improve roots by one application of Newton
    dy = hermval(x, c)
    df = hermval(x, hermder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = hermval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()

    return x, w
def hermweight(x):
    """
    Weight function of the Hermite polynomials.

    The weight is :math:`\\exp(-x^2)` on the interval
    :math:`[-\\inf, \\inf]`; the Hermite polynomials are orthogonal, but
    not normalized, with respect to it.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded::1.7.0
    """
    return np.exp(-x**2)
#
# Hermite series class
#
# Instantiate the shared polynomial class template for the Hermite basis;
# exec() installs the resulting ``Hermite`` class in this module's namespace.
exec(polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]'))
from __future__ import division, absolute_import
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
import warnings
from .polytemplate import polytemplate
__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline',
'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow',
'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm',
'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots',
'Hermite', 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d',
'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss',
'hermweight']
# Alias of polyutils.trimcoef: removes "small" trailing coefficients.
hermtrim = pu.trimcoef
def poly2herm(pol):
    """Convert a standard polynomial to a Hermite series.

    `pol` holds monomial coefficients from low to high degree; the result
    holds the equivalent Hermite-basis coefficients, also low to high.
    """
    # pol is a trimmed copy
    [pol] = pu.as_series([pol])
    result = 0
    # Horner's scheme in the Hermite basis: multiply by x and add the
    # next lower coefficient, starting from the highest degree.
    for coef in pol[::-1]:
        result = hermadd(hermmulx(result), coef)
    return result
def herm2poly(c) :
    """Convert a Hermite series to a standard polynomial.

    `c` holds Hermite-basis coefficients from low to high degree; the
    return value holds the equivalent monomial coefficients, also low to
    high.
    """
    from .polynomial import polyadd, polysub, polymulx
    # c is a trimmed copy
    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        return c
    if n == 2:
        # H_1(x) = 2x, so the linear coefficient doubles.
        c[1] *= 2
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # Clenshaw-style downward recursion using
        # H_i(x) = 2x*H_{i-1}(x) - 2(i-1)*H_{i-2}(x).
        for i in range(n - 1, 1, -1) :
            tmp = c0
            c0 = polysub(c[i - 2], c1*(2*(i - 1)))
            c1 = polyadd(tmp, polymulx(c1)*2)
        return polyadd(c0, polymulx(c1)*2)
# Hermite default domain.
hermdomain = np.array([-1,1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x (H_1(x) = 2x, so x = H_1/2).
hermx = np.array([0, 1/2])
def hermline(off, scl):
    """Hermite series whose graph is the straight line ``off + scl*x``.

    Since ``H_1(x) = 2x``, the slope coefficient is ``scl/2``; a zero
    slope yields the length-1 constant series.
    """
    if scl == 0:
        return np.array([off])
    return np.array([off, scl/2])
def hermfromroots(roots):
    """Generate a Hermite series with the given roots.

    The returned series is the product of the linear factors ``(x - r)``
    expressed in the Hermite basis; an empty root list yields the
    constant series ``[1]``.
    """
    if len(roots) == 0:
        return np.ones(1)

    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # Multiply the linear factors pairwise so intermediate products stay
    # balanced in degree.
    factors = [hermline(-r, 1) for r in roots]
    n = len(factors)
    while n > 1:
        m, odd = divmod(n, 2)
        paired = [hermmul(factors[i], factors[i + m]) for i in range(m)]
        if odd:
            paired[0] = hermmul(paired[0], factors[-1])
        factors = paired
        n = m
    return factors[0]
def hermadd(c1, c2):
    """Add one Hermite series to another.

    Component-wise sum of the two coefficient arrays, returned with
    trailing zeros trimmed.
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # Accumulate into whichever series is at least as long.
    if len(c2) >= len(c1):
        c2[:c1.size] += c1
        total = c2
    else:
        c1[:c2.size] += c2
        total = c1
    return pu.trimseq(total)
def hermsub(c1, c2):
    """Subtract one Hermite series from another (``c1 - c2``).

    Component-wise difference of the two coefficient arrays, returned
    with trailing zeros trimmed.
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) >= len(c1):
        # negate the longer series, then add the shorter one in place
        c2 = -c2
        c2[:c1.size] += c1
        diff = c2
    else:
        c1[:c2.size] -= c2
        diff = c1
    return pu.trimseq(diff)
def hermmulx(c):
[c] = pu.as_series([c])
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]/2
for i in range(1, len(c)):
prd[i + 1] = c[i]/2
prd[i - 1] += c[i]*i
return prd
def hermmul(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else :
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1) :
tmp = c0
nd = nd - 1
c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
c1 = hermadd(tmp, hermmulx(c1)*2)
return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16) :
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
prd = c
for i in range(2, power + 1) :
prd = hermmul(prd, c)
return prd
def hermder(c, m=1, scl=1, axis=0) :
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else :
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0 :
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt :
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]/2
for j in range(1, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermval(x, c, tensor=True):
    """Evaluate a Hermite series at points ``x`` via Clenshaw recursion.

    ``c`` holds the coefficients, lowest order first; for multidimensional
    ``c`` the leading axis indexes the coefficients.  If ``tensor`` is true
    and ``x`` is an ndarray, the coefficient array is broadcast over every
    element of ``x``.
    """
    c = np.array(c, ndmin=1, copy=0)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote bool/integer coefficients so the recurrence runs in floats.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append singleton axes so c broadcasts against the shape of x.
        c = c.reshape(c.shape + (1,)*x.ndim)
    x2 = x*2
    if len(c) == 1 :
        c0 = c[0]
        c1 = 0
    elif len(c) == 2 :
        c0 = c[0]
        c1 = c[1]
    else :
        nd = len(c)
        c0 = c[-2]
        c1 = c[-1]
        # Clenshaw downward recurrence using H_{k+1} = 2x*H_k - 2k*H_{k-1}.
        for i in range(3, len(c) + 1) :
            tmp = c0
            nd = nd - 1
            c0 = c[-i] - c1*(2*(nd - 1))
            c1 = tmp + c1*x2
    return c0 + c1*x2
def hermval2d(x, y, c):
    """Evaluate a 2-D Hermite series at points ``(x, y)``.

    ``x`` and ``y`` must be array-likes of compatible shape; the series is
    summed as ``sum_{i,j} c[i,j] * H_i(x) * H_j(y)``.

    Raises
    ------
    ValueError
        If ``x`` and ``y`` cannot be combined into a single array.
    """
    try:
        x, y = np.array((x, y), copy=0)
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # only conversion failures should turn into a ValueError.
    except Exception:
        raise ValueError('x, y are incompatible')
    c = hermval(x, c)
    # The second evaluation consumes the leading coefficient axis left by
    # the first, so broadcasting must not be re-applied.
    c = hermval(y, c, tensor=False)
    return c
def hermgrid2d(x, y, c):
c = hermval(x, c)
c = hermval(y, c)
return c
def hermval3d(x, y, z, c):
    """Evaluate a 3-D Hermite series at points ``(x, y, z)``.

    ``x``, ``y`` and ``z`` must be array-likes of compatible shape; the
    series is summed as ``sum_{i,j,k} c[i,j,k] * H_i(x) * H_j(y) * H_k(z)``.

    Raises
    ------
    ValueError
        If ``x``, ``y`` and ``z`` cannot be combined into a single array.
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    # Narrowed from a bare ``except:`` which would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:
        raise ValueError('x, y, z are incompatible')
    c = hermval(x, c)
    # Each later evaluation consumes the leading coefficient axis left by
    # the previous one.
    c = hermval(y, c, tensor=False)
    c = hermval(z, c, tensor=False)
    return c
def hermgrid3d(x, y, z, c):
c = hermval(x, c)
c = hermval(y, c)
c = hermval(z, c)
return c
def hermvander(x, deg) :
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0 :
x2 = x*2
v[1] = x2
for i in range(2, ideg + 1) :
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
return np.rollaxis(v, 0, v.ndim)
def hermvander2d(x, y, deg) :
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
v = vx[..., None]*vy[..., None, :]
return v.reshape(v.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg) :
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
vz = hermvander(z, degz)
v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
lhs = hermvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def hermcompanion(c):
    """Return the scaled companion matrix of a Hermite series.

    The basis polynomials are rescaled so that the companion matrix is
    symmetric when ``c`` is a basis polynomial, which yields better-behaved
    eigenvalues for root finding.

    Raises
    ------
    ValueError
        If the series has degree < 1.
    """
    # Removed dead local ``accprod = np.multiply.accumulate`` (was assigned
    # but never used).
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree 1: single root of c0 + c1*H_1(x) = c0 + 2*c1*x.
        return np.array(-.5*c[0]/c[1])
    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Cumulative scaling that symmetrizes the companion matrix.
    scl = np.hstack((1., np.sqrt(2.*np.arange(1,n))))
    scl = np.multiply.accumulate(scl)
    # Views onto the super- and sub-diagonals of the flattened matrix.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = np.sqrt(.5*np.arange(1,n))
    bot[...] = top
    # Fold the monic-normalized coefficients into the last column.
    mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
    return mat
def hermroots(c):
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1 :
return np.array([], dtype=c.dtype)
if len(c) == 2 :
return np.array([-.5*c[0]/c[1]])
m = hermcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def hermgauss(deg):
    """Compute sample points and weights for Gauss-Hermite quadrature.

    Returns ``(x, w)``: the roots of ``H_deg`` and the weights for
    integrating against ``exp(-x**2)`` over ``(-inf, inf)``.

    Raises
    ------
    ValueError
        If *deg* is not a positive integer (the original message said
        "non-negative" although the code requires ``deg >= 1``).
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")
    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = hermcompanion(c)
    x = la.eigvals(m)
    x.sort()
    # improve roots by one application of Newton
    dy = hermval(x, c)
    df = hermval(x, hermder(c))
    x -= dy/df
    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = hermval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)
    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2
    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()
    return x, w
def hermweight(x):
    """Weight function ``exp(-x**2)`` of the (physicists') Hermite polynomials."""
    return np.exp(-x**2)
#
# Hermite series class
#
exec(polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]'))
| true | true |
f73182b901161d28b67b66626d7189fdce69c7df | 368 | py | Python | utilities/models.py | codes-dev/qubwebs-blog | 0ba9372b2ec83ad181dbd31eb009a17a4a7acaf0 | [
"MIT"
] | null | null | null | utilities/models.py | codes-dev/qubwebs-blog | 0ba9372b2ec83ad181dbd31eb009a17a4a7acaf0 | [
"MIT"
] | null | null | null | utilities/models.py | codes-dev/qubwebs-blog | 0ba9372b2ec83ad181dbd31eb009a17a4a7acaf0 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class TimeStampMixin(models.Model):
"""
An abstract base class model that provides self-updating
``created_at`` and ``updated_at`` fields.
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True | 24.533333 | 60 | 0.701087 | from django.db import models
class TimeStampMixin(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True | true | true |
f73182c1280def3ee7b87cc1f8310cdf39c87b12 | 16,051 | py | Python | fairseq/fairseq/data/indexed_dataset.py | oguzdemirbasci/DynamicVocabAbstractiveSummariser | 2e8ba9efd6eddd7d1870d540638f05c80bfe9894 | [
"MIT"
] | null | null | null | fairseq/fairseq/data/indexed_dataset.py | oguzdemirbasci/DynamicVocabAbstractiveSummariser | 2e8ba9efd6eddd7d1870d540638f05c80bfe9894 | [
"MIT"
] | null | null | null | fairseq/fairseq/data/indexed_dataset.py | oguzdemirbasci/DynamicVocabAbstractiveSummariser | 2e8ba9efd6eddd7d1870d540638f05c80bfe9894 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
    """Pick the narrowest integer dtype that can hold every token id."""
    # 65500 leaves headroom below 2**16; unknown or larger vocabularies
    # fall back to 32-bit signed integers.
    if vocab_size is None or vocab_size >= 65500:
        return np.int32
    return np.uint16
def get_available_dataset_impl():
    """Return the names of the supported --dataset-impl storage formats."""
    return ['raw', 'lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
    """Guess the on-disk dataset format at *path*.

    Returns 'raw', 'cached' or 'mmap', or None when the format cannot be
    determined from the files present.
    """
    if IndexedRawTextDataset.exists(path):
        return 'raw'
    if not IndexedDataset.exists(path):
        return None
    # Both binary formats share the .idx/.bin layout; the 8-byte magic at
    # the start of the index file tells them apart.
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
def make_builder(out_file, impl, vocab_size=None):
    """Return a dataset builder for *impl*.

    'mmap' gets the memory-mapped builder with the narrowest dtype that
    fits *vocab_size*; every other impl uses the legacy builder.
    """
    if impl != 'mmap':
        return IndexedDatasetBuilder(out_file)
    return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Instantiate the dataset class matching *impl* for the data at *path*.

    *dictionary* is required only for the 'raw' (plain-text) format.
    Returns None when the files for the requested format do not exist.
    """
    if impl == 'raw' and IndexedRawTextDataset.exists(path):
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    elif impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
    elif impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
    elif impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    return None
def dataset_exists(path, impl):
    """Return True if the files for dataset *path* in format *impl* exist."""
    # Dispatch to the matching class; anything that is not raw/mmap uses
    # the legacy .idx/.bin layout checked by IndexedDataset.
    checkers = {
        'raw': IndexedRawTextDataset.exists,
        'mmap': MMapIndexedDataset.exists,
    }
    return checkers.get(impl, IndexedDataset.exists)(path)
def read_longs(f, n):
    """Read *n* int64 values from binary file object *f* into a new array."""
    out = np.empty(n, dtype=np.int64)
    # readinto fills the array's buffer directly, avoiding a temporary.
    f.readinto(out)
    return out
def write_longs(f, a):
    """Write the values of *a* to binary file object *f* as int64."""
    arr = np.asarray(a, dtype=np.int64)
    f.write(arr)
# Mapping from the on-disk dtype code (stored in the index header) to the
# numpy dtype of the data file.  ``np.float`` was only ever an alias of the
# builtin ``float`` and was removed in numpy 1.24, so the builtin is used
# directly; the on-disk codes and resulting dtypes are unchanged.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: float,
    7: np.double,
    8: np.uint16
}
def code(dtype):
    """Return the on-disk integer code for *dtype*.

    Raises ValueError when the dtype is not part of the serialization
    format.
    """
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Path of the index (.idx) file for the dataset at *prefix_path*."""
    return '{}.idx'.format(prefix_path)
def data_file_path(prefix_path):
    """Path of the raw data (.bin) file for the dataset at *prefix_path*."""
    return '{}.bin'.format(prefix_path)
class IndexedDataset(FairseqDataset):
    """Loader for TorchNet IndexedDataset.
    Data lives in two files: ``<path>.idx`` holds a header plus the
    offset/size tables, ``<path>.bin`` holds the raw tensor bytes.
    Items are read lazily from the .bin file on each access.
    """
    # 8-byte magic identifying the TNT index file format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'
    def __init__(self, path, fix_lua_indexing=False):
        """``fix_lua_indexing`` shifts token ids down by one to convert
        Lua/Torch 1-based ids to 0-based ids."""
        super().__init__()
        self.path = path
        self.fix_lua_indexing = fix_lua_indexing
        # Opened lazily on first __getitem__.
        self.data_file = None
        self.read_index(path)
    def read_index(self, path):
        """Parse the .idx header and load the offset/size tables."""
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                'Index file doesn\'t match expected format. '
                'Make sure that --dataset-impl is configured properly.'
            )
            # All header fields are little-endian unsigned 64-bit values.
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            # _len: number of items; s: total number of size entries.
            self._len, self.s = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)
    def read_data(self, path):
        # Unbuffered so seek + readinto always hit the intended offsets.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)
    def check_index(self, i):
        if i < 0 or i >= self._len:
            raise IndexError('index out of range')
    def __del__(self):
        if self.data_file:
            self.data_file.close()
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        """Read item *i* from disk and return it as a LongTensor."""
        if not self.data_file:
            self.read_data(self.path)
        self.check_index(i)
        # dim_offsets delimit this item's entries in the flat sizes table.
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # data_offsets are in elements, so scale by element_size for bytes.
        self.data_file.seek(self.data_offsets[i] * self.element_size)
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item
    def __len__(self):
        return self._len
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]
    @staticmethod
    def exists(path):
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that pre-loads requested items into memory.
    ``prefetch`` copies the requested indices out of the data file into a
    single contiguous buffer; later ``__getitem__`` calls are then served
    from memory without touching the disk.
    """
    def __init__(self, path, fix_lua_indexing=False):
        super().__init__(path, fix_lua_indexing=fix_lua_indexing)
        # Flat buffer holding all prefetched items back to back.
        self.cache = None
        # Maps dataset index -> start position of that item inside cache.
        self.cache_index = {}
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        """Load *indices* from disk into the in-memory cache."""
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            # Read directly into the cache slice to avoid a temporary copy.
            a = self.cache[ptx: ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        """Return item *i* from the prefetched cache as a LongTensor."""
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # KeyError here means the index was never prefetched.
        ptx = self.cache_index[i]
        np.copyto(a, np.reshape(self.cache[ptx: ptx + a.size], tensor_size))
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item
class IndexedRawTextDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.
    Original lines are also kept in memory"""
    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        # Binarized token tensors, one per input line.
        self.tokens_list = []
        # Raw text lines, kept for get_original_text().
        self.lines = []
        self.sizes = []
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        self.size = len(self.tokens_list)
    def read_data(self, path, dictionary):
        """Read *path* and encode each line with *dictionary*."""
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                # add_if_not_exist=False: unknown words do not grow the
                # dictionary during loading.
                tokens = dictionary.encode_line(
                    line, add_if_not_exist=False,
                    append_eos=self.append_eos, reverse_order=self.reverse_order,
                ).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)
    def check_index(self, i):
        if i < 0 or i >= self.size:
            raise IndexError('index out of range')
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]
    def get_original_text(self, i):
        """Return the raw (un-binarized) text of line *i*."""
        self.check_index(i)
        return self.lines[i]
    def __del__(self):
        pass
    def __len__(self):
        return self.size
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        # NOTE(review): ``self.size`` is assigned an int in __init__, which
        # shadows this method on instances; kept unchanged to preserve
        # existing behavior.
        return self.sizes[index]
    @staticmethod
    def exists(path):
        return os.path.exists(path)
class IndexedDatasetBuilder(object):
    """Incrementally writes an IndexedDataset (.bin data + .idx index).

    Tensors are appended with :meth:`add_item`; :meth:`finalize` writes the
    index file once all items have been added.
    """
    # Bytes per element for each supported dtype.  ``np.float`` was only an
    # alias of the builtin ``float`` (removed in numpy 1.24), so the builtin
    # is used directly; lookup behavior is identical.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        float: 4,
        np.double: 8
    }
    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        # Start offset of each item in the data file, in elements (not
        # bytes), with a leading 0 sentinel.
        self.data_offsets = [0]
        # Offset of each item's entries in the flat ``sizes`` list.
        self.dim_offsets = [0]
        self.sizes = []
        self.element_size = self.element_sizes[self.dtype]
    def add_item(self, tensor):
        """Append one tensor to the data file and record its offsets."""
        # +1 for Lua compatibility
        num_bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        # Integer division: the write is always a whole number of elements,
        # and offsets must stay integral for the int64 index tables.
        # (Also renamed the local that shadowed the ``bytes`` builtin.)
        self.data_offsets.append(self.data_offsets[-1] + num_bytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
    def merge_file_(self, another_file):
        """Append another on-disk IndexedDataset with the same dtype."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        # Shift the merged offsets past the data already written.
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data file across in chunks.
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break
    def finalize(self, index_file):
        """Close the data file and write the .idx index."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
def _warmup_mmap_file(path):
    """Stream *path* once so later mmap accesses hit the OS page cache."""
    chunk_size = 100 * 1024 * 1024
    with open(path, 'rb') as stream:
        # read() returns b'' at EOF, which terminates the iterator.
        for _ in iter(lambda: stream.read(chunk_size), b''):
            pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped dataset: items are served (near) zero-copy from an
    mmap of the .bin file, with sizes and byte pointers taken from the
    .idx file parsed by the nested Index class."""
    class Index(object):
        """Reader/writer for the .idx file of the mmap format."""
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index for *dtype*."""
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    # Header: magic, version (1), then the dtype code byte.
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each item: running sum of previous item
                    # sizes times the per-element byte size.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers
                def write(self, sizes):
                    """Write the item count, the sizes and the pointers."""
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path):
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version
                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                # Tables start right after the header.
                offset = stream.tell()
            _warmup_mmap_file(path)
            # Map the whole index file and view the tables inside it
            # without copying.
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            return self._pointers[i], self._sizes[i]
        def __len__(self):
            return self._len
    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)
    def __getstate__(self):
        # Pickle only the path; the mmaps are re-created on unpickling.
        return self._path
    def __setstate__(self, state):
        self._do_init(state)
    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        ptr, size = self._index[i]
        # Zero-copy view into the mapped data file.
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if self._index.dtype != np.int64:
            # Convert (copies) so callers always receive int64 tensors.
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def supports_prefetch(self):
        return False
    @staticmethod
    def exists(path):
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
class MMapIndexedDatasetBuilder(object):
    """Writes the mmap dataset format: raw bytes to the .bin file as items
    are added, and the .idx index on finalize."""
    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []
    def add_item(self, tensor):
        """Append one tensor's raw bytes and record its size."""
        np_array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order='C'))
        self._sizes.append(np_array.size)
    def merge_file_(self, another_file):
        """Append another on-disk mmap dataset with the same dtype."""
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        # Concatenate data
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)
    def finalize(self, index_file):
        """Close the data file and write the .idx index."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
| 30.631679 | 105 | 0.592611 |
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['raw', 'lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return 'raw'
elif IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == 'raw' and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
return None
def dataset_exists(path, impl):
if impl == 'raw':
return IndexedRawTextDataset.exists(path)
elif impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
class IndexedDataset(FairseqDataset):
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i):
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx: ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, np.reshape(self.cache[ptx: ptx + a.size], tensor_size))
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
self.lines.append(line.strip('\n'))
tokens = dictionary.encode_line(
line, add_if_not_exist=False,
append_eos=self.append_eos, reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError('index out of range')
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(path)
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped dataset of variable-length int sequences.

    Backed by two files (paths derived via the module-level helpers
    ``index_file_path`` / ``data_file_path`` defined elsewhere in this file):
    a raw ``.bin`` data file of concatenated arrays and an ``.idx`` index
    describing each item's size and byte offset.
    """
    class Index(object):
        """Reader/writer for the index file.

        On-disk layout (little endian, as written by ``writer``):
        9-byte magic, uint64 version (always 1), uint8 dtype code,
        uint64 item count, then ``count`` int32 sizes followed by
        ``count`` int64 byte offsets ("pointers") into the data file.
        """
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes the header on entry and
            expects a single ``write(sizes)`` call before exit."""
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    self._file.write(cls._HDR_MAGIC)
                    # Format version 1, then the dtype code (``code`` is a
                    # module-level helper mapping dtype -> int, defined elsewhere).
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Running byte offsets: pointers[i] == sum(sizes[:i]) * itemsize.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers
                def write(self, sizes):
                    """Write the item count, the int32 sizes and the int64 pointers."""
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    # Free each temporary array promptly; they can be large.
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path):
            # Parse the fixed-size header with buffered reads, then mmap the
            # whole file and expose sizes/pointers as zero-copy views.
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version
                # ``dtypes`` is a module-level code -> numpy dtype table
                # defined elsewhere in this file.
                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                # Byte offset where the sizes array begins.
                offset = stream.tell()
            _warmup_mmap_file(path)
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
        def __del__(self):
            # Close the underlying mmap explicitly instead of waiting for GC.
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        # NOTE(review): lru_cache on a method keys on ``self`` and keeps the
        # instance alive for the cache's lifetime (flake8-bugbear B019) --
        # presumably acceptable here since indices live as long as the dataset.
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            return self._pointers[i], self._sizes[i]
        def __len__(self):
            return self._len
    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)
    def __getstate__(self):
        # Pickling transports only the path; buffers are re-mapped on load.
        return self._path
    def __setstate__(self, state):
        self._do_init(state)
    def _do_init(self, path):
        """Open the index and memory-map the data file for *path*."""
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    # NOTE(review): same lru_cache-on-method caveat as Index.__getitem__ (B019).
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        """Return item *i* as an int64 torch tensor (zero-copy when already int64)."""
        ptr, size = self._index[i]
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def supports_prefetch(self):
        # Data is mmap-backed; no explicit prefetch step is needed.
        return False
    @staticmethod
    def exists(path):
        """True if both the index and the data file exist for *path*."""
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
class MMapIndexedDatasetBuilder(object):
    """Streams tensors into a raw data file and emits the matching index on finalize."""
    def __init__(self, out_file, dtype=np.int64):
        # The data file is opened immediately; per-item element counts are
        # accumulated until finalize() writes the index.
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []
    def add_item(self, tensor):
        """Append one tensor's raw bytes to the data file and record its length."""
        arr = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(arr.tobytes(order='C'))
        self._sizes.append(arr.size)
    def merge_file_(self, another_file):
        """Concatenate another dataset with the same dtype onto this one."""
        other_index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert other_index.dtype == self._dtype
        self._sizes.extend(other_index.sizes)
        with open(data_file_path(another_file), 'rb') as src:
            shutil.copyfileobj(src, self._data_file)
    def finalize(self, index_file):
        """Close the data file and write the index that describes it."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index_writer:
            index_writer.write(self._sizes)
| true | true |
f731833e514a80da7c83975fe621231e7f8586e1 | 994 | py | Python | assets/code/practice-example-four.py | david-story/david-story.github.io | 4a02854207b86946befd933d6f23773f6239d7da | [
"MIT"
] | null | null | null | assets/code/practice-example-four.py | david-story/david-story.github.io | 4a02854207b86946befd933d6f23773f6239d7da | [
"MIT"
] | null | null | null | assets/code/practice-example-four.py | david-story/david-story.github.io | 4a02854207b86946befd933d6f23773f6239d7da | [
"MIT"
] | null | null | null | """
Practice File 4
Created by David Story
Description: Some nice examples of things you can do with functions from the following libraries:
- sys
- os
- time
- datetime
"""
import sys
import os
import time
import datetime
# get the time
print(time.time())
# get the current date
print(datetime.date.today())
# prints the current working directory
print(os.getcwd())
# saves your current working directory
your_working_directory = os.getcwd()
# changes directory to the C:\\ drive
os.chdir("C:\\")
# prints the current working directory
print(os.getcwd())
# change to the original current working directory
os.chdir(your_working_directory)
# prints the directory
print(os.getcwd())
# gets number of cores on your cpu
computer_cpu_count = os.cpu_count()
print("This computer has this many CPU cores:", computer_cpu_count)
seconds = 2
time_1 = time.time()
time.sleep(seconds)
time_2 = time.time()
print("The sleep operation took this many seconds:", (time_2-time_1)) | 19.490196 | 97 | 0.740443 |
import sys
import os
import time
import datetime
# Current Unix timestamp (seconds since the epoch, as a float).
print(time.time())
# Today's date (ISO format when printed).
print(datetime.date.today())
# Current working directory.
print(os.getcwd())
# Remember where we started so we can return later.
your_working_directory = os.getcwd()
# Change to the root of the C: drive (Windows-style path; raises elsewhere).
os.chdir("C:\\")
print(os.getcwd())
# Go back to the saved directory.
os.chdir(your_working_directory)
print(os.getcwd())
# Number of logical CPU cores (os.cpu_count() may return None).
computer_cpu_count = os.cpu_count()
print("This computer has this many CPU cores:", computer_cpu_count)
# Time a 2-second sleep by sampling time.time() before and after.
seconds = 2
time_1 = time.time()
time.sleep(seconds)
time_2 = time.time()
print("The sleep operation took this many seconds:", (time_2-time_1)) | true | true |
f731835a4a4d1abb694ef9dec2e9777f59218f60 | 390 | py | Python | NewRelicApiParser/REST/AlertsViolations/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | null | null | null | NewRelicApiParser/REST/AlertsViolations/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | 1 | 2021-07-30T17:32:37.000Z | 2021-07-30T17:32:37.000Z | NewRelicApiParser/REST/AlertsViolations/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | null | null | null | from NewRelicApiParser.Base import BaseNewRelic
class AlertsViolations(BaseNewRelic):
def __init__(self, API_KEY):
super().__init__(API_KEY)
def get_list(self, options: dict = {}) -> dict:
"""
fetch the alert violations for new relic
"""
url = self.BASE_URI + '/alerts_violations.json'
return super().get_data(url, options=options) | 30 | 55 | 0.651282 | from NewRelicApiParser.Base import BaseNewRelic
class AlertsViolations(BaseNewRelic):
def __init__(self, API_KEY):
super().__init__(API_KEY)
def get_list(self, options: dict = {}) -> dict:
url = self.BASE_URI + '/alerts_violations.json'
return super().get_data(url, options=options) | true | true |
f731839f44074592ff4c8fc120911ce2b706f06b | 2,652 | py | Python | utils/audio.py | deciding/Voicefilter | dda34da9d1cfca48102b2d1b4274bfd76e5a2e1c | [
"Apache-2.0",
"MIT"
] | 3 | 2020-03-25T06:27:20.000Z | 2021-02-28T12:59:14.000Z | utils/audio.py | deciding/Voicefilter | dda34da9d1cfca48102b2d1b4274bfd76e5a2e1c | [
"Apache-2.0",
"MIT"
] | null | null | null | utils/audio.py | deciding/Voicefilter | dda34da9d1cfca48102b2d1b4274bfd76e5a2e1c | [
"Apache-2.0",
"MIT"
] | null | null | null | # adapted from Keith Ito's tacotron implementation
# https://github.com/keithito/tacotron/blob/master/util/audio.py
import librosa
import numpy as np
class Audio():
    """Waveform <-> spectrogram helpers built on librosa, configured by *hp*.

    *hp* is a hyper-parameter object; this class reads ``hp.audio.*``
    (sample_rate, n_fft, hop_length, win_length, ref_level_db, min_level_db)
    and ``hp.embedder.*`` (n_fft, num_mels).
    """
    def __init__(self, hp):
        self.hp = hp
        # Mel filterbank for the embedder front end.
        self.mel_basis = librosa.filters.mel(sr=hp.audio.sample_rate,
                                             n_fft=hp.embedder.n_fft,
                                             n_mels=hp.embedder.num_mels)
        # Fixed 80-band mel filterbank for wav2mel().
        self.audio_mel_basis = librosa.filters.mel(sr=hp.audio.sample_rate,
                                                   n_fft=hp.audio.n_fft,
                                                   n_mels=80)
    def get_mel(self, y):
        """Log10 mel power spectrogram for the embedder, shape [num_mels, frames]."""
        y = librosa.core.stft(y=y, n_fft=self.hp.embedder.n_fft,
                              hop_length=self.hp.audio.hop_length,
                              win_length=self.hp.audio.win_length,
                              window='hann')
        magnitudes = np.abs(y) ** 2
        # 1e-6 floor avoids log10(0).
        mel = np.log10(np.dot(self.mel_basis, magnitudes) + 1e-6)
        return mel
    def wav2spec(self, y):
        """Return (normalized log-magnitude, phase), both as [time, freq]."""
        D = self.stft(y)
        S = self.amp_to_db(np.abs(D)) - self.hp.audio.ref_level_db
        S, D = self.normalize(S), np.angle(D)
        S, D = S.T, D.T # to make [time, freq]
        return S, D
    def wav2mel(self, y):
        """Return a normalized 80-band mel log spectrogram as [time, mel]."""
        D = self.stft(y)
        S = np.dot(self.audio_mel_basis, np.abs(D))
        S = self.amp_to_db(S) - self.hp.audio.ref_level_db
        S = self.normalize(S)
        return S.T
    def spec2wav(self, spectrogram, phase):
        """Invert wav2spec() output back to a waveform using the given phase."""
        spectrogram, phase = spectrogram.T, phase.T
        # used during inference only
        # spectrogram: enhanced output
        # phase: use noisy input's phase, so no GLA is required
        S = self.db_to_amp(self.denormalize(spectrogram) + self.hp.audio.ref_level_db)
        return self.istft(S, phase)
    def stft(self, y):
        return librosa.stft(y=y, n_fft=self.hp.audio.n_fft,
                            hop_length=self.hp.audio.hop_length,
                            win_length=self.hp.audio.win_length)
    def istft(self, mag, phase):
        # Recombine magnitude and phase into a complex spectrogram first.
        stft_matrix = mag * np.exp(1j*phase)
        return librosa.istft(stft_matrix,
                             hop_length=self.hp.audio.hop_length,
                             win_length=self.hp.audio.win_length)
    def amp_to_db(self, x):
        # 1e-5 floor avoids log10(0).
        return 20.0 * np.log10(np.maximum(1e-5, x))
    def db_to_amp(self, x):
        return np.power(10.0, x * 0.05)
    def normalize(self, S):
        # Map [min_level_db, 0] dB onto [0, 1], clipping outliers.
        return np.clip(S / -self.hp.audio.min_level_db, -1.0, 0.0) + 1.0
    def denormalize(self, S):
        # Inverse of normalize().
        return (np.clip(S, 0.0, 1.0) - 1.0) * -self.hp.audio.min_level_db
| 36.833333 | 86 | 0.549397 |
# https://github.com/keithito/tacotron/blob/master/util/audio.py
import librosa
import numpy as np
class Audio():
    """Waveform <-> spectrogram helpers built on librosa, configured by *hp*.

    *hp* is a hyper-parameter object; this class reads ``hp.audio.*``
    (sample_rate, n_fft, hop_length, win_length, ref_level_db, min_level_db)
    and ``hp.embedder.*`` (n_fft, num_mels).
    """
    def __init__(self, hp):
        self.hp = hp
        # Mel filterbank for the embedder front end.
        self.mel_basis = librosa.filters.mel(sr=hp.audio.sample_rate,
                                             n_fft=hp.embedder.n_fft,
                                             n_mels=hp.embedder.num_mels)
        # Fixed 80-band mel filterbank for wav2mel().
        self.audio_mel_basis = librosa.filters.mel(sr=hp.audio.sample_rate,
                                                   n_fft=hp.audio.n_fft,
                                                   n_mels=80)
    def get_mel(self, y):
        """Log10 mel power spectrogram for the embedder, shape [num_mels, frames]."""
        y = librosa.core.stft(y=y, n_fft=self.hp.embedder.n_fft,
                              hop_length=self.hp.audio.hop_length,
                              win_length=self.hp.audio.win_length,
                              window='hann')
        magnitudes = np.abs(y) ** 2
        # 1e-6 floor avoids log10(0).
        mel = np.log10(np.dot(self.mel_basis, magnitudes) + 1e-6)
        return mel
    def wav2spec(self, y):
        """Return (normalized log-magnitude, phase), both as [time, freq]."""
        D = self.stft(y)
        S = self.amp_to_db(np.abs(D)) - self.hp.audio.ref_level_db
        S, D = self.normalize(S), np.angle(D)
        S, D = S.T, D.T # to make [time, freq]
        return S, D
    def wav2mel(self, y):
        """Return a normalized 80-band mel log spectrogram as [time, mel]."""
        D = self.stft(y)
        S = np.dot(self.audio_mel_basis, np.abs(D))
        S = self.amp_to_db(S) - self.hp.audio.ref_level_db
        S = self.normalize(S)
        return S.T
    def spec2wav(self, spectrogram, phase):
        """Invert wav2spec() output back to a waveform using the given phase."""
        spectrogram, phase = spectrogram.T, phase.T
        # used during inference only
        # spectrogram: enhanced output
        # phase: use noisy input's phase, so no GLA is required
        S = self.db_to_amp(self.denormalize(spectrogram) + self.hp.audio.ref_level_db)
        return self.istft(S, phase)
    def stft(self, y):
        return librosa.stft(y=y, n_fft=self.hp.audio.n_fft,
                            hop_length=self.hp.audio.hop_length,
                            win_length=self.hp.audio.win_length)
    def istft(self, mag, phase):
        # Recombine magnitude and phase into a complex spectrogram first.
        stft_matrix = mag * np.exp(1j*phase)
        return librosa.istft(stft_matrix,
                             hop_length=self.hp.audio.hop_length,
                             win_length=self.hp.audio.win_length)
    def amp_to_db(self, x):
        # 1e-5 floor avoids log10(0).
        return 20.0 * np.log10(np.maximum(1e-5, x))
    def db_to_amp(self, x):
        return np.power(10.0, x * 0.05)
    def normalize(self, S):
        # Map [min_level_db, 0] dB onto [0, 1], clipping outliers.
        return np.clip(S / -self.hp.audio.min_level_db, -1.0, 0.0) + 1.0
    def denormalize(self, S):
        # Inverse of normalize().
        return (np.clip(S, 0.0, 1.0) - 1.0) * -self.hp.audio.min_level_db
| true | true |
f73183abb453edfa3f42627d23629fd71241bbd0 | 1,790 | py | Python | keymint_cli/verb/__init__.py | keymint/keymint_cli | 977995ade32cf2b3a6394bda1c05f80a9fcc3369 | [
"Apache-2.0"
] | null | null | null | keymint_cli/verb/__init__.py | keymint/keymint_cli | 977995ade32cf2b3a6394bda1c05f80a9fcc3369 | [
"Apache-2.0"
] | null | null | null | keymint_cli/verb/__init__.py | keymint/keymint_cli | 977995ade32cf2b3a6394bda1c05f80a9fcc3369 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 Dirk Thomas
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keymint_cli.plugin_system import instantiate_extensions
from keymint_cli.plugin_system import PLUGIN_SYSTEM_VERSION
from keymint_cli.plugin_system import satisfies_version
class VerbExtension:
    """Base interface for verb extensions.

    Subclasses must define:
      * ``NAME`` -- filled in with the entry-point name at load time
      * ``main`` -- the verb's entry point
    """
    NAME = None
    EXTENSION_POINT_VERSION = '0.1'
    def __init__(self):
        super().__init__()
        # Fail fast if the loaded plugin-system API is incompatible.
        satisfies_version(PLUGIN_SYSTEM_VERSION, '^0.1')
def get_verb_extensions(name):
    """Instantiate all extensions registered under *name* and stamp each
    with its entry-point name via the ``NAME`` attribute."""
    extensions = instantiate_extensions(name)
    for entry_point_name, extension in extensions.items():
        extension.NAME = entry_point_name
    return extensions
def add_task_arguments(parser, task_name):
    """Add one argparse argument group per verb extension of *task_name*.

    For each discovered plugin, an argument group titled after the plugin is
    created; if the plugin defines ``add_<task_name>_arguments(group)``, it is
    called to populate the group.

    :param parser: argparse.ArgumentParser (or compatible) to extend
    :param task_name: task whose extensions should contribute arguments
    """
    plugins = get_verb_extensions(task_name)
    for plugin_name, plugin in plugins.items():
        # Explicit .format() instead of .format_map(locals()): it does not
        # silently depend on local variable names matching the template and
        # fails loudly if the placeholder is ever renamed.
        group = parser.add_argument_group(
            title="Arguments for '{plugin_name}' packages".format(
                plugin_name=plugin_name))
        func = getattr(plugin, 'add_%s_arguments' % task_name, None)
        if func:
            func(group)
| 31.964286 | 74 | 0.721229 |
from keymint_cli.plugin_system import instantiate_extensions
from keymint_cli.plugin_system import PLUGIN_SYSTEM_VERSION
from keymint_cli.plugin_system import satisfies_version
class VerbExtension:
    """Base interface for verb extensions."""
    # Filled in with the plugin's entry-point name at load time.
    NAME = None
    EXTENSION_POINT_VERSION = '0.1'
    def __init__(self):
        super(VerbExtension, self).__init__()
        # Fail fast if the loaded plugin-system API is incompatible.
        satisfies_version(PLUGIN_SYSTEM_VERSION, '^0.1')
def get_verb_extensions(name):
    """Instantiate all extensions registered under *name* and stamp each
    with its entry-point name via the ``NAME`` attribute."""
    extensions = instantiate_extensions(name)
    for entry_point_name, extension in extensions.items():
        extension.NAME = entry_point_name
    return extensions
def add_task_arguments(parser, task_name):
    """Add one argparse argument group per verb extension of *task_name*.

    For each discovered plugin, an argument group titled after the plugin is
    created; if the plugin defines ``add_<task_name>_arguments(group)``, it is
    called to populate the group.

    :param parser: argparse.ArgumentParser (or compatible) to extend
    :param task_name: task whose extensions should contribute arguments
    """
    plugins = get_verb_extensions(task_name)
    for plugin_name, plugin in plugins.items():
        # Explicit .format() instead of .format_map(locals()): it does not
        # silently depend on local variable names matching the template and
        # fails loudly if the placeholder is ever renamed.
        group = parser.add_argument_group(
            title="Arguments for '{plugin_name}' packages".format(
                plugin_name=plugin_name))
        func = getattr(plugin, 'add_%s_arguments' % task_name, None)
        if func:
            func(group)
| true | true |
f7318473c64262e9daec3211788af8404125cccc | 722 | py | Python | webview/monitor/migrations/0002_auto_20191022_1952.py | dw0rdptr/2019_IoT_GoToDouble | 7f8a3005710b57199e918be1a0d8b9047918bce2 | [
"MIT"
] | 4 | 2019-10-23T04:43:09.000Z | 2019-10-24T11:25:37.000Z | webview/monitor/migrations/0002_auto_20191022_1952.py | dw0rdptr/2019_IoT_GoToDouble | 7f8a3005710b57199e918be1a0d8b9047918bce2 | [
"MIT"
] | 10 | 2019-10-22T12:06:42.000Z | 2019-10-24T17:39:31.000Z | webview/monitor/migrations/0002_auto_20191022_1952.py | epikjjh/2019_IoT_GoToDouble | 1d80eb87b7d59da90a7d0a9225209bfb4704a045 | [
"MIT"
] | 5 | 2019-10-22T08:04:26.000Z | 2019-10-24T17:31:40.000Z | # Generated by Django 2.2.6 on 2019-10-22 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Sposition model and rename Position to Fposition."""
    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('monitor', '0001_initial'),
    ]
    operations = [
        # New table holding angle/distance samples with a creation timestamp.
        migrations.CreateModel(
            name='Sposition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('angle', models.FloatField()),
                ('distance', models.FloatField()),
                # Set once when the row is created (auto_now_add).
                ('time', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # The existing Position model becomes Fposition.
        migrations.RenameModel(
            old_name='Position',
            new_name='Fposition',
        ),
    ]
| 26.740741 | 114 | 0.548476 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Sposition model and rename Position to Fposition."""
    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('monitor', '0001_initial'),
    ]
    operations = [
        # New table holding angle/distance samples with a creation timestamp.
        migrations.CreateModel(
            name='Sposition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('angle', models.FloatField()),
                ('distance', models.FloatField()),
                # Set once when the row is created (auto_now_add).
                ('time', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # The existing Position model becomes Fposition.
        migrations.RenameModel(
            old_name='Position',
            new_name='Fposition',
        ),
    ]
| true | true |
f731847f66654d267e7a80dca13e796267689882 | 726 | py | Python | FlatCAMPool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 63 | 2016-02-24T20:42:58.000Z | 2022-03-13T09:10:17.000Z | FlatCAMPool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 7 | 2017-02-03T22:05:22.000Z | 2022-03-01T21:16:26.000Z | FlatCAMPool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 27 | 2016-02-24T20:42:58.000Z | 2022-02-17T02:40:16.000Z | from PyQt4 import QtCore
from multiprocessing import Pool
import dill
def run_dill_encoded(what):
    """Worker-side entry point: unpickle a dill-encoded (fun, args) pair and call it.

    dill lets callables travel through multiprocessing that plain pickle
    cannot serialize.  NOTE: this file uses Python 2 syntax (print statements).
    """
    fun, args = dill.loads(what)
    print "load", fun, args
    return fun(*args)
def apply_async(pool, fun, args):
    """Submit fun(*args) to *pool* by dill-encoding the call; returns the async result."""
    print "...", fun, args
    print "dumps", dill.dumps((fun, args))
    return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
def func1():
    """Trivial demo task (appears to be a placeholder -- confirm)."""
    print "func"
class WorkerPool(QtCore.QObject):
    """Qt object owning a fixed 2-process multiprocessing pool."""
    def __init__(self):
        super(WorkerPool, self).__init__()
        self.pool = Pool(2)
    def add_task(self, task):
        """Dispatch a task to the pool.

        NOTE(review): *task* is currently ignored -- func1 is always
        submitted; the commented-out lines suggest
        task['fcn'](*task['params']) was the intended payload.  Verify
        before relying on this method.
        """
        print "adding task", task
        # task['fcn'](*task['params'])
        # print self.pool.map(task['fcn'], task['params'])
        apply_async(self.pool, func1, ())
| 25.034483 | 71 | 0.630854 | from PyQt4 import QtCore
from multiprocessing import Pool
import dill
def run_dill_encoded(what):
    """Worker-side entry point: unpickle a dill-encoded (fun, args) pair and call it.

    dill lets callables travel through multiprocessing that plain pickle
    cannot serialize.  NOTE: this file uses Python 2 syntax (print statements).
    """
    fun, args = dill.loads(what)
    print "load", fun, args
    return fun(*args)
def apply_async(pool, fun, args):
    """Submit fun(*args) to *pool* by dill-encoding the call; returns the async result."""
    print "...", fun, args
    print "dumps", dill.dumps((fun, args))
    return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
def func1():
    """Trivial demo task (appears to be a placeholder -- confirm)."""
    print "func"
class WorkerPool(QtCore.QObject):
    """Qt object owning a fixed 2-process multiprocessing pool."""
    def __init__(self):
        super(WorkerPool, self).__init__()
        self.pool = Pool(2)
    def add_task(self, task):
        """Dispatch a task to the pool.

        NOTE(review): *task* is currently ignored -- func1 is always
        submitted regardless of the argument.  Verify before relying on
        this method.
        """
        print "adding task", task
        apply_async(self.pool, func1, ())
| false | true |
f73184b05282cc90c8544a41fce552460381a5d0 | 17,311 | py | Python | src/k2hash/tests/test_k2hash.py | ggtakec/k2hash_python | 9d20d36aaf28d0dd2497d39b43286d50e7200fcc | [
"MIT"
] | 1 | 2022-03-02T10:27:43.000Z | 2022-03-02T10:27:43.000Z | src/k2hash/tests/test_k2hash.py | ggtakec/k2hash_python | 9d20d36aaf28d0dd2497d39b43286d50e7200fcc | [
"MIT"
] | null | null | null | src/k2hash/tests/test_k2hash.py | ggtakec/k2hash_python | 9d20d36aaf28d0dd2497d39b43286d50e7200fcc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# K2hash Python Driver
#
# Copyright (c) 2022 Yahoo Japan Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# AUTHOR: Hirotaka Wakabayashi
# CREATE: Tue Feb 08 2022
# REVISION:
#
import unittest
import k2hash
import logging
import ctypes
import time
class TestK2hashIterator(unittest.TestCase):
    """Tests for k2hash.K2hashIterator (whole-DB and per-key iteration).

    NOTE(review): the original tests called ``assertTrue(x, y)``; the second
    positional argument of assertTrue is the failure *message*, not an
    expected value, so those checks could never fail.  Rewritten with real
    assertions (assertTrue(x) / assertEqual(x, y)) per the evident intent.
    """
    def test_K2hashIterator_construct(self):
        """An iterator over the whole database yields the stored key."""
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        ki = k2hash.K2hashIterator(db)
        self.assertIsInstance(ki, k2hash.K2hashIterator)
        self.assertEqual(key, next(ki))
        db.close()
    def test_K2hashIterator_construct_key(self):
        """An iterator rooted at a key yields that key's subkeys."""
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey1"
        subval = "subval1"
        self.assertTrue(db.add_subkey(key, subkey, subval))
        ki = k2hash.K2hashIterator(db, key)
        self.assertIsInstance(ki, k2hash.K2hashIterator)
        self.assertEqual(subkey, next(ki))
        db.close()
class TestK2hash(unittest.TestCase):
    """Tests for the k2hash.K2hash key/value store wrapper.

    NOTE(review): the original tests called ``assertTrue(x, y)`` throughout.
    assertTrue's second positional argument is the failure *message*, not an
    expected value, so none of those checks could ever fail.  They have been
    rewritten as ``assertTrue(x)`` / ``assertEqual(x, y)`` /
    ``assertNotEqual`` / ``assertIsInstance`` according to the evident intent
    of each check.
    """
    def test_K2hash_construct(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        db.close()
    def test_K2hash_get_iterator(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        ki = db.get_iterator()
        # Note: handle should be undefined before setting no keys.
        self.assertIsInstance(ki, k2hash.K2hashIterator)
        db.close()
    def test_K2hash_set(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.close()
    def test_K2hash_get(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.close()
    @unittest.skip("skipping because no plugin lib prepared")
    def test_K2hash_add_attribute_plugin_lib(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.close()
    def test_K2hash_add_decryption_password(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        password = "secretstring"
        self.assertTrue(db.add_decryption_password(password))
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key, password), val)
        # NOTE(review): original expectation -- get without the password
        # returns "".  Confirm against the library's actual behavior.
        self.assertEqual(db.get(key), "")
        db.close()
    def test_K2hash_add_subkey(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey"
        subval = "subval"
        self.assertTrue(db.set(subkey, subval))
        self.assertEqual(db.get(subkey), subval)
        self.assertTrue(db.add_subkey(key, subkey, subval))
        self.assertEqual(db.get_subkeys(key), [subkey])
        db.close()
    def test_K2hash_begin_tx(self):
        db = k2hash.K2hash()
        # for debugging, uncomment the following line.
        # db.set_log_level(k2hash.LogLevel.ERROR)
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        k2h_tx_log = "test.log"
        self.assertTrue(db.begin_tx(k2h_tx_log))
        # TODO how to check whether transaction is enabled.
        db.close()
    def test_K2hash_close(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.close())
    def test_K2hash_create(self):
        k2h_file = "test.k2h"
        self.assertTrue(k2hash.K2hash.create(k2h_file))
    def test_K2hash_dump_to_file(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        k2h_file = "test.k2h"
        self.assertTrue(db.dump_to_file(k2h_file))
        db.close()
    def test_K2hash_enable_encryption(self):
        db = k2hash.K2hash()
        # db.set_log_level(k2hash.LogLevel.ERROR)
        self.assertIsInstance(db, k2hash.K2hash)
        password = "secretstring"
        # Calls set_default_encryption_password before calling enable_encryption
        self.assertTrue(db.set_default_encryption_password(password))
        self.assertTrue(db.enable_encryption())
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key, password), val)
        db.close()
    def test_K2hash_enable_history(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        self.assertTrue(db.enable_history())
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.close()
    def test_K2hash_enable_mtime(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        self.assertTrue(db.enable_mtime())
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.close()
    def test_K2hash_get_attributes(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        attr_key = "attrkey1"
        attr_val = "attrval1"
        attrs = {attr_key: attr_val}
        self.assertTrue(db.set_attribute(key, attr_key, attr_val))
        self.assertEqual(db.get_attributes(key), attrs)
        db.close()
    def test_K2hash_handle(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        # TODO insufficient check
        self.assertNotEqual(db.handle, k2hash.K2hash.K2H_INVALID_HANDLE)
        db.close()
    def test_K2hash_get_subkeys(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey"
        subval = "subval"
        self.assertTrue(db.set(subkey, subval))
        self.assertEqual(db.get(subkey), subval)
        self.assertTrue(db.add_subkey(key, subkey, subval))
        self.assertEqual(db.get_subkeys(key), [subkey])
        db.close()
    def test_K2hash_get_tx_file_fd(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        k2h_tx_log = "test.log"
        self.assertTrue(db.begin_tx(k2h_tx_log))
        # TODO how to check whether transaction is enabled.
        self.assertNotEqual(db.get_tx_file_fd(), k2hash.K2hash.K2H_INVALID_HANDLE)
        db.close()
    def test_K2hash_get_tx_pool_size(self):
        self.assertEqual(k2hash.K2hash.get_tx_pool_size(), 0)
    def test_K2hash_libk2hash(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertIsInstance(db.libk2hash, ctypes.CDLL)
        db.close()
    def test_K2hash_libc(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertIsInstance(db.libc, ctypes.CDLL)
        db.close()
    def test_K2hash_load_from_file(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        k2h_file = "test.k2h"
        self.assertTrue(db.dump_to_file(k2h_file))
        db.close()
        db = None
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        self.assertTrue(db.load_from_file(k2h_file))
        self.assertEqual(db.get(key), val)
        db.close()
    def test_K2hash_print_attribute_plugins(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.print_attribute_plugins())
        db.close()
    def test_K2hash_print_attributes(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.print_attributes())
        db.close()
    def test_K2hash_print_data_stats(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.print_data_stats())
        db.close()
    def test_K2hash_print_table_stats(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.print_table_stats())
        db.close()
    def test_K2hash_remove(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.remove(key))
        self.assertEqual(db.get(key), "")
        db.close()
    def test_K2hash_remove_subkeys(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey"
        subval = "subval"
        self.assertTrue(db.set(subkey, subval))
        self.assertEqual(db.get(subkey), subval)
        self.assertTrue(db.add_subkey(key, subkey, subval))
        self.assertEqual(db.get_subkeys(key), [subkey])
        self.assertTrue(db.remove_subkeys(key, [subkey]))
        self.assertEqual(db.get_subkeys(key), [])
        self.assertEqual(db.get(subkey), "")
        db.close()
    def test_K2hash_rename(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        newkey = key[::-1]
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertTrue(db.rename(key, newkey))
        self.assertEqual(db.get(newkey), val)
        db.close()
    def test_K2hash_set_attribute(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        attr_key = "attrkey1"
        attr_val = "attrval1"
        attrs = {attr_key: attr_val}
        self.assertTrue(db.set_attribute(key, attr_key, attr_val))
        self.assertEqual(db.get_attributes(key), attrs)
        db.close()
    def test_K2hash_set_encryption_password_file(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        # 1. make the password file for test
        password_file = "password.txt"
        password = "secretstring"
        import os
        with open(password_file, 'w') as f:
            print("{}".format(password), file=f)
        # 2. call the api
        self.assertTrue(db.set_encryption_password_file(password_file))
        db.close()
    def test_K2hash_set_expiration_duration(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        duration = 3
        self.assertTrue(db.set_expiration_duration(duration))
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        time.sleep(duration + 1)
        self.assertEqual(db.get(key), "")
        db.close()
    def test_K2hash_set_log_level(self):
        db = k2hash.K2hash()
        db.set_log_level(k2hash.LogLevel.ERROR)
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        db.set_log_level(k2hash.LogLevel.WARNING)
        db.close()
    def test_K2hash_set_subkeys(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey"
        subval = "subval"
        self.assertTrue(db.set_subkeys(key, {subkey: subval}))
        self.assertEqual(db.get_subkeys(key), [subkey])
        db.close()
    def test_K2hash_set_tx_pool_size(self):
        self.assertTrue(k2hash.K2hash.set_tx_pool_size(1))
        self.assertEqual(k2hash.K2hash.get_tx_pool_size(), 1)
    def test_K2hash_stop_tx(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        k2h_tx_log = "test.log"
        self.assertTrue(db.begin_tx(k2h_tx_log))
        # TODO how to check whether transaction is enabled.
        self.assertNotEqual(db.get_tx_file_fd(), k2hash.K2hash.K2H_INVALID_HANDLE)
        self.assertTrue(db.stop_tx())
        db.close()
    def test_K2hash_repr(self):
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        self.assertRegex(repr(db), "<_K2hash _k2hfile=.*")
        db.close()
    def test_K2hash_version(self):
        self.assertIsNone(k2hash.K2hash.version())
if __name__ == '__main__':
unittest.main()
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
| 34.691383 | 80 | 0.617122 |
import unittest
import k2hash
import logging
import ctypes
import time
class TestK2hashIterator(unittest.TestCase):
    """Tests for k2hash.K2hashIterator (whole-DB and per-key iteration).

    NOTE(review): the original tests called ``assertTrue(x, y)``; the second
    positional argument of assertTrue is the failure *message*, not an
    expected value, so those checks could never fail.  Rewritten with real
    assertions (assertTrue(x) / assertEqual(x, y)) per the evident intent.
    """
    def test_K2hashIterator_construct(self):
        """An iterator over the whole database yields the stored key."""
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        ki = k2hash.K2hashIterator(db)
        self.assertIsInstance(ki, k2hash.K2hashIterator)
        self.assertEqual(key, next(ki))
        db.close()
    def test_K2hashIterator_construct_key(self):
        """An iterator rooted at a key yields that key's subkeys."""
        db = k2hash.K2hash()
        self.assertIsInstance(db, k2hash.K2hash)
        key = "hello"
        val = "world"
        self.assertTrue(db.set(key, val))
        self.assertEqual(db.get(key), val)
        subkey = "subkey1"
        subval = "subval1"
        self.assertTrue(db.add_subkey(key, subkey, subval))
        ki = k2hash.K2hashIterator(db, key)
        self.assertIsInstance(ki, k2hash.K2hashIterator)
        self.assertEqual(subkey, next(ki))
        db.close()
class TestK2hash(unittest.TestCase):
def test_K2hash_construct(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
db.close()
def test_K2hash_get_iterator(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
ki = db.get_iterator()
self.assertTrue(isinstance(ki, k2hash.K2hashIterator))
db.close()
def test_K2hash_set(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.close()
def test_K2hash_get(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.close()
@unittest.skip("skipping because no plugin lib prepared")
def test_K2hash_add_attribute_plugin_lib(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.close()
def test_K2hash_add_decryption_password(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
password = "secretstring"
self.assertTrue(db.add_decryption_password(password), True)
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key, password), val)
self.assertTrue(db.get(key), "")
db.close()
def test_K2hash_add_subkey(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
subkey = "subkey"
subval = "subval"
self.assertTrue(db.set(subkey, subval), True)
self.assertTrue(db.get(subkey), subval)
self.assertTrue(db.add_subkey(key, subkey, subval), True)
self.assertTrue(db.get_subkeys(key) == [subkey])
db.close()
def test_K2hash_begin_tx(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
k2h_tx_log = "test.log"
self.assertTrue(db.begin_tx(k2h_tx_log), True)
db.close()
def test_K2hash_close(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.close(), True)
def test_K2hash_create(self):
k2h_file = "test.k2h"
self.assertTrue(k2hash.K2hash.create(k2h_file), True)
def test_K2hash_dump_to_file(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
k2h_file = "test.k2h"
self.assertTrue(db.dump_to_file(k2h_file), val)
db.close()
def test_K2hash_enable_encryption(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
password = "secretstring"
self.assertTrue(db.set_default_encryption_password(password), True)
self.assertTrue(db.enable_encryption(), True)
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key, password), val)
db.close()
def test_K2hash_enable_history(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
self.assertTrue(db.enable_history(), True)
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.close()
def test_K2hash_enable_mtime(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
self.assertTrue(db.enable_mtime(), True)
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.close()
def test_K2hash_get_attributes(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
attr_key = "attrkey1"
attr_val = "attrval1"
attrs = {attr_key: attr_val}
self.assertTrue(db.set_attribute(key, attr_key, attr_val), True)
self.assertTrue(db.get_attributes(key), attrs)
db.close()
def test_K2hash_handle(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.handle != k2hash.K2hash.K2H_INVALID_HANDLE)
db.close()
def test_K2hash_get_subkeys(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
subkey = "subkey"
subval = "subval"
self.assertTrue(db.set(subkey, subval), True)
self.assertTrue(db.get(subkey), subval)
self.assertTrue(db.add_subkey(key, subkey, subval), True)
self.assertTrue(db.get_subkeys(key) == [subkey])
db.close()
def test_K2hash_get_tx_file_fd(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
k2h_tx_log = "test.log"
self.assertTrue(db.begin_tx(k2h_tx_log), True)
self.assertTrue(db.get_tx_file_fd() != k2hash.K2hash.K2H_INVALID_HANDLE)
db.close()
def test_K2hash_get_tx_pool_size(self):
self.assertTrue(k2hash.K2hash.get_tx_pool_size() == 0)
def test_K2hash_libk2hash(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(isinstance(db.libk2hash, ctypes.CDLL))
db.close()
def test_K2hash_libc(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(isinstance(db.libc, ctypes.CDLL))
db.close()
def test_K2hash_load_from_file(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
k2h_file = "test.k2h"
self.assertTrue(db.dump_to_file(k2h_file), val)
db.close()
db = None
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
self.assertTrue(db.load_from_file(k2h_file), val)
self.assertTrue(db.get(key), val)
db.close()
def test_K2hash_print_attribute_plugins(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.print_attribute_plugins(), True)
db.close()
def test_K2hash_print_attributes(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.print_attributes(), True)
db.close()
def test_K2hash_print_data_stats(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.print_data_stats(), True)
db.close()
def test_K2hash_print_table_stats(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.print_table_stats(), True)
db.close()
def test_K2hash_remove(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.remove(key), True)
self.assertTrue(db.get(key) == "")
db.close()
def test_K2hash_remove_subkeys(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
subkey = "subkey"
subval = "subval"
self.assertTrue(db.set(subkey, subval), True)
self.assertTrue(db.get(subkey), subval)
self.assertTrue(db.add_subkey(key, subkey, subval), True)
self.assertTrue(db.get_subkeys(key) == [subkey])
self.assertTrue(db.remove_subkeys(key, [subkey]), True)
self.assertTrue(db.get_subkeys(key) == [])
self.assertTrue(db.get(subkey) == "")
db.close()
def test_K2hash_rename(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
newkey = key[::-1]
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertTrue(db.rename(key, newkey), val)
self.assertTrue(db.get(newkey), val)
db.close()
def test_K2hash_set_attribute(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
attr_key = "attrkey1"
attr_val = "attrval1"
attrs = {attr_key: attr_val}
self.assertTrue(db.set_attribute(key, attr_key, attr_val), True)
self.assertTrue(db.get_attributes(key), attrs)
db.close()
def test_K2hash_set_encryption_password_file(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
password_file="password.txt"
password = "secretstring"
import os
with open(password_file, 'w') as f:
print("{}".format(password), file=f)
self.assertTrue(db.set_encryption_password_file(password_file), True)
db.close()
def test_K2hash_set_expiration_duration(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
duration = 3
self.assertTrue(db.set_expiration_duration(duration), True)
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
time.sleep(duration + 1)
self.assertTrue(db.get(key) == "")
db.close()
def test_K2hash_set_log_level(self):
db = k2hash.K2hash()
db.set_log_level(k2hash.LogLevel.ERROR)
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
db.set_log_level(k2hash.LogLevel.WARNING)
db.close()
def test_K2hash_set_subkeys(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
subkey = "subkey"
subval = "subval"
self.assertTrue(db.set_subkeys(key, {subkey: subval}), True)
self.assertTrue(db.get_subkeys(key) == [subkey])
db.close()
def test_K2hash_set_tx_pool_size(self):
self.assertTrue(k2hash.K2hash.set_tx_pool_size(1), True)
self.assertTrue(k2hash.K2hash.get_tx_pool_size() == 1)
def test_K2hash_stop_tx(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
k2h_tx_log = "test.log"
self.assertTrue(db.begin_tx(k2h_tx_log), True)
self.assertTrue(db.get_tx_file_fd() != k2hash.K2hash.K2H_INVALID_HANDLE)
self.assertTrue(db.stop_tx(), True)
db.close()
def test_K2hash_repr(self):
db = k2hash.K2hash()
self.assertTrue(isinstance(db, k2hash.K2hash))
key = "hello"
val = "world"
self.assertTrue(db.set(key, val), True)
self.assertTrue(db.get(key), val)
self.assertRegex(repr(db), "<_K2hash _k2hfile=.*")
db.close()
def test_K2hash_version(self):
self.assertTrue(k2hash.K2hash.version() == None)
if __name__ == '__main__':
unittest.main()
| true | true |
f73184b337ca7e743751238f2a113e71bb8c75e0 | 7,364 | py | Python | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | 1 | 2021-05-05T14:19:46.000Z | 2021-05-05T14:19:46.000Z | # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLDB specific tests, common tests should be in test_dbs.py"""
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from sqlalchemy.orm import Session
from mlrun.api.db.sqldb.db import SQLDB
from mlrun.api.db.sqldb.models import _tagged
from tests.conftest import new_run
@contextmanager
def patch(obj, **kw):
old = {}
for k, v in kw.items():
old[k] = getattr(obj, k)
setattr(obj, k, v)
try:
yield obj
finally:
for k, v in old.items():
setattr(obj, k, v)
def test_list_artifact_tags(db: SQLDB, db_session: Session):
db.store_artifact(db_session, "k1", {}, "1", tag="t1", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p2")
tags = db.list_artifact_tags(db_session, "p1")
assert {"t1", "t2"} == set(tags), "bad tags"
def test_list_artifact_date(db: SQLDB, db_session: Session):
t1 = datetime(2020, 2, 16)
t2 = t1 - timedelta(days=7)
t3 = t2 - timedelta(days=7)
prj = "p7"
db.store_artifact(db_session, "k1", {"updated": t1}, "u1", project=prj)
db.store_artifact(db_session, "k2", {"updated": t2}, "u2", project=prj)
db.store_artifact(db_session, "k3", {"updated": t3}, "u3", project=prj)
arts = db.list_artifacts(db_session, project=prj, since=t3, tag="*")
assert 3 == len(arts), "since t3"
arts = db.list_artifacts(db_session, project=prj, since=t2, tag="*")
assert 2 == len(arts), "since t2"
arts = db.list_artifacts(
db_session, project=prj, since=t1 + timedelta(days=1), tag="*"
)
assert not arts, "since t1+"
arts = db.list_artifacts(db_session, project=prj, until=t2, tag="*")
assert 2 == len(arts), "until t2"
arts = db.list_artifacts(db_session, project=prj, since=t2, until=t2, tag="*")
assert 1 == len(arts), "since/until t2"
def test_list_projects(db: SQLDB, db_session: Session):
for i in range(10):
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
db.store_run(db_session, run, "u7", project=f"prj{i % 3}", iter=i)
assert {"prj0", "prj1", "prj2"} == {p.name for p in db.list_projects(db_session)}
def test_run_iter0(db: SQLDB, db_session: Session):
uid, prj = "uid39", "lemon"
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
for i in range(7):
db.store_run(db_session, run, uid, prj, i)
db._get_run(db_session, uid, prj, 0) # See issue 140
def test_artifacts_latest(db: SQLDB, db_session: Session):
k1, u1, art1 = "k1", "u1", {"a": 1}
prj = "p38"
db.store_artifact(db_session, k1, art1, u1, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert art1["a"] == arts[0]["a"], "bad artifact"
u2, art2 = "u2", {"a": 17}
db.store_artifact(db_session, k1, art2, u2, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 2 == len(arts), "count"
assert art2["a"] == arts[1]["a"], "bad artifact"
k2, u3, art3 = "k2", "u3", {"a": 99}
db.store_artifact(db_session, k2, art3, u3, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 3 == len(arts), "number"
assert {1, 17, 99} == set(art["a"] for art in arts), "latest"
@pytest.mark.parametrize("cls", _tagged)
def test_tags(db: SQLDB, db_session: Session, cls):
p1, n1 = "prj1", "name1"
obj1, obj2, obj3 = cls(), cls(), cls()
db_session.add(obj1)
db_session.add(obj2)
db_session.add(obj3)
db_session.commit()
db.tag_objects(db_session, [obj1, obj2], p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert {obj1, obj2} == set(objs), "find tags"
db.del_tag(db_session, p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert [] == objs, "find tags after del"
def _tag_objs(db: SQLDB, db_session: Session, count, project, tags):
by_tag = defaultdict(list)
for i in range(count):
cls = _tagged[i % len(_tagged)]
obj = cls()
by_tag[tags[i % len(tags)]].append(obj)
db_session.add(obj)
db_session.commit()
for tag, objs in by_tag.items():
db.tag_objects(db_session, objs, project, tag)
def test_list_tags(db: SQLDB, db_session: Session):
p1, tags1 = "prj1", ["a", "b", "c"]
_tag_objs(db, db_session, 17, p1, tags1)
p2, tags2 = "prj2", ["b", "c", "d", "e"]
_tag_objs(db, db_session, 11, p2, tags2)
tags = db.list_tags(db_session, p1)
assert set(tags) == set(tags1), "tags"
def test_projects(db: SQLDB, db_session: Session):
prj1 = {
"name": "p1",
"description": "banana",
# 'users': ['u1', 'u2'],
"spec": {"company": "ACME"},
"state": "active",
"created": datetime.now(),
}
pid1 = db.add_project(db_session, prj1)
p1 = db.get_project(db_session, project_id=pid1)
assert p1, f"project {pid1} not found"
out = {
"name": p1.name,
"description": p1.description,
# 'users': sorted(u.name for u in p1.users),
"spec": p1.spec,
"state": p1.state,
"created": p1.created,
}
assert prj1 == out, "bad project"
data = {"description": "lemon"}
db.update_project(db_session, p1.name, data)
p1 = db.get_project(db_session, project_id=pid1)
assert data["description"] == p1.description, "bad update"
prj2 = {"name": "p2"}
db.add_project(db_session, prj2)
prjs = {p.name for p in db.list_projects(db_session)}
assert {prj1["name"], prj2["name"]} == prjs, "list"
def test_cache_projects(db: SQLDB, db_session: Session):
assert 0 == len(db._projects), "empty cache"
name = "prj348"
db.add_project(db_session, {"name": name})
assert {name} == db._projects, "project"
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name + "-new")
mock.assert_called_once()
project_2_name = "project-2"
db.add_project(db_session, {"name": project_2_name})
db._projects = set()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
# def test_function_latest(db: SQLDB, db_session: Session):
# fn1, t1 = {'x': 1}, 'u83'
# fn2, t2 = {'x': 2}, 'u23'
# prj, name = 'p388', 'n3023'
# db.store_function(db_session, fn1, name, prj, t1)
# db.store_function(db_session, fn2, name, prj, t2)
#
# fn = db.get_function(db_session, name, prj, 'latest')
# assert fn2 == fn, 'latest'
| 33.022422 | 85 | 0.629957 |
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from sqlalchemy.orm import Session
from mlrun.api.db.sqldb.db import SQLDB
from mlrun.api.db.sqldb.models import _tagged
from tests.conftest import new_run
@contextmanager
def patch(obj, **kw):
old = {}
for k, v in kw.items():
old[k] = getattr(obj, k)
setattr(obj, k, v)
try:
yield obj
finally:
for k, v in old.items():
setattr(obj, k, v)
def test_list_artifact_tags(db: SQLDB, db_session: Session):
db.store_artifact(db_session, "k1", {}, "1", tag="t1", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p2")
tags = db.list_artifact_tags(db_session, "p1")
assert {"t1", "t2"} == set(tags), "bad tags"
def test_list_artifact_date(db: SQLDB, db_session: Session):
t1 = datetime(2020, 2, 16)
t2 = t1 - timedelta(days=7)
t3 = t2 - timedelta(days=7)
prj = "p7"
db.store_artifact(db_session, "k1", {"updated": t1}, "u1", project=prj)
db.store_artifact(db_session, "k2", {"updated": t2}, "u2", project=prj)
db.store_artifact(db_session, "k3", {"updated": t3}, "u3", project=prj)
arts = db.list_artifacts(db_session, project=prj, since=t3, tag="*")
assert 3 == len(arts), "since t3"
arts = db.list_artifacts(db_session, project=prj, since=t2, tag="*")
assert 2 == len(arts), "since t2"
arts = db.list_artifacts(
db_session, project=prj, since=t1 + timedelta(days=1), tag="*"
)
assert not arts, "since t1+"
arts = db.list_artifacts(db_session, project=prj, until=t2, tag="*")
assert 2 == len(arts), "until t2"
arts = db.list_artifacts(db_session, project=prj, since=t2, until=t2, tag="*")
assert 1 == len(arts), "since/until t2"
def test_list_projects(db: SQLDB, db_session: Session):
for i in range(10):
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
db.store_run(db_session, run, "u7", project=f"prj{i % 3}", iter=i)
assert {"prj0", "prj1", "prj2"} == {p.name for p in db.list_projects(db_session)}
def test_run_iter0(db: SQLDB, db_session: Session):
uid, prj = "uid39", "lemon"
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
for i in range(7):
db.store_run(db_session, run, uid, prj, i)
db._get_run(db_session, uid, prj, 0)
def test_artifacts_latest(db: SQLDB, db_session: Session):
k1, u1, art1 = "k1", "u1", {"a": 1}
prj = "p38"
db.store_artifact(db_session, k1, art1, u1, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert art1["a"] == arts[0]["a"], "bad artifact"
u2, art2 = "u2", {"a": 17}
db.store_artifact(db_session, k1, art2, u2, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 2 == len(arts), "count"
assert art2["a"] == arts[1]["a"], "bad artifact"
k2, u3, art3 = "k2", "u3", {"a": 99}
db.store_artifact(db_session, k2, art3, u3, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 3 == len(arts), "number"
assert {1, 17, 99} == set(art["a"] for art in arts), "latest"
@pytest.mark.parametrize("cls", _tagged)
def test_tags(db: SQLDB, db_session: Session, cls):
p1, n1 = "prj1", "name1"
obj1, obj2, obj3 = cls(), cls(), cls()
db_session.add(obj1)
db_session.add(obj2)
db_session.add(obj3)
db_session.commit()
db.tag_objects(db_session, [obj1, obj2], p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert {obj1, obj2} == set(objs), "find tags"
db.del_tag(db_session, p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert [] == objs, "find tags after del"
def _tag_objs(db: SQLDB, db_session: Session, count, project, tags):
by_tag = defaultdict(list)
for i in range(count):
cls = _tagged[i % len(_tagged)]
obj = cls()
by_tag[tags[i % len(tags)]].append(obj)
db_session.add(obj)
db_session.commit()
for tag, objs in by_tag.items():
db.tag_objects(db_session, objs, project, tag)
def test_list_tags(db: SQLDB, db_session: Session):
p1, tags1 = "prj1", ["a", "b", "c"]
_tag_objs(db, db_session, 17, p1, tags1)
p2, tags2 = "prj2", ["b", "c", "d", "e"]
_tag_objs(db, db_session, 11, p2, tags2)
tags = db.list_tags(db_session, p1)
assert set(tags) == set(tags1), "tags"
def test_projects(db: SQLDB, db_session: Session):
prj1 = {
"name": "p1",
"description": "banana",
"spec": {"company": "ACME"},
"state": "active",
"created": datetime.now(),
}
pid1 = db.add_project(db_session, prj1)
p1 = db.get_project(db_session, project_id=pid1)
assert p1, f"project {pid1} not found"
out = {
"name": p1.name,
"description": p1.description,
"spec": p1.spec,
"state": p1.state,
"created": p1.created,
}
assert prj1 == out, "bad project"
data = {"description": "lemon"}
db.update_project(db_session, p1.name, data)
p1 = db.get_project(db_session, project_id=pid1)
assert data["description"] == p1.description, "bad update"
prj2 = {"name": "p2"}
db.add_project(db_session, prj2)
prjs = {p.name for p in db.list_projects(db_session)}
assert {prj1["name"], prj2["name"]} == prjs, "list"
def test_cache_projects(db: SQLDB, db_session: Session):
assert 0 == len(db._projects), "empty cache"
name = "prj348"
db.add_project(db_session, {"name": name})
assert {name} == db._projects, "project"
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name + "-new")
mock.assert_called_once()
project_2_name = "project-2"
db.add_project(db_session, {"name": project_2_name})
db._projects = set()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
| true | true |
f73184ce4006c41fecce4252dad30c23fe8ec11b | 974 | py | Python | pictures/iptc1.py | digcat/cmistest | f8718e09077b741cb885b4335719a99923c8b330 | [
"MIT"
] | null | null | null | pictures/iptc1.py | digcat/cmistest | f8718e09077b741cb885b4335719a99923c8b330 | [
"MIT"
] | null | null | null | pictures/iptc1.py | digcat/cmistest | f8718e09077b741cb885b4335719a99923c8b330 | [
"MIT"
] | null | null | null | from iptcinfo import IPTCInfo
import sys
fn = (len(sys.argv) > 1 and [sys.argv[1]] or ['test.jpg'])[0]
fn2 = (len(sys.argv) > 2 and [sys.argv[2]] or ['test_out.jpg'])[0]
# Create new info object
info = IPTCInfo(fn)
# Check if file had IPTC data
if len(info.data) < 4: raise Exception(info.error)
# Print list of keywords, supplemental categories, contacts
print "KEYWORDS"
print info.keywords
print "SUPPLEMENTAL"
print info.supplementalCategories
print "CONTACTS"
print info.contacts
print "DATA"
print info.data
# Get specific attributes...
caption = info.data['caption/abstract']
# Create object for file that may does have IPTC data.
# info = IPTCInfo(fn)
# for files without IPTC data, use
info = IPTCInfo(fn, force=True)
# Add/change an attribute
info.data['caption/abstract'] = 'Witty caption here'
info.data['supplemental category'] = ['portrait']
try:
info.save()
info.saveAs(fn2)
print IPTCInfo(fn2)
except:
print "Couldnt Update the file"
| 23.756098 | 66 | 0.719713 | from iptcinfo import IPTCInfo
import sys
fn = (len(sys.argv) > 1 and [sys.argv[1]] or ['test.jpg'])[0]
fn2 = (len(sys.argv) > 2 and [sys.argv[2]] or ['test_out.jpg'])[0]
info = IPTCInfo(fn)
if len(info.data) < 4: raise Exception(info.error)
print "KEYWORDS"
print info.keywords
print "SUPPLEMENTAL"
print info.supplementalCategories
print "CONTACTS"
print info.contacts
print "DATA"
print info.data
caption = info.data['caption/abstract']
info = IPTCInfo(fn, force=True)
info.data['caption/abstract'] = 'Witty caption here'
info.data['supplemental category'] = ['portrait']
try:
info.save()
info.saveAs(fn2)
print IPTCInfo(fn2)
except:
print "Couldnt Update the file"
| false | true |
f731851b0bbc67a1b6af6e831923eba84d6c28aa | 10,964 | py | Python | rustplus/api/rust_api.py | olijeffers0n/rustplus.py | 0ecaaecc345848b47abe27bc1fd8b7ef7aebfd23 | [
"MIT"
] | 25 | 2021-05-03T11:08:55.000Z | 2022-03-14T00:56:50.000Z | rustplus/api/rust_api.py | olijeffers0n/rustplus.py | 0ecaaecc345848b47abe27bc1fd8b7ef7aebfd23 | [
"MIT"
] | 9 | 2021-06-15T10:38:42.000Z | 2022-03-26T11:45:03.000Z | rustplus/api/rust_api.py | olijeffers0n/rustplus.py | 0ecaaecc345848b47abe27bc1fd8b7ef7aebfd23 | [
"MIT"
] | 13 | 2021-06-08T18:35:17.000Z | 2022-03-26T00:44:08.000Z | import asyncio
from asyncio.futures import Future
from typing import List
from PIL import Image
from io import BytesIO
from importlib import resources
from datetime import datetime
from collections import defaultdict
from .base_rust_api import BaseRustSocket
from .structures import RustTime, RustInfo, RustMap, RustMarker, RustChatMessage, RustTeamInfo, RustTeamMember, RustTeamNote, RustEntityInfo, RustContents, RustItem
from .remote.rustplus_pb2 import *
from .remote import HeartBeat
from ..commands import CommandOptions
from ..exceptions import *
from ..utils import *
class RustSocket(BaseRustSocket):
def __init__(self, ip: str = None, port: str = None, steamid: int = None, playertoken: int = None, command_options : CommandOptions = None, raise_ratelimit_exception : bool = True, ratelimit_limit : int = 25, ratelimit_refill : int = 3) -> None:
super().__init__(ip=ip, port=port, steamid=steamid, playertoken=playertoken, command_options=command_options, raise_ratelimit_exception=raise_ratelimit_exception, ratelimit_limit=ratelimit_limit, ratelimit_refill=ratelimit_refill, heartbeat=HeartBeat(self))
def entity_event(self, eid):
"""
Decorator to register a smart device listener
"""
def wrap_func(coro):
def entity_event_callback(future : Future):
try:
entity_info : RustEntityInfo = future.result()
self.remote.event_handler.register_event(eid, (coro, loop, entity_info.type))
except:
raise SmartDeviceRegistrationError("Not Found")
loop = asyncio.get_event_loop()
future = asyncio.run_coroutine_threadsafe(self.get_entity_info(eid), loop)
future.add_done_callback(entity_event_callback)
return wrap_func
async def get_time(self) -> RustTime:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTime.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return format_time(response)
async def send_team_message(self, message: str) -> None:
await self._handle_ratelimit(2)
app_send_message = AppSendMessage()
app_send_message.message = message
app_request = self._generate_protobuf()
app_request.sendTeamMessage.CopyFrom(app_send_message)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_info(self) -> RustInfo:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return RustInfo(response.response.info)
async def get_team_chat(self) -> List[RustChatMessage]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamChat.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
messages = (await self.remote.get_response(app_request.seq, app_request)).response.teamChat.messages
return [RustChatMessage(message) for message in messages]
async def get_team_info(self):
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustTeamInfo(app_message.response.teamInfo)
async def get_markers(self) -> List[RustMarker]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getMapMarkers.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return [RustMarker(marker) for marker in app_message.response.mapMarkers.markers]
async def get_raw_map_data(self) -> RustMap:
await self._handle_ratelimit(5)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustMap(app_message.response.map)
async def get_map(self, add_icons: bool = False, add_events: bool = False, add_vending_machines: bool = False, override_images: dict = {}) -> Image:
MAPSIZE = int((await self.get_info()).size)
await self._handle_ratelimit(5 + 1 if [add_icons, add_events, add_vending_machines].count(True) >= 1 else 0)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
map = app_message.response.map
monuments = list(map.monuments)
try:
image = Image.open(BytesIO(map.jpgImage))
except:
raise ImageError("Invalid bytes for the image")
image = image.crop((500,500,map.height-500,map.width-500))
map = image.resize((MAPSIZE,MAPSIZE), Image.ANTIALIAS)
if add_icons or add_events or add_vending_machines:
mapMarkers = await self.get_markers()
if add_icons:
for monument in monuments:
if str(monument.token) == "DungeonBase":
continue
icon = convert_monument(monument.token, override_images)
if monument.token in override_images:
icon = icon.resize((150, 150))
if str(monument.token) == "train_tunnel_display_name":
icon = icon.resize((100, 125))
map.paste(icon, (format_cood(int(monument.x), int(monument.y), MAPSIZE)), icon)
if add_vending_machines:
with resources.path("rustplus.api.icons", "vending_machine.png") as path:
vendingMachine = Image.open(path).convert("RGBA")
vendingMachine = vendingMachine.resize((100, 100))
for marker in mapMarkers:
if add_events:
if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6:
icon = convert_marker(str(marker.type), marker.rotation)
if marker.type == 6:
x = marker.x
y = marker.y
if y > MAPSIZE: y = MAPSIZE
if y < 0: y = 100
if x > MAPSIZE: x = MAPSIZE - 75
if x < 0: x = 50
map.paste(icon, (int(x), MAPSIZE - int(y)), icon)
else:
map.paste(icon, (format_cood(int(marker.x), int(marker.y), MAPSIZE)), icon)
if add_vending_machines and marker.type == 3:
map.paste(vendingMachine, (int(marker.x) - 50, MAPSIZE - int(marker.y) - 50), vendingMachine)
return map.resize((2000, 2000), Image.ANTIALIAS)
async def get_entity_info(self, eid: int = None) -> RustEntityInfo:
await self._handle_ratelimit()
if eid is None:
raise ValueError("EID cannot be None")
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.getEntityInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustEntityInfo(app_message.response.entityInfo)
async def _update_smart_device(self, eid : int, value : bool) -> None:
await self._handle_ratelimit()
entityValue = AppSetEntityValue()
entityValue.value = value
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.setEntityValue.CopyFrom(entityValue)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def turn_on_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, True)
async def turn_off_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, False)
async def promote_to_team_leader(self, steamid: int = None) -> None:
if steamid is None:
raise ValueError("SteamID cannot be None")
await self._handle_ratelimit()
leaderPacket = AppPromoteToLeader()
leaderPacket.steamId = steamid
app_request = self._generate_protobuf()
app_request.promoteToLeader.CopyFrom(leaderPacket)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_current_events(self) -> List[RustMarker]:
return [marker for marker in (await self.get_markers()) if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6]
async def get_tc_storage_contents(self, eid: int = None, combine_stacks: bool = False) -> RustContents:
if eid is None:
raise ValueError("EID cannot be None")
returnedData = await self.get_entity_info(eid)
targetTime = datetime.utcfromtimestamp(int(returnedData.protectionExpiry))
difference = targetTime - datetime.utcnow()
items = []
for item in returnedData.items:
items.append(RustItem(translate_id_to_stack(item.itemId), item.itemId, item.quantity, item.itemIsBlueprint))
if combine_stacks:
mergedMap = defaultdict(tuple)
for item in items:
data = mergedMap[str(item.itemId)]
if data:
count = int(data[0]) + int(item.quantity)
mergedMap[str(item.itemId)] = (count, bool(item.isBlueprint))
else:
mergedMap[str(item.itemId)] = (int(item.quantity), bool(item.isBlueprint))
items = []
for key in mergedMap.keys():
items.append(RustItem(translate_id_to_stack(key), key, int(mergedMap[key][0]), bool(mergedMap[key][1])))
return RustContents(difference, bool(returnedData.hasProtection), items)
| 36.915825 | 265 | 0.639639 | import asyncio
from asyncio.futures import Future
from typing import List
from PIL import Image
from io import BytesIO
from importlib import resources
from datetime import datetime
from collections import defaultdict
from .base_rust_api import BaseRustSocket
from .structures import RustTime, RustInfo, RustMap, RustMarker, RustChatMessage, RustTeamInfo, RustTeamMember, RustTeamNote, RustEntityInfo, RustContents, RustItem
from .remote.rustplus_pb2 import *
from .remote import HeartBeat
from ..commands import CommandOptions
from ..exceptions import *
from ..utils import *
class RustSocket(BaseRustSocket):
def __init__(self, ip: str = None, port: str = None, steamid: int = None, playertoken: int = None, command_options : CommandOptions = None, raise_ratelimit_exception : bool = True, ratelimit_limit : int = 25, ratelimit_refill : int = 3) -> None:
super().__init__(ip=ip, port=port, steamid=steamid, playertoken=playertoken, command_options=command_options, raise_ratelimit_exception=raise_ratelimit_exception, ratelimit_limit=ratelimit_limit, ratelimit_refill=ratelimit_refill, heartbeat=HeartBeat(self))
def entity_event(self, eid):
def wrap_func(coro):
def entity_event_callback(future : Future):
try:
entity_info : RustEntityInfo = future.result()
self.remote.event_handler.register_event(eid, (coro, loop, entity_info.type))
except:
raise SmartDeviceRegistrationError("Not Found")
loop = asyncio.get_event_loop()
future = asyncio.run_coroutine_threadsafe(self.get_entity_info(eid), loop)
future.add_done_callback(entity_event_callback)
return wrap_func
async def get_time(self) -> RustTime:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTime.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return format_time(response)
async def send_team_message(self, message: str) -> None:
await self._handle_ratelimit(2)
app_send_message = AppSendMessage()
app_send_message.message = message
app_request = self._generate_protobuf()
app_request.sendTeamMessage.CopyFrom(app_send_message)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_info(self) -> RustInfo:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return RustInfo(response.response.info)
async def get_team_chat(self) -> List[RustChatMessage]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamChat.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
messages = (await self.remote.get_response(app_request.seq, app_request)).response.teamChat.messages
return [RustChatMessage(message) for message in messages]
async def get_team_info(self):
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustTeamInfo(app_message.response.teamInfo)
async def get_markers(self) -> List[RustMarker]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getMapMarkers.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return [RustMarker(marker) for marker in app_message.response.mapMarkers.markers]
async def get_raw_map_data(self) -> RustMap:
await self._handle_ratelimit(5)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustMap(app_message.response.map)
async def get_map(self, add_icons: bool = False, add_events: bool = False, add_vending_machines: bool = False, override_images: dict = {}) -> Image:
MAPSIZE = int((await self.get_info()).size)
await self._handle_ratelimit(5 + 1 if [add_icons, add_events, add_vending_machines].count(True) >= 1 else 0)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
map = app_message.response.map
monuments = list(map.monuments)
try:
image = Image.open(BytesIO(map.jpgImage))
except:
raise ImageError("Invalid bytes for the image")
image = image.crop((500,500,map.height-500,map.width-500))
map = image.resize((MAPSIZE,MAPSIZE), Image.ANTIALIAS)
if add_icons or add_events or add_vending_machines:
mapMarkers = await self.get_markers()
if add_icons:
for monument in monuments:
if str(monument.token) == "DungeonBase":
continue
icon = convert_monument(monument.token, override_images)
if monument.token in override_images:
icon = icon.resize((150, 150))
if str(monument.token) == "train_tunnel_display_name":
icon = icon.resize((100, 125))
map.paste(icon, (format_cood(int(monument.x), int(monument.y), MAPSIZE)), icon)
if add_vending_machines:
with resources.path("rustplus.api.icons", "vending_machine.png") as path:
vendingMachine = Image.open(path).convert("RGBA")
vendingMachine = vendingMachine.resize((100, 100))
for marker in mapMarkers:
if add_events:
if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6:
icon = convert_marker(str(marker.type), marker.rotation)
if marker.type == 6:
x = marker.x
y = marker.y
if y > MAPSIZE: y = MAPSIZE
if y < 0: y = 100
if x > MAPSIZE: x = MAPSIZE - 75
if x < 0: x = 50
map.paste(icon, (int(x), MAPSIZE - int(y)), icon)
else:
map.paste(icon, (format_cood(int(marker.x), int(marker.y), MAPSIZE)), icon)
if add_vending_machines and marker.type == 3:
map.paste(vendingMachine, (int(marker.x) - 50, MAPSIZE - int(marker.y) - 50), vendingMachine)
return map.resize((2000, 2000), Image.ANTIALIAS)
async def get_entity_info(self, eid: int = None) -> RustEntityInfo:
await self._handle_ratelimit()
if eid is None:
raise ValueError("EID cannot be None")
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.getEntityInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustEntityInfo(app_message.response.entityInfo)
async def _update_smart_device(self, eid : int, value : bool) -> None:
await self._handle_ratelimit()
entityValue = AppSetEntityValue()
entityValue.value = value
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.setEntityValue.CopyFrom(entityValue)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def turn_on_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, True)
async def turn_off_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, False)
async def promote_to_team_leader(self, steamid: int = None) -> None:
if steamid is None:
raise ValueError("SteamID cannot be None")
await self._handle_ratelimit()
leaderPacket = AppPromoteToLeader()
leaderPacket.steamId = steamid
app_request = self._generate_protobuf()
app_request.promoteToLeader.CopyFrom(leaderPacket)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_current_events(self) -> List[RustMarker]:
return [marker for marker in (await self.get_markers()) if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6]
async def get_tc_storage_contents(self, eid: int = None, combine_stacks: bool = False) -> RustContents:
if eid is None:
raise ValueError("EID cannot be None")
returnedData = await self.get_entity_info(eid)
targetTime = datetime.utcfromtimestamp(int(returnedData.protectionExpiry))
difference = targetTime - datetime.utcnow()
items = []
for item in returnedData.items:
items.append(RustItem(translate_id_to_stack(item.itemId), item.itemId, item.quantity, item.itemIsBlueprint))
if combine_stacks:
mergedMap = defaultdict(tuple)
for item in items:
data = mergedMap[str(item.itemId)]
if data:
count = int(data[0]) + int(item.quantity)
mergedMap[str(item.itemId)] = (count, bool(item.isBlueprint))
else:
mergedMap[str(item.itemId)] = (int(item.quantity), bool(item.isBlueprint))
items = []
for key in mergedMap.keys():
items.append(RustItem(translate_id_to_stack(key), key, int(mergedMap[key][0]), bool(mergedMap[key][1])))
return RustContents(difference, bool(returnedData.hasProtection), items)
| true | true |
f73185ddbd238f0f843033c23cdf25d8077121c5 | 989 | py | Python | tests/sparseml/onnx/test_imports.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 922 | 2021-02-04T17:51:54.000Z | 2022-03-31T20:49:26.000Z | tests/sparseml/onnx/test_imports.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 197 | 2021-02-04T22:17:21.000Z | 2022-03-31T13:58:55.000Z | tests/sparseml/onnx/test_imports.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 80 | 2021-02-04T22:20:14.000Z | 2022-03-30T19:36:15.000Z | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_imports():
# flake8: noqa
from sparseml.onnx import (
check_onnx_install,
check_onnxruntime_install,
detect_framework,
framework_info,
is_supported,
onnx,
onnx_err,
onnxruntime,
onnxruntime_err,
require_onnx,
require_onnxruntime,
sparsification_info,
)
| 30.90625 | 76 | 0.695652 |
def test_imports():
from sparseml.onnx import (
check_onnx_install,
check_onnxruntime_install,
detect_framework,
framework_info,
is_supported,
onnx,
onnx_err,
onnxruntime,
onnxruntime_err,
require_onnx,
require_onnxruntime,
sparsification_info,
)
| true | true |
f73186d8775e20db4e181ff54bb6713045cc02fc | 7,745 | py | Python | demo/demo.py | hummat/detectron2 | ef2f4df474b4a07049cada4793392e8e36c3e746 | [
"Apache-2.0"
] | null | null | null | demo/demo.py | hummat/detectron2 | ef2f4df474b4a07049cada4793392e8e36c3e746 | [
"Apache-2.0"
] | null | null | null | demo/demo.py | hummat/detectron2 | ef2f4df474b4a07049cada4793392e8e36c3e746 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_coco_json
path_to_coco_json = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d/coco_data/coco_annotations.json"
path_to_images = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d"
# path_to_config_yaml = "/home/matthias/Data/Ubuntu/git/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
DatasetCatalog.register("porta_filter", lambda: load_coco_json(path_to_coco_json, path_to_images))
MetadataCatalog.get("porta_filter").set(thing_classes=["porta filter"], json_file=path_to_coco_json, image_root=path_to_images)
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
| 39.116162 | 128 | 0.628922 |
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_coco_json
path_to_coco_json = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d/coco_data/coco_annotations.json"
path_to_images = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d"
DatasetCatalog.register("porta_filter", lambda: load_coco_json(path_to_coco_json, path_to_images))
MetadataCatalog.get("porta_filter").set(thing_classes=["porta filter"], json_file=path_to_coco_json, image_root=path_to_images)
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
| true | true |
f731877388c6e4f2d7121cb542ca1fe6ff75cc62 | 1,466 | py | Python | xcc/util.py | BastianZim/xanadu-cloud-client | 3a2d9d5373d90339a1047ee939bacef1bd4019ed | [
"Apache-2.0"
] | 3 | 2021-11-12T21:44:35.000Z | 2022-02-10T15:07:20.000Z | xcc/util.py | BastianZim/xanadu-cloud-client | 3a2d9d5373d90339a1047ee939bacef1bd4019ed | [
"Apache-2.0"
] | 2 | 2021-11-08T16:39:56.000Z | 2022-01-20T14:47:29.000Z | xcc/util.py | BastianZim/xanadu-cloud-client | 3a2d9d5373d90339a1047ee939bacef1bd4019ed | [
"Apache-2.0"
] | 2 | 2021-12-01T19:07:05.000Z | 2022-01-20T14:26:47.000Z | """
This module contains utilities which are shared between other modules.
"""
from typing import Any, Callable
class cached_property: # pylint: disable=invalid-name
"""Descriptor that transforms a class method into a property whose value is
computed once and then cached for subsequent accesses.
Args:
func (Callable[[Any], Any]): class method whose value should be cached
.. note::
Each class instance is associated with an independent cache.
.. warning::
Unlike ``functools.cached_property``, this descriptor is *not* safe for
concurrent use.
"""
def __init__(self, func: Callable[[Any], Any]) -> None:
self.func = func
self.caches = {}
self.__doc__ = func.__doc__
def __get__(self, instance: Any, _) -> Any:
"""Returns the (cached) value associated with the given instance."""
# Edge case to support getattr() and generate Sphinx documentation.
if instance is None:
return self
if instance not in self.caches:
self.caches[instance] = self.func(instance)
return self.caches[instance]
def __set__(self, instance: Any, value: Any) -> None:
"""Sets the cache of the given instance to the provided value."""
self.caches[instance] = value
def __delete__(self, instance: Any) -> None:
"""Clears the cache of the given instance."""
self.caches.pop(instance, None)
| 31.191489 | 79 | 0.645975 |
from typing import Any, Callable
class cached_property:
def __init__(self, func: Callable[[Any], Any]) -> None:
self.func = func
self.caches = {}
self.__doc__ = func.__doc__
def __get__(self, instance: Any, _) -> Any:
if instance is None:
return self
if instance not in self.caches:
self.caches[instance] = self.func(instance)
return self.caches[instance]
def __set__(self, instance: Any, value: Any) -> None:
self.caches[instance] = value
def __delete__(self, instance: Any) -> None:
self.caches.pop(instance, None)
| true | true |
f73187898e8bb312c2b27b9f5d0d2fa9a2a19d87 | 79,267 | py | Python | flaml/model.py | rserran/FLAML | 7d6822aa40883550e72c4ee24adb765c6e937ce7 | [
"MIT"
] | null | null | null | flaml/model.py | rserran/FLAML | 7d6822aa40883550e72c4ee24adb765c6e937ce7 | [
"MIT"
] | null | null | null | flaml/model.py | rserran/FLAML | 7d6822aa40883550e72c4ee24adb765c6e937ce7 | [
"MIT"
] | null | null | null | # !
# * Copyright (c) FLAML authors. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from contextlib import contextmanager
from functools import partial
import signal
import os
from typing import Callable, List
import numpy as np
import time
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier, DummyRegressor
from scipy.sparse import issparse
import logging
import shutil
from pandas import DataFrame, Series, to_datetime
import sys
import math
from . import tune
from .data import (
group_counts,
CLASSIFICATION,
TS_FORECASTREGRESSION,
TS_TIMESTAMP_COL,
TS_VALUE_COL,
SEQCLASSIFICATION,
SEQREGRESSION,
TOKENCLASSIFICATION,
SUMMARIZATION,
NLG_TASKS,
MULTICHOICECLASSIFICATION,
)
try:
import psutil
except ImportError:
psutil = None
try:
import resource
except ImportError:
resource = None
logger = logging.getLogger("flaml.automl")
FREE_MEM_RATIO = 0.2
def TimeoutHandler(sig, frame):
raise TimeoutError(sig, frame)
@contextmanager
def limit_resource(memory_limit, time_limit):
if memory_limit > 0:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft < 0 and (hard < 0 or memory_limit <= hard) or memory_limit < soft:
try:
resource.setrlimit(resource.RLIMIT_AS, (int(memory_limit), hard))
except ValueError:
# According to https://bugs.python.org/issue40518, it's a mac-specific error.
pass
main_thread = False
if time_limit is not None:
try:
signal.signal(signal.SIGALRM, TimeoutHandler)
signal.alarm(int(time_limit) or 1)
main_thread = True
except ValueError:
pass
try:
yield
finally:
if main_thread:
signal.alarm(0)
if memory_limit > 0:
resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
class BaseEstimator:
    """The abstract class for all learners.
    Typical examples:
    * XGBoostEstimator: for regression.
    * XGBoostSklearnEstimator: for classification.
    * LGBMEstimator, RandomForestEstimator, LRL1Classifier, LRL2Classifier:
    for both regression and classification.
    """
    def __init__(self, task="binary", **config):
        """Constructor.
        Args:
            task: A string of the task type, one of
            'binary', 'multiclass', 'regression', 'rank', 'seq-classification',
            'seq-regression', 'token-classification', 'multichoice-classification',
            'summarization', 'ts_forecast', 'ts_forecast_classification'.
            config: A dictionary containing the hyperparameter names, 'n_jobs' as keys.
            n_jobs is the number of parallel threads.
        """
        self._task = task
        self.params = self.config2params(config)
        self.estimator_class = self._model = None
        # sklearn convention: expose whether this wraps a classifier or a
        # regressor; honor an explicit "_estimator_type" in config, otherwise
        # infer it from the task.
        if "_estimator_type" in config:
            self._estimator_type = self.params.pop("_estimator_type")
        else:
            self._estimator_type = (
                "classifier" if task in CLASSIFICATION else "regressor"
            )
    def get_params(self, deep=False):
        """Return hyperparameters plus task/estimator type (sklearn get_params protocol)."""
        params = self.params.copy()
        params["task"] = self._task
        if hasattr(self, "_estimator_type"):
            params["_estimator_type"] = self._estimator_type
        return params
    @property
    def classes_(self):
        # delegate to the fitted underlying model (classification only)
        return self._model.classes_
    @property
    def n_features_in_(self):
        return self._model.n_features_in_
    @property
    def model(self):
        """Trained model after fit() is called, or None before fit() is called."""
        return self._model
    @property
    def estimator(self):
        """Trained model after fit() is called, or None before fit() is called."""
        return self._model
    def _preprocess(self, X):
        # Hook for subclasses to transform X before fit/predict; identity by default.
        return X
    def _fit(self, X_train, y_train, **kwargs):
        """Fit self.estimator_class on preprocessed data; return seconds spent."""
        current_time = time.time()
        if "groups" in kwargs:
            kwargs = kwargs.copy()
            groups = kwargs.pop("groups")
            if self._task == "rank":
                # ranking estimators expect per-query group sizes, not raw group ids
                kwargs["group"] = group_counts(groups)
                # groups_val = kwargs.get('groups_val')
                # if groups_val is not None:
                #     kwargs['eval_group'] = [group_counts(groups_val)]
                #     kwargs['eval_set'] = [
                #         (kwargs['X_val'], kwargs['y_val'])]
                #     kwargs['verbose'] = False
                #     del kwargs['groups_val'], kwargs['X_val'], kwargs['y_val']
        X_train = self._preprocess(X_train)
        model = self.estimator_class(**self.params)
        if logger.level == logging.DEBUG:
            # xgboost 1.6 doesn't display all the params in the model str
            logger.debug(f"flaml.model - {model} fit started with params {self.params}")
        model.fit(X_train, y_train, **kwargs)
        if logger.level == logging.DEBUG:
            logger.debug(f"flaml.model - {model} fit finished")
        train_time = time.time() - current_time
        self._model = model
        return train_time
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train the model from given training data.
        Args:
            X_train: A numpy array or a dataframe of training data in shape n*m.
            y_train: A numpy array or a series of labels in shape n*1.
            budget: A float of the time budget in seconds.
        Returns:
            train_time: A float of the training time in seconds.
        """
        if (
            getattr(self, "limit_resource", None)
            and resource is not None
            and (budget is not None or psutil is not None)
        ):
            # Enforce time/memory limits around training; if they are hit,
            # fall back to a trivial dummy model so the trial still returns.
            start_time = time.time()
            mem = psutil.virtual_memory() if psutil is not None else None
            try:
                with limit_resource(
                    # memory cap = a FREE_MEM_RATIO-reduced share of currently
                    # free memory plus this process's current RSS; -1 disables
                    mem.available * (1 - FREE_MEM_RATIO)
                    + psutil.Process(os.getpid()).memory_info().rss
                    if mem is not None
                    else -1,
                    budget,
                ):
                    train_time = self._fit(X_train, y_train, **kwargs)
            except (MemoryError, TimeoutError) as e:
                logger.warning(f"{e.__class__} {e}")
                if self._task in CLASSIFICATION:
                    model = DummyClassifier()
                else:
                    model = DummyRegressor()
                X_train = self._preprocess(X_train)
                model.fit(X_train, y_train)
                self._model = model
                train_time = time.time() - start_time
        else:
            train_time = self._fit(X_train, y_train, **kwargs)
        return train_time
    def predict(self, X, **kwargs):
        """Predict label from features.
        Args:
            X: A numpy array or a dataframe of featurized instances, shape n*m.
        Returns:
            A numpy array of shape n*1.
            Each element is the label for a instance.
        """
        if self._model is not None:
            X = self._preprocess(X)
            return self._model.predict(X)
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            # graceful fallback: constant predictions instead of raising
            return np.ones(X.shape[0])
    def predict_proba(self, X, **kwargs):
        """Predict the probability of each class from features.
        Only works for classification problems
        Args:
            X: A numpy array of featurized instances, shape n*m.
        Returns:
            A numpy array of shape n*c. c is the # classes.
            Each element at (i,j) is the probability for instance i to be in
            class j.
        """
        assert self._task in CLASSIFICATION, "predict_proba() only for classification."
        X = self._preprocess(X)
        return self._model.predict_proba(X)
    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        """Report the evaluation score of a trained estimator.
        Args:
            X_val: A pandas dataframe of the validation input data.
            y_val: A pandas series of the validation label.
            kwargs: keyword argument of the evaluation function, for example:
                - metric: A string of the metric name or a function
                e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo',
                'f1', 'micro_f1', 'macro_f1', 'log_loss', 'mae', 'mse', 'r2',
                'mape'. Default is 'auto'.
                If metric is given, the score will report the user specified metric.
                If metric is not given, the metric is set to accuracy for classification and r2
                for regression.
                You can also pass a customized metric function, for examples on how to pass a
                customized metric function, please check
                [test/nlp/test_autohf_custom_metric.py](https://github.com/microsoft/FLAML/blob/main/test/nlp/test_autohf_custom_metric.py) and
                [test/automl/test_multiclass.py](https://github.com/microsoft/FLAML/blob/main/test/automl/test_multiclass.py).
        Returns:
            The evaluation score on the validation dataset.
        """
        from .ml import metric_loss_score
        from .ml import is_min_metric
        if self._model is not None:
            if self._task == "rank":
                raise NotImplementedError(
                    "AutoML.score() is not implemented for ranking"
                )
            else:
                X_val = self._preprocess(X_val)
                metric = kwargs.get("metric", None)
                if metric:
                    y_pred = self.predict(X_val, **kwargs)
                    # report losses as-is; convert gains to 1 - loss
                    if is_min_metric(metric):
                        return metric_loss_score(metric, y_pred, y_val)
                    else:
                        return 1.0 - metric_loss_score(metric, y_pred, y_val)
                else:
                    return self._model.score(X_val, y_val, **kwargs)
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return 0.0
    def cleanup(self):
        """Release the trained model."""
        del self._model
        self._model = None
    @classmethod
    def search_space(cls, data_size, task, **params):
        """[required method] search space.
        Args:
            data_size: A tuple of two integers, number of rows and columns.
            task: A str of the task type, e.g., "binary", "multiclass", "regression".
        Returns:
            A dictionary of the search space.
            Each key is the name of a hyperparameter, and value is a dict with
            its domain (required) and low_cost_init_value, init_value,
            cat_hp_cost (if applicable).
            e.g., ```{'domain': tune.randint(lower=1, upper=10), 'init_value': 1}```.
        """
        return {}
    @classmethod
    def size(cls, config: dict) -> float:
        """[optional method] memory size of the estimator in bytes.
        Args:
            config: A dict of the hyperparameter config.
        Returns:
            A float of the memory size required by the estimator to train the
            given config.
        """
        return 1.0
    @classmethod
    def cost_relative2lgbm(cls) -> float:
        """[optional method] relative cost compared to lightgbm."""
        return 1.0
    @classmethod
    def init(cls):
        """[optional method] initialize the class."""
        pass
    def config2params(self, config: dict) -> dict:
        """[optional method] config dict to params dict
        Args:
            config: A dict of the hyperparameter config.
        Returns:
            A dict that will be passed to self.estimator_class's constructor.
        """
        params = config.copy()
        # FLAML_sample_size is a tuning-control key, not an estimator param
        if "FLAML_sample_size" in params:
            params.pop("FLAML_sample_size")
        return params
class TransformersEstimator(BaseEstimator):
    """The class for fine-tuning language models, using huggingface transformers API."""
    # name of the hyperparameter that caps the number of training iterations
    ITER_HP = "global_max_steps"
    def __init__(self, task="seq-classification", **config):
        super().__init__(task, **config)
        import uuid
        # short random id used to name per-trial output folders
        self.trial_id = str(uuid.uuid1().hex)[:8]
        if task not in NLG_TASKS:  # TODO: not in NLG_TASKS
            from .nlp.huggingface.training_args import (
                TrainingArgumentsForAuto as TrainingArguments,
            )
        else:
            from .nlp.huggingface.training_args import (
                Seq2SeqTrainingArgumentsForAuto as TrainingArguments,
            )
        self._TrainingArguments = TrainingArguments
    @staticmethod
    def _join(X_train, y_train, task):
        """Concatenate features and labels into one dataframe (for Dataset.from_pandas)."""
        y_train = DataFrame(y_train, index=X_train.index)
        # HF data collators expect the column name "labels" for token classification
        y_train.columns = ["label"] if task != TOKENCLASSIFICATION else ["labels"]
        train_df = X_train.join(y_train)
        return train_df
    @classmethod
    def search_space(cls, data_size, task, **params):
        """Fine-tuning search space: learning rate, epochs, batch size, warmup, etc."""
        search_space_dict = {
            "learning_rate": {
                "domain": tune.loguniform(lower=1e-6, upper=1e-3),
                "init_value": 1e-5,
            },
            "num_train_epochs": {
                "domain": tune.loguniform(lower=0.1, upper=10.0),
                "init_value": 3.0,  # to be consistent with roberta
            },
            "per_device_train_batch_size": {
                "domain": tune.choice([4, 8, 16, 32]),
                "init_value": 32,
            },
            "warmup_ratio": {
                "domain": tune.uniform(lower=0.0, upper=0.3),
                "init_value": 0.0,
            },
            "weight_decay": {
                "domain": tune.uniform(lower=0.0, upper=0.3),
                "init_value": 0.0,
            },
            "adam_epsilon": {
                "domain": tune.loguniform(lower=1e-8, upper=1e-6),
                "init_value": 1e-6,
            },
            "seed": {"domain": tune.choice(list(range(40, 45))), "init_value": 42},
            "global_max_steps": {
                # effectively "no cap" by default
                "domain": sys.maxsize,
                "init_value": sys.maxsize,
            },
        }
        return search_space_dict
    @property
    def checkpoint_freq(self):
        # steps between checkpoint/eval: at most one epoch's worth of steps
        # divided by ckpt_per_epoch; "+ 1" guarantees it is never zero
        return (
            int(
                min(self._training_args.num_train_epochs, 1)
                * len(self._X_train)
                / self._training_args.per_device_train_batch_size
                / self._training_args.ckpt_per_epoch
            )
            + 1
        )
    @property
    def fp16(self):
        # mixed precision only when a GPU is allotted to the trial AND requested
        return self._kwargs.get("gpu_per_trial") and self._training_args.fp16
    @property
    def no_cuda(self):
        return not self._kwargs.get("gpu_per_trial")
    def _set_training_args(self, **kwargs):
        """Build self._training_args from fit kwargs, self.params and derived values."""
        from .nlp.utils import date_str, Counter
        # a key may come from either the search space or custom fit kwargs, never both
        for (key, val) in kwargs.items():
            assert key not in self.params, (
                "Since {} is in the search space, it cannot exist in 'custom_fit_kwargs' at the same time."
                "If you need to fix the value of {} to {}, the only way is to add a single-value domain in the search "
                "space by adding:\n '{}': {{ 'domain': {} }} to 'custom_hp'. For example:"
                'automl_settings["custom_hp"] = {{ "transformer": {{ "model_path": {{ "domain" : '
                '"google/electra-small-discriminator" }} }} }}'.format(
                    key, key, val, key, val
                )
            )
        """
        If use has specified any custom args for TrainingArguments, update these arguments
        """
        self._training_args = self._TrainingArguments(**kwargs)
        """
        Update the attributes in TrainingArguments with self.params values
        """
        for key, val in self.params.items():
            if hasattr(self._training_args, key):
                setattr(self._training_args, key, val)
        """
        Update the attributes in TrainingArguments that depends on the values of self.params
        """
        local_dir = os.path.join(
            self._training_args.output_dir, "train_{}".format(date_str())
        )
        if self._use_ray is True:
            import ray
            # under ray, ray owns the trial output directory
            self._training_args.output_dir = ray.tune.get_trial_dir()
        else:
            self._training_args.output_dir = Counter.get_trial_fold_name(
                local_dir, self.params, self.trial_id
            )
        # eval, logging and saving all happen at the same step frequency
        self._training_args.eval_steps = (
            self._training_args.logging_steps
        ) = self._training_args.saving_steps = self.checkpoint_freq
        self._training_args.fp16 = self.fp16
        self._training_args.no_cuda = self.no_cuda
    def _preprocess(self, X, y=None, **kwargs):
        """Tokenize raw-text features; pass already-numeric features through unchanged."""
        from .nlp.utils import tokenize_text, is_a_list_of_str
        is_str = str(X.dtypes[0]) in ("string", "str")
        is_list_of_str = is_a_list_of_str(X[list(X.keys())[0]].to_list()[0])
        if is_str or is_list_of_str:
            return tokenize_text(
                X=X,
                Y=y,
                task=self._task,
                hf_args=self._training_args,
                tokenizer=self.tokenizer,
            )
        else:
            return X, None
    def _model_init(self):
        """Load a fresh model from model_path (used as the Trainer's model_init)."""
        from .nlp.utils import load_model
        this_model = load_model(
            checkpoint_path=self._training_args.model_path,
            task=self._task,
            num_labels=self.num_labels,
        )
        return this_model
    def preprocess_data(self, X, y):
        """Tokenize (X, y) and wrap into a datasets.Dataset; returns (dataset, X, y)."""
        from datasets import Dataset
        # NLG and token classification must preprocess labels together with
        # inputs; for other tasks the labels pass through untouched
        if (self._task not in NLG_TASKS) and (self._task != TOKENCLASSIFICATION):
            processed_X, _ = self._preprocess(X=X, **self._kwargs)
            processed_y = y
        else:
            processed_X, processed_y = self._preprocess(X=X, y=y, **self._kwargs)
        processed_dataset = Dataset.from_pandas(
            TransformersEstimator._join(processed_X, processed_y, self._task)
        )
        return processed_dataset, processed_X, processed_y
    @property
    def num_labels(self):
        # size of the classification head, derived from the training targets;
        # None for tasks whose head size is not label-count based
        from .data import SEQCLASSIFICATION, SEQREGRESSION, TOKENCLASSIFICATION
        if self._task == SEQREGRESSION:
            return 1
        elif self._task == SEQCLASSIFICATION:
            return len(set(self._y_train))
        elif self._task == TOKENCLASSIFICATION:
            # labels are per-token lists; flatten before counting distinct tags
            return len(set([a for b in self._y_train.tolist() for a in b]))
        else:
            return None
    @property
    def tokenizer(self):
        from transformers import AutoTokenizer
        if self._task == SUMMARIZATION:
            return AutoTokenizer.from_pretrained(
                pretrained_model_name_or_path=self._training_args.model_path,
                cache_dir=None,
                use_fast=True,
                revision="main",
                use_auth_token=None,
            )
        else:
            return AutoTokenizer.from_pretrained(
                self._training_args.model_path,
                use_fast=True,
                add_prefix_space=True
                if "roberta" in self._training_args.model_path
                else False,  # If roberta model, must set add_prefix_space to True to avoid the assertion error at
                # https://github.com/huggingface/transformers/blob/main/src/transformers/models/roberta/tokenization_roberta_fast.py#L249
            )
    @property
    def data_collator(self):
        from .nlp.huggingface.data_collator import task_to_datacollator_class
        # only multiple-choice and token classification need a special collator
        return (
            task_to_datacollator_class[self._task](
                tokenizer=self.tokenizer,
                pad_to_multiple_of=8,  # if self._training_args.fp16 else None,
            )
            if self._task in (MULTICHOICECLASSIFICATION, TOKENCLASSIFICATION)
            else None
        )
    def fit(
        self,
        X_train: DataFrame,
        y_train: Series,
        budget=None,
        X_val=None,
        y_val=None,
        gpu_per_trial=None,
        metric=None,
        **kwargs,
    ):
        """Fine-tune the model; returns wall-clock training time in seconds.

        Training is stopped early (via a TrainerCallback) when the time budget
        or the global_max_steps hyperparameter is reached.
        """
        import transformers
        transformers.logging.set_verbosity_error()
        from transformers import TrainerCallback
        from transformers.trainer_utils import set_seed
        from .nlp.huggingface.trainer import TrainerForAuto
        try:
            from ray.tune import is_session_enabled
            self._use_ray = is_session_enabled()
        except ImportError:
            self._use_ray = False
        this_params = self.params
        self._kwargs = kwargs
        self._X_train, self._y_train = X_train, y_train
        self._set_training_args(**kwargs)
        train_dataset, self._X_train, self._y_train = self.preprocess_data(
            X_train, y_train
        )
        if X_val is not None:
            eval_dataset, self._X_val, self._y_val = self.preprocess_data(X_val, y_val)
        else:
            eval_dataset, self._X_val, self._y_val = None, None, None
        set_seed(self.params.get("seed", self._training_args.seed))
        self._metric = metric
        class EarlyStoppingCallbackForAuto(TrainerCallback):
            # closes over `budget` and `this_params` from the enclosing fit()
            def on_train_begin(self, args, state, control, **callback_kwargs):
                self.train_begin_time = time.time()
            def on_step_begin(self, args, state, control, **callback_kwargs):
                self.step_begin_time = time.time()
            def on_step_end(self, args, state, control, **callback_kwargs):
                if state.global_step == 1:
                    self.time_per_iter = time.time() - self.step_begin_time
                # stop when (a) one more step would exceed the budget, or
                # (b) the tuned global_max_steps cap is reached
                if (
                    budget
                    and (
                        time.time() + self.time_per_iter
                        > self.train_begin_time + budget
                    )
                    or state.global_step >= this_params[TransformersEstimator.ITER_HP]
                ):
                    control.should_training_stop = True
                    control.should_save = True
                    control.should_evaluate = True
                return control
            def on_epoch_end(self, args, state, control, **callback_kwargs):
                # always checkpoint/evaluate at the final (or stopping) epoch
                if (
                    control.should_training_stop
                    or state.epoch + 1 >= args.num_train_epochs
                ):
                    control.should_save = True
                    control.should_evaluate = True
        self._trainer = TrainerForAuto(
            args=self._training_args,
            model_init=self._model_init,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            tokenizer=self.tokenizer,
            data_collator=self.data_collator,
            compute_metrics=self._compute_metrics_by_dataset_name,
            callbacks=[EarlyStoppingCallbackForAuto],
        )
        if self._task in NLG_TASKS:
            setattr(self._trainer, "_is_seq2seq", True)
        """
        When not using ray for tuning, set the limit of CUDA_VISIBLE_DEVICES to math.ceil(gpu_per_trial),
        so each estimator does not see all the GPUs
        """
        if gpu_per_trial is not None:
            tmp_cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
            self._trainer.args._n_gpu = gpu_per_trial
            # if gpu_per_trial == 0:
            #     os.environ["CUDA_VISIBLE_DEVICES"] = ""
            if tmp_cuda_visible_devices.count(",") != math.ceil(gpu_per_trial) - 1:
                os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
                    [str(x) for x in range(math.ceil(gpu_per_trial))]
                )
        # NOTE(review): `time` is already used above in this method, so this
        # local import appears redundant — confirm and consider removing.
        import time
        start_time = time.time()
        self._trainer.train()
        if gpu_per_trial is not None:
            # restore the environment for subsequent trials
            os.environ["CUDA_VISIBLE_DEVICES"] = tmp_cuda_visible_devices
        self.params[self.ITER_HP] = self._trainer.state.global_step
        self._checkpoint_path = self._select_checkpoint(self._trainer)
        self._ckpt_remains = list(self._trainer.ckpt_to_metric.keys())
        if hasattr(self._trainer, "intermediate_results"):
            # sort intermediate results by their recording key (time/step)
            self.intermediate_results = [
                x[1]
                for x in sorted(
                    self._trainer.intermediate_results.items(), key=lambda x: x[0]
                )
            ]
        self._trainer = None
        return time.time() - start_time
    def _delete_one_ckpt(self, ckpt_location):
        # only delete local checkpoints when ray is not managing trial storage
        if self._use_ray is False:
            try:
                shutil.rmtree(ckpt_location)
            except FileNotFoundError:
                logger.warning("checkpoint {} not found".format(ckpt_location))
    def cleanup(self):
        """Release the model and remove leftover checkpoints from disk."""
        super().cleanup()
        if hasattr(self, "_ckpt_remains"):
            for each_ckpt in self._ckpt_remains:
                self._delete_one_ckpt(each_ckpt)
    def _select_checkpoint(self, trainer):
        """Pick the checkpoint with the lowest eval_loss and delete the others."""
        from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
        if trainer.ckpt_to_metric:
            best_ckpt, _ = min(
                trainer.ckpt_to_metric.items(), key=lambda x: x[1]["eval_loss"]
            )
            best_ckpt_global_step = trainer.ckpt_to_global_step[best_ckpt]
            for each_ckpt in list(trainer.ckpt_to_metric):
                if each_ckpt != best_ckpt:
                    del trainer.ckpt_to_metric[each_ckpt]
                    del trainer.ckpt_to_global_step[each_ckpt]
                    self._delete_one_ckpt(each_ckpt)
        else:
            # no evaluation ever ran: fall back to the last checkpoint path
            best_ckpt_global_step = trainer.state.global_step
            best_ckpt = os.path.join(
                trainer.args.output_dir,
                f"{PREFIX_CHECKPOINT_DIR}-{best_ckpt_global_step}",
            )
        self.params[self.ITER_HP] = best_ckpt_global_step
        logger.debug(trainer.state.global_step)
        logger.debug(trainer.ckpt_to_global_step)
        return best_ckpt
    def _compute_metrics_by_dataset_name(self, eval_pred):
        """compute_metrics hook for the Trainer; returns {'automl_metric': loss, ...}."""
        if isinstance(self._metric, str):
            from .ml import metric_loss_score
            from .nlp.utils import postprocess_text
            predictions, labels = eval_pred
            if self._task in NLG_TASKS:
                if isinstance(predictions, tuple):
                    predictions = np.argmax(predictions[0], axis=2)
                decoded_preds = self.tokenizer.batch_decode(
                    predictions, skip_special_tokens=True
                )
                # -100 marks ignored label positions; replace before decoding
                labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
                decoded_labels = self.tokenizer.batch_decode(
                    labels, skip_special_tokens=True
                )
                predictions, labels = postprocess_text(decoded_preds, decoded_labels)
            else:
                predictions = (
                    np.squeeze(predictions)
                    if self._task == SEQREGRESSION
                    else np.argmax(predictions, axis=2)
                    if self._task == TOKENCLASSIFICATION
                    else np.argmax(predictions, axis=1)
                )
            metric_dict = {
                "automl_metric": metric_loss_score(
                    metric_name=self._metric,
                    y_predict=predictions,
                    y_true=labels,
                    labels=self._training_args.label_list,
                )
            }
        else:
            # user-supplied metric function
            loss, metric_dict = self._metric(
                X_test=self._X_val,
                y_test=self._y_val,
                estimator=self,
                labels=None,
                X_train=self._X_train,
                y_train=self._y_train,
            )
            metric_dict["automl_metric"] = loss
        return metric_dict
    def _init_model_for_predict(self):
        """Build a fresh Trainer loading the best checkpoint, for predict/score."""
        from .nlp.huggingface.trainer import TrainerForAuto
        """
        Need to reinit training_args because of a bug in deepspeed: if not reinit, the deepspeed config will be inconsistent
        with HF config https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py#L947
        """
        training_args = self._TrainingArguments(
            local_rank=-1, model_path=self._checkpoint_path, fp16=self.fp16
        )
        for key, val in self._training_args.__dict__.items():
            if key not in ("local_rank", "model_path", "fp16"):
                setattr(training_args, key, val)
        self._training_args = training_args
        new_trainer = TrainerForAuto(
            model=self._model_init(),
            args=self._training_args,
            data_collator=self.data_collator,
            compute_metrics=self._compute_metrics_by_dataset_name,
        )
        if self._task in NLG_TASKS:
            setattr(new_trainer, "_is_seq2seq", True)
        return new_trainer
    def predict_proba(self, X, **pred_kwargs):
        """Return raw prediction scores; classification tasks only."""
        from datasets import Dataset
        if pred_kwargs:
            for key, val in pred_kwargs.items():
                setattr(self._training_args, key, val)
        assert (
            self._task in CLASSIFICATION
        ), "predict_proba() only for classification tasks."
        X_test, _ = self._preprocess(X, **self._kwargs)
        test_dataset = Dataset.from_pandas(X_test)
        new_trainer = self._init_model_for_predict()
        predictions = new_trainer.predict(test_dataset)
        return predictions.predictions
    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        """Evaluate the fine-tuned model on a validation set via Trainer.evaluate."""
        import transformers
        transformers.logging.set_verbosity_error()
        # NOTE(review): requires 'metric' in kwargs; raises KeyError otherwise
        self._metric = kwargs["metric"]
        eval_dataset, X_val, y_val = self.preprocess_data(X_val, y_val)
        new_trainer = self._init_model_for_predict()
        return new_trainer.evaluate(eval_dataset)
    def predict(self, X, **pred_kwargs):
        """Predict labels/summaries for X; the output format depends on the task."""
        import transformers
        from datasets import Dataset
        transformers.logging.set_verbosity_error()
        if pred_kwargs:
            for key, val in pred_kwargs.items():
                setattr(self._training_args, key, val)
        X_test, _ = self._preprocess(X, **self._kwargs)
        test_dataset = Dataset.from_pandas(X_test)
        new_trainer = self._init_model_for_predict()
        if self._task not in NLG_TASKS:
            predictions = new_trainer.predict(test_dataset)
        else:
            predictions = new_trainer.predict(
                test_dataset,
                metric_key_prefix="predict",
            )
        if self._task == SEQCLASSIFICATION:
            return np.argmax(predictions.predictions, axis=1)
        elif self._task == SEQREGRESSION:
            return predictions.predictions.reshape((len(predictions.predictions),))
        elif self._task == TOKENCLASSIFICATION:
            return np.argmax(predictions.predictions, axis=2)
        elif self._task == SUMMARIZATION:
            decoded_preds = self.tokenizer.batch_decode(
                predictions.predictions, skip_special_tokens=True
            )
            return decoded_preds
        elif self._task == MULTICHOICECLASSIFICATION:
            return np.argmax(predictions.predictions, axis=1)
    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        # ensure the iteration-cap hyperparameter always exists
        params[TransformersEstimator.ITER_HP] = params.get(
            TransformersEstimator.ITER_HP, sys.maxsize
        )
        return params
class TransformersEstimatorModelSelection(TransformersEstimator):
    """TransformersEstimator variant that also tunes which pretrained model to use."""
    def __init__(self, task="seq-classification", **config):
        super().__init__(task, **config)
    @classmethod
    def search_space(cls, data_size, task, **params):
        """Return the base fine-tuning search space extended with a 'model_path' choice.

        For model selection the same candidate list is used regardless of memory
        constraints; if OOM occurs, the user should narrow the space themselves.
        """
        candidate_models = [
            "google/electra-base-discriminator",
            "bert-base-uncased",
            "roberta-base",
            "facebook/muppet-roberta-base",
            "google/electra-small-discriminator",
        ]
        space = TransformersEstimator.search_space(data_size, task, **params)
        space["model_path"] = {
            "domain": tune.choice(candidate_models),
            "init_value": "facebook/muppet-roberta-base",
        }
        return space
class SKLearnEstimator(BaseEstimator):
    """The base class for tuning scikit-learn estimators."""
    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
    def _preprocess(self, X):
        """Encode categorical/string features as integer codes.

        Pandas 'category' columns are replaced by their codes on a copy of the
        frame; a non-numeric numpy array is round-tripped through a DataFrame
        so that string columns become category codes as well.
        """
        if isinstance(X, DataFrame):
            categorical = X.select_dtypes(include=["category"]).columns
            if not categorical.empty:
                X = X.copy()
                for column in categorical:
                    X[column] = X[column].cat.codes
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # dtype kind outside bool/uint/int/float => treat as object array
            frame = DataFrame(X)
            for column in frame.columns:
                if isinstance(frame[column][0], str):
                    frame[column] = frame[column].astype("category").cat.codes
            X = frame.to_numpy()
        return X
class LGBMEstimator(BaseEstimator):
    """The class for tuning LGBM, using sklearn API."""
    ITER_HP = "n_estimators"
    # whether the estimator supports fit-time callbacks (used for the
    # time-budget / free-memory guard); subclasses may override
    HAS_CALLBACK = True
    DEFAULT_ITER = 100
    @classmethod
    def search_space(cls, data_size, **params):
        upper = max(5, min(32768, int(data_size[0])))  # upper must be larger than lower
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "num_leaves": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "min_child_samples": {
                "domain": tune.lograndint(lower=2, upper=2**7 + 1),
                "init_value": 20,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1.0),
                "init_value": 0.1,
            },
            "log_max_bin": {  # log transformed with base 2
                "domain": tune.lograndint(lower=3, upper=11),
                "init_value": 8,
            },
            "colsample_bytree": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "reg_alpha": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1 / 1024,
            },
            "reg_lambda": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1.0,
            },
        }
    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        if "log_max_bin" in params:
            # lightgbm's max_bin = 2**log_max_bin - 1
            params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
        return params
    @classmethod
    def size(cls, config):
        """Rough model memory size in bytes for the given config."""
        num_leaves = int(
            round(
                config.get("num_leaves")
                or config.get("max_leaves")
                or 1 << config.get("max_depth", 16)
            )
        )
        n_estimators = int(round(config["n_estimators"]))
        return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8
    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        if "verbose" not in self.params:
            self.params["verbose"] = -1
        if "regression" == task:
            from lightgbm import LGBMRegressor
            self.estimator_class = LGBMRegressor
        elif "rank" == task:
            from lightgbm import LGBMRanker
            self.estimator_class = LGBMRanker
        else:
            from lightgbm import LGBMClassifier
            self.estimator_class = LGBMClassifier
        # per-iteration time/memory statistics, measured lazily during fit()
        self._time_per_iter = None
        self._train_size = 0
        self._mem_per_iter = -1
        # disable the callback path when the subclass provides no callbacks
        self.HAS_CALLBACK = self.HAS_CALLBACK and self._callbacks(0, 0) is not None
    def _preprocess(self, X):
        if (
            not isinstance(X, DataFrame)
            and issparse(X)
            and np.issubdtype(X.dtype, np.integer)
        ):
            # sparse integer matrices are cast to float for lightgbm
            X = X.astype(float)
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # numpy array is not of numeric dtype
            X = DataFrame(X)
            for col in X.columns:
                if isinstance(X[col][0], str):
                    X[col] = X[col].astype("category").cat.codes
            X = X.to_numpy()
        return X
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train with an optional time budget.

        When callbacks are unavailable, the budget is enforced by probing the
        per-iteration time/memory cost (1 iteration, then up to 4) and then
        capping the iteration count accordingly. With callbacks, early
        stopping is delegated to self._callbacks.
        """
        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        n_iter = self.params.get(self.ITER_HP, self.DEFAULT_ITER)
        trained = False
        if not self.HAS_CALLBACK:
            mem0 = psutil.virtual_memory().available if psutil is not None else 1
            if (
                (
                    not self._time_per_iter
                    or abs(self._train_size - X_train.shape[0]) > 4
                )
                and budget is not None
                or self._mem_per_iter < 0
                and psutil is not None
            ) and n_iter > 1:
                # probe: train 1 iteration to measure startup + per-iter cost
                self.params[self.ITER_HP] = 1
                self._t1 = self._fit(X_train, y_train, **kwargs)
                if budget is not None and self._t1 >= budget or n_iter == 1:
                    return self._t1
                mem1 = psutil.virtual_memory().available if psutil is not None else 1
                self._mem1 = mem0 - mem1
                # second probe with up to 4 iterations for a better estimate
                self.params[self.ITER_HP] = min(n_iter, 4)
                self._t2 = self._fit(X_train, y_train, **kwargs)
                mem2 = psutil.virtual_memory().available if psutil is not None else 1
                self._mem2 = max(mem0 - mem2, self._mem1)
                # if self._mem1 <= 0:
                #     self._mem_per_iter = self._mem2 / (self.params[self.ITER_HP] + 1)
                # elif self._mem2 <= 0:
                #     self._mem_per_iter = self._mem1
                # else:
                self._mem_per_iter = min(
                    self._mem1, self._mem2 / self.params[self.ITER_HP]
                )
                # if self._mem_per_iter <= 1 and psutil is not None:
                #     n_iter = self.params[self.ITER_HP]
                self._time_per_iter = (
                    (self._t2 - self._t1) / (self.params[self.ITER_HP] - 1)
                    if self._t2 > self._t1
                    else self._t1
                    if self._t1
                    else 0.001
                )
                self._train_size = X_train.shape[0]
                if (
                    budget is not None
                    and self._t1 + self._t2 >= budget
                    or n_iter == self.params[self.ITER_HP]
                ):
                    # self.params[self.ITER_HP] = n_iter
                    return time.time() - start_time
                trained = True
            # logger.debug(mem0)
            # logger.debug(self._mem_per_iter)
            if n_iter > 1:
                # cap iterations by remaining time budget and by free memory
                max_iter = min(
                    n_iter,
                    int(
                        (budget - time.time() + start_time - self._t1)
                        / self._time_per_iter
                        + 1
                    )
                    if budget is not None
                    else n_iter,
                    int((1 - FREE_MEM_RATIO) * mem0 / self._mem_per_iter)
                    if psutil is not None and self._mem_per_iter > 0
                    else n_iter,
                )
                if trained and max_iter <= self.params[self.ITER_HP]:
                    return time.time() - start_time
                # when not trained, train at least one iter
                self.params[self.ITER_HP] = max(max_iter, 1)
        if self.HAS_CALLBACK:
            kwargs_callbacks = kwargs.get("callbacks")
            if kwargs_callbacks:
                callbacks = kwargs_callbacks + self._callbacks(start_time, deadline)
                kwargs.pop("callbacks")
            else:
                callbacks = self._callbacks(start_time, deadline)
            if isinstance(self, XGBoostSklearnEstimator):
                from xgboost import __version__
                # NOTE(review): lexicographic string comparison; misorders
                # versions like "1.10.0" vs "1.6.0" — confirm before relying on it.
                if __version__ >= "1.6.0":
                    # since xgboost>=1.6.0, callbacks can't be passed in fit()
                    self.params["callbacks"] = callbacks
                    callbacks = None
            self._fit(
                X_train,
                y_train,
                callbacks=callbacks,
                **kwargs,
            )
            if callbacks is None:
                # for xgboost>=1.6.0, pop callbacks to enable pickle
                callbacks = self.params.pop("callbacks")
                self._model.set_params(callbacks=callbacks[:-1])
            best_iteration = (
                self._model.get_booster().best_iteration
                if isinstance(self, XGBoostSklearnEstimator)
                else self._model.best_iteration_
            )
            if best_iteration is not None:
                # shrink n_estimators to the early-stopped iteration count
                self._model.set_params(n_estimators=best_iteration + 1)
        else:
            self._fit(X_train, y_train, **kwargs)
        train_time = time.time() - start_time
        return train_time
    def _callbacks(self, start_time, deadline) -> List[Callable]:
        return [partial(self._callback, start_time, deadline)]
    def _callback(self, start_time, deadline, env) -> None:
        """LightGBM callback: stop early when out of time or low on free memory."""
        from lightgbm.callback import EarlyStopException
        now = time.time()
        if env.iteration == 0:
            self._time_per_iter = now - start_time
        # stop if one more iteration would miss the deadline
        if now + self._time_per_iter > deadline:
            raise EarlyStopException(env.iteration, env.evaluation_result_list)
        if psutil is not None:
            mem = psutil.virtual_memory()
            if mem.available / mem.total < FREE_MEM_RATIO:
                raise EarlyStopException(env.iteration, env.evaluation_result_list)
class XGBoostEstimator(SKLearnEstimator):
    """The class for tuning XGBoost regressor, not using sklearn API."""
    DEFAULT_ITER = 10
    @classmethod
    def search_space(cls, data_size, **params):
        upper = max(5, min(32768, int(data_size[0])))  # upper must be larger than lower
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "max_leaves": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "max_depth": {
                "domain": tune.choice([0, 6, 12]),
                "init_value": 0,
            },
            "min_child_weight": {
                "domain": tune.loguniform(lower=0.001, upper=128),
                "init_value": 1.0,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1.0),
                "init_value": 0.1,
            },
            "subsample": {
                "domain": tune.uniform(lower=0.1, upper=1.0),
                "init_value": 1.0,
            },
            "colsample_bylevel": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "colsample_bytree": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "reg_alpha": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1 / 1024,
            },
            "reg_lambda": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1.0,
            },
        }
    @classmethod
    def size(cls, config):
        """Memory size estimate in bytes; identical to LGBM's formula."""
        return LGBMEstimator.size(config)
    @classmethod
    def cost_relative2lgbm(cls):
        # empirical training cost relative to lightgbm
        return 1.6
    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        max_depth = params["max_depth"] = params.get("max_depth", 0)
        if max_depth == 0:
            # depth 0 = unlimited depth; grow leaf-wise like lightgbm
            params["grow_policy"] = params.get("grow_policy", "lossguide")
            params["tree_method"] = params.get("tree_method", "hist")
        # params["booster"] = params.get("booster", "gbtree")
        params["use_label_encoder"] = params.get("use_label_encoder", False)
        if "n_jobs" in config:
            # the native xgboost API names this parameter nthread
            params["nthread"] = params.pop("n_jobs")
        return params
    def __init__(
        self,
        task="regression",
        **config,
    ):
        super().__init__(task, **config)
        self.params["verbosity"] = 0
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train with xgboost's native API (xgb.train) under an optional time budget."""
        import xgboost as xgb
        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        if issparse(X_train):
            # NOTE(review): lexicographic string comparison; misorders versions
            # like "1.10.0" vs "1.6.0" — confirm before relying on it.
            if xgb.__version__ < "1.6.0":
                # "auto" fails for sparse input since xgboost 1.6.0
                self.params["tree_method"] = "auto"
        else:
            X_train = self._preprocess(X_train)
        if "sample_weight" in kwargs:
            dtrain = xgb.DMatrix(X_train, label=y_train, weight=kwargs["sample_weight"])
        else:
            dtrain = xgb.DMatrix(X_train, label=y_train)
        objective = self.params.get("objective")
        if isinstance(objective, str):
            obj = None
        else:
            # a callable custom objective is passed via `obj`, not in params
            obj = objective
            if "objective" in self.params:
                del self.params["objective"]
        _n_estimators = self.params.pop("n_estimators")
        callbacks = XGBoostEstimator._callbacks(start_time, deadline)
        if callbacks:
            self._model = xgb.train(
                self.params,
                dtrain,
                _n_estimators,
                obj=obj,
                callbacks=callbacks,
            )
            self.params["n_estimators"] = self._model.best_iteration + 1
        else:
            self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
            self.params["n_estimators"] = _n_estimators
        # restore the params mutated above so get_params stays consistent
        self.params["objective"] = objective
        del dtrain
        train_time = time.time() - start_time
        return train_time
    def predict(self, X, **kwargs):
        import xgboost as xgb
        if not issparse(X):
            X = self._preprocess(X)
        # native booster expects a DMatrix, not a raw array/frame
        dtest = xgb.DMatrix(X)
        return super().predict(dtest)
    @classmethod
    def _callbacks(cls, start_time, deadline):
        """Return a TrainingCallback list enforcing the deadline and free-memory floor."""
        try:
            from xgboost.callback import TrainingCallback
        except ImportError:  # for xgboost<1.3
            return None
        class ResourceLimit(TrainingCallback):
            def after_iteration(self, model, epoch, evals_log) -> bool:
                now = time.time()
                if epoch == 0:
                    self._time_per_iter = now - start_time
                # stop if one more iteration would miss the deadline
                if now + self._time_per_iter > deadline:
                    return True
                if psutil is not None:
                    mem = psutil.virtual_memory()
                    if mem.available / mem.total < FREE_MEM_RATIO:
                        return True
                return False
        return [ResourceLimit()]
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
    """The class for tuning XGBoost with unlimited depth, using sklearn API."""
    DEFAULT_ITER = 10
    @classmethod
    def search_space(cls, data_size, **params):
        # same space as the native-API estimator, minus max_depth
        # (depth is unlimited here; max_leaves governs tree size instead)
        space = XGBoostEstimator.search_space(data_size)
        space.pop("max_depth")
        return space
    @classmethod
    def cost_relative2lgbm(cls):
        return XGBoostEstimator.cost_relative2lgbm()
    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        max_depth = params["max_depth"] = params.get("max_depth", 0)
        if max_depth == 0:
            # depth 0 = unlimited depth; grow leaf-wise like lightgbm
            params["grow_policy"] = params.get("grow_policy", "lossguide")
            params["tree_method"] = params.get("tree_method", "hist")
        params["use_label_encoder"] = params.get("use_label_encoder", False)
        return params
    def __init__(
        self,
        task="binary",
        **config,
    ):
        super().__init__(task, **config)
        # "verbose" is a lightgbm param inherited from LGBMEstimator.__init__;
        # xgboost uses "verbosity" instead
        del self.params["verbose"]
        self.params["verbosity"] = 0
        import xgboost as xgb
        self.estimator_class = xgb.XGBRegressor
        if "rank" == task:
            self.estimator_class = xgb.XGBRanker
        elif task in CLASSIFICATION:
            self.estimator_class = xgb.XGBClassifier
        self._xgb_version = xgb.__version__
    def fit(self, X_train, y_train, budget=None, **kwargs):
        # NOTE(review): lexicographic string comparison; misorders versions
        # like "1.10.0" vs "1.6.0" — confirm before relying on it.
        if issparse(X_train) and self._xgb_version < "1.6.0":
            # "auto" fails for sparse input since xgboost 1.6.0
            self.params["tree_method"] = "auto"
        if kwargs.get("gpu_per_trial"):
            self.params["tree_method"] = "gpu_hist"
            kwargs.pop("gpu_per_trial")
        return super().fit(X_train, y_train, budget, **kwargs)
    def _callbacks(self, start_time, deadline) -> List[Callable]:
        # reuse the native-API resource-limit callbacks
        return XGBoostEstimator._callbacks(start_time, deadline)
class XGBoostLimitDepthEstimator(XGBoostSklearnEstimator):
    """The class for tuning XGBoost with limited depth, using sklearn API."""

    @classmethod
    def search_space(cls, data_size, **params):
        # Start from the unlimited-depth space, then trade max_leaves
        # for a bounded max_depth (depth-wise growth).
        space = XGBoostEstimator.search_space(data_size)
        del space["max_leaves"]
        depth_cap = min(max(6, int(np.log2(data_size[0]))), 16)
        space["max_depth"] = {
            "domain": tune.randint(lower=1, upper=depth_cap),
            "init_value": 6,
            "low_cost_init_value": 1,
        }
        space["learning_rate"]["init_value"] = 0.3
        space["n_estimators"]["init_value"] = 10
        return space

    @classmethod
    def cost_relative2lgbm(cls):
        return 64
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
    """The class for tuning Random Forest."""

    HAS_CALLBACK = False  # sklearn forests have no per-iteration callback API
    nrows = 101

    @classmethod
    def search_space(cls, data_size, task, **params):
        # Remember the training-set size so max_leaves can be bounded by it.
        RandomForestEstimator.nrows = int(data_size[0])
        upper = min(2048, RandomForestEstimator.nrows)
        # sqrt(n_features) fraction is the common default for classification.
        init = 1 / np.sqrt(data_size[1]) if task in CLASSIFICATION else 1
        lower = min(0.1, init)
        space = {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=max(5, upper)),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "max_features": {
                "domain": tune.loguniform(lower=lower, upper=1.0),
                "init_value": init,
            },
            "max_leaves": {
                "domain": tune.lograndint(
                    lower=4,
                    # at most half the number of rows, capped at 32768
                    upper=max(5, min(32768, RandomForestEstimator.nrows >> 1)),
                ),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
        }
        if task in CLASSIFICATION:
            space["criterion"] = {
                "domain": tune.choice(["gini", "entropy"]),
            }
        return space

    @classmethod
    def cost_relative2lgbm(cls):
        return 2

    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        # sklearn's name for the leaf cap is max_leaf_nodes.
        if "max_leaves" in params:
            params["max_leaf_nodes"] = params.get(
                "max_leaf_nodes", params.pop("max_leaves")
            )
        # criterion only applies to classification forests.
        if self._task not in CLASSIFICATION and "criterion" in config:
            params.pop("criterion")
        return params

    def __init__(
        self,
        task="binary",
        **params,
    ):
        super().__init__(task, **params)
        self.params["verbose"] = 0
        self.estimator_class = RandomForestRegressor
        if task in CLASSIFICATION:
            self.estimator_class = RandomForestClassifier
class ExtraTreesEstimator(RandomForestEstimator):
    """The class for tuning Extra Trees."""

    @classmethod
    def cost_relative2lgbm(cls):
        return 1.9

    def __init__(self, task="binary", **params):
        super().__init__(task, **params)
        # Regression-like tasks (including ts_forecast regression, whose
        # task string contains "regression") get the regressor; everything
        # else falls back to the classifier.
        self.estimator_class = (
            ExtraTreesRegressor if "regression" in task else ExtraTreesClassifier
        )
class LRL1Classifier(SKLearnEstimator):
    """The class for tuning Logistic Regression with L1 regularization."""

    @classmethod
    def search_space(cls, **params):
        # Single hyperparameter: inverse regularization strength C.
        return {
            "C": {
                "domain": tune.loguniform(lower=0.03125, upper=32768.0),
                "init_value": 1.0,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 160

    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        # saga is the solver in sklearn that supports the l1 penalty.
        for key, default in (("tol", 0.0001), ("solver", "saga"), ("penalty", "l1")):
            params.setdefault(key, default)
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        assert task in CLASSIFICATION, "LogisticRegression for classification task only"
        self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
    """The class for tuning Logistic Regression with L2 regularization."""

    limit_resource = True

    @classmethod
    def search_space(cls, **params):
        # Same space as the L1 variant: only C is tuned.
        return LRL1Classifier.search_space(**params)

    @classmethod
    def cost_relative2lgbm(cls):
        return 25

    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        # lbfgs is sklearn's default solver and supports the l2 penalty.
        for key, default in (("tol", 0.0001), ("solver", "lbfgs"), ("penalty", "l2")):
            params.setdefault(key, default)
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        assert task in CLASSIFICATION, "LogisticRegression for classification task only"
        self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
    """The class for tuning CatBoost."""

    ITER_HP = "n_estimators"
    DEFAULT_ITER = 1000

    @classmethod
    def search_space(cls, data_size, **params):
        # Larger datasets get a smaller upper bound for early stopping.
        upper = max(min(round(1500000 / data_size[0]), 150), 12)
        return {
            "early_stopping_rounds": {
                "domain": tune.lograndint(lower=10, upper=upper),
                "init_value": 10,
                "low_cost_init_value": 10,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=0.005, upper=0.2),
                "init_value": 0.1,
            },
            "n_estimators": {
                "domain": 8192,
                "init_value": 8192,
            },
        }

    @classmethod
    def size(cls, config):
        # Rough memory estimate (bytes) assuming 64 leaves per tree.
        n_estimators = config.get("n_estimators", 8192)
        max_leaves = 64
        return (max_leaves * 3 + (max_leaves - 1) * 4 + 1.0) * n_estimators * 8

    @classmethod
    def cost_relative2lgbm(cls):
        return 15

    def _preprocess(self, X):
        # CatBoost rejects float-valued category labels; rename them to str.
        if isinstance(X, DataFrame):
            cat_columns = X.select_dtypes(include=["category"]).columns
            if not cat_columns.empty:
                X = X.copy()
                X[cat_columns] = X[cat_columns].apply(
                    lambda x: x.cat.rename_categories(
                        [
                            str(c) if isinstance(c, float) else c
                            for c in x.cat.categories
                        ]
                    )
                )
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # numpy array is not of numeric dtype: encode string columns
            # as category codes, then convert back to a numeric array.
            X = DataFrame(X)
            for col in X.columns:
                if isinstance(X[col][0], str):
                    X[col] = X[col].astype("category").cat.codes
            X = X.to_numpy()
        return X

    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        params["n_estimators"] = params.get("n_estimators", 8192)
        # catboost names the parallelism knob thread_count.
        if "n_jobs" in params:
            params["thread_count"] = params.pop("n_jobs")
        return params

    def __init__(
        self,
        task="binary",
        **config,
    ):
        super().__init__(task, **config)
        self.params.update(
            {
                "verbose": config.get("verbose", False),
                "random_seed": config.get("random_seed", 10242048),
            }
        )
        from catboost import CatBoostRegressor

        self.estimator_class = CatBoostRegressor
        if task in CLASSIFICATION:
            from catboost import CatBoostClassifier

            self.estimator_class = CatBoostClassifier

    def fit(self, X_train, y_train, budget=None, **kwargs):
        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        # Per-fit scratch dir for catboost's training artifacts; removed below.
        train_dir = f"catboost_{str(start_time)}"
        X_train = self._preprocess(X_train)
        if isinstance(X_train, DataFrame):
            cat_features = list(X_train.select_dtypes(include="category").columns)
        else:
            cat_features = []
        # Hold out the tail 10% (at most 1000 rows) as the eval set.
        n = max(int(len(y_train) * 0.9), len(y_train) - 1000)
        X_tr, y_tr = X_train[:n], y_train[:n]
        if "sample_weight" in kwargs:
            weight = kwargs["sample_weight"]
            if weight is not None:
                # Truncate the weights to match the training split.
                kwargs["sample_weight"] = weight[:n]
        else:
            weight = None
        from catboost import Pool, __version__

        model = self.estimator_class(train_dir=train_dir, **self.params)
        if __version__ >= "0.26":
            # catboost>=0.26 supports fit-time callbacks for budget enforcement.
            model.fit(
                X_tr,
                y_tr,
                cat_features=cat_features,
                eval_set=Pool(
                    data=X_train[n:], label=y_train[n:], cat_features=cat_features
                ),
                callbacks=CatBoostEstimator._callbacks(start_time, deadline),
                **kwargs,
            )
        else:
            model.fit(
                X_tr,
                y_tr,
                cat_features=cat_features,
                eval_set=Pool(
                    data=X_train[n:], label=y_train[n:], cat_features=cat_features
                ),
                **kwargs,
            )
        shutil.rmtree(train_dir, ignore_errors=True)
        # Restore the caller's full-length sample_weight.
        if weight is not None:
            kwargs["sample_weight"] = weight
        self._model = model
        self.params[self.ITER_HP] = self._model.tree_count_
        train_time = time.time() - start_time
        return train_time

    @classmethod
    def _callbacks(cls, start_time, deadline):
        """Return catboost callbacks that stop training when the time budget
        or free-memory threshold would be exceeded (False = stop)."""

        class ResourceLimit:
            def after_iteration(self, info) -> bool:
                now = time.time()
                if info.iteration == 1:
                    # catboost iterations are 1-based; estimate per-iter cost.
                    self._time_per_iter = now - start_time
                if now + self._time_per_iter > deadline:
                    return False
                if psutil is not None:
                    mem = psutil.virtual_memory()
                    if mem.available / mem.total < FREE_MEM_RATIO:
                        return False
                return True  # can continue

        return [ResourceLimit()]
class KNeighborsEstimator(BaseEstimator):
    """The class for tuning K-nearest-neighbor models."""

    @classmethod
    def search_space(cls, data_size, **params):
        # Neighbors bounded by half the dataset size, capped at 512.
        upper = min(512, int(data_size[0] / 2))
        return {
            "n_neighbors": {
                "domain": tune.lograndint(lower=1, upper=max(2, upper)),
                "init_value": 5,
                "low_cost_init_value": 1,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 30

    def config2params(self, config: dict) -> dict:
        params = super().config2params(config)
        params["weights"] = params.get("weights", "distance")
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        if task in CLASSIFICATION:
            from sklearn.neighbors import KNeighborsClassifier

            self.estimator_class = KNeighborsClassifier
        else:
            from sklearn.neighbors import KNeighborsRegressor

            self.estimator_class = KNeighborsRegressor

    def _preprocess(self, X):
        # KNN distances are only defined on numeric features, so categorical
        # columns are dropped (erroring out if nothing numeric remains).
        if isinstance(X, DataFrame):
            cat_columns = X.select_dtypes(["category"]).columns
            if X.shape[1] == len(cat_columns):
                raise ValueError("kneighbor requires at least one numeric feature")
            X = X.drop(cat_columns, axis=1)
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # drop categorical columns if any
            X = DataFrame(X)
            cat_columns = []
            for col in X.columns:
                if isinstance(X[col][0], str):
                    cat_columns.append(col)
            X = X.drop(cat_columns, axis=1)
            X = X.to_numpy()
        return X
class Prophet(SKLearnEstimator):
    """The class for tuning Prophet."""

    @classmethod
    def search_space(cls, **params):
        space = {
            "changepoint_prior_scale": {
                "domain": tune.loguniform(lower=0.001, upper=0.05),
                "init_value": 0.05,
                "low_cost_init_value": 0.001,
            },
            "seasonality_prior_scale": {
                "domain": tune.loguniform(lower=0.01, upper=10),
                "init_value": 10,
            },
            "holidays_prior_scale": {
                "domain": tune.loguniform(lower=0.01, upper=10),
                "init_value": 10,
            },
            "seasonality_mode": {
                "domain": tune.choice(["additive", "multiplicative"]),
                "init_value": "multiplicative",
            },
        }
        return space

    def __init__(self, task="ts_forecast", n_jobs=1, **params):
        super().__init__(task, **params)

    def _join(self, X_train, y_train):
        """Join X and y into the single dataframe format prophet expects."""
        assert TS_TIMESTAMP_COL in X_train, (
            "Dataframe for training ts_forecast model must have column"
            f' "{TS_TIMESTAMP_COL}" with the dates in X_train.'
        )
        y_train = DataFrame(y_train, columns=[TS_VALUE_COL])
        train_df = X_train.join(y_train)
        return train_df

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit a Prophet model; returns the training time in seconds."""
        from prophet import Prophet

        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        # Every column besides the timestamp and target is an extra regressor.
        cols = list(train_df)
        cols.remove(TS_TIMESTAMP_COL)
        cols.remove(TS_VALUE_COL)
        logging.getLogger("prophet").setLevel(logging.WARNING)
        model = Prophet(**self.params)
        for regressor in cols:
            model.add_regressor(regressor)
        with suppress_stdout_stderr():
            model.fit(train_df)
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def predict(self, X, **kwargs):
        """Predict for the timestamps (and regressors) in dataframe X."""
        if isinstance(X, int):
            raise ValueError(
                "predict() with steps is only supported for arima/sarimax."
                " For Prophet, pass a dataframe with the first column containing"
                " the timestamp values."
            )
        if self._model is not None:
            X = self._preprocess(X)
            forecast = self._model.predict(X)
            return forecast["yhat"]
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return np.ones(X.shape[0])

    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        """Evaluate on a validation set.

        Returns the metric loss when a `metric` kwarg is supplied,
        otherwise the R^2 score of the predictions.
        """
        from sklearn.metrics import r2_score

        from .ml import metric_loss_score

        y_pred = self.predict(X_val)
        self._metric = kwargs.get("metric", None)
        if self._metric:
            return metric_loss_score(self._metric, y_pred, y_val)
        else:
            # Bug fix: sklearn's r2_score signature is (y_true, y_pred); the
            # arguments were previously swapped, which changes the result
            # because R^2 is not symmetric in its arguments.
            return r2_score(y_val, y_pred)
class ARIMA(Prophet):
    """The class for tuning ARIMA."""

    @classmethod
    def search_space(cls, **params):
        # p = AR order, d = differencing degree, q = MA order.
        space = {
            "p": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "d": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
        }
        return space

    def _join(self, X_train, y_train):
        # statsmodels expects a DatetimeIndex instead of a timestamp column.
        train_df = super()._join(X_train, y_train)
        train_df.index = to_datetime(train_df[TS_TIMESTAMP_COL])
        train_df = train_df.drop(TS_TIMESTAMP_COL, axis=1)
        return train_df

    def fit(self, X_train, y_train, budget=None, **kwargs):
        import warnings

        warnings.filterwarnings("ignore")
        from statsmodels.tsa.arima.model import ARIMA as ARIMA_estimator

        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        # Columns other than the target are exogenous regressors.
        regressors = list(train_df)
        regressors.remove(TS_VALUE_COL)
        if regressors:
            model = ARIMA_estimator(
                train_df[[TS_VALUE_COL]],
                exog=train_df[regressors],
                order=(self.params["p"], self.params["d"], self.params["q"]),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        else:
            model = ARIMA_estimator(
                train_df,
                order=(self.params["p"], self.params["d"], self.params["q"]),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        with suppress_stdout_stderr():
            model = model.fit()
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def predict(self, X, **kwargs):
        """Forecast the next X steps (int) or the date range of a dataframe."""
        if self._model is not None:
            if isinstance(X, int):
                forecast = self._model.forecast(steps=X)
            elif isinstance(X, DataFrame):
                start = X[TS_TIMESTAMP_COL].iloc[0]
                end = X[TS_TIMESTAMP_COL].iloc[-1]
                if len(X.columns) > 1:
                    # Remaining columns are exogenous regressors.
                    X = self._preprocess(X.drop(columns=TS_TIMESTAMP_COL))
                    regressors = list(X)
                    forecast = self._model.predict(
                        start=start, end=end, exog=X[regressors]
                    )
                else:
                    forecast = self._model.predict(start=start, end=end)
            else:
                raise ValueError(
                    "X needs to be either a pandas Dataframe with dates as the first column"
                    " or an int number of periods for predict()."
                )
            return forecast
        else:
            return np.ones(X if isinstance(X, int) else X.shape[0])
class SARIMAX(ARIMA):
    """The class for tuning SARIMA."""

    @classmethod
    def search_space(cls, **params):
        # p/d/q are the non-seasonal orders; P/D/Q/s the seasonal ones.
        space = {
            "p": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "d": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "P": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "D": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "Q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "s": {
                "domain": tune.choice([1, 4, 6, 12]),
                "init_value": 12,
            },
        }
        return space

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit a SARIMAX model; returns the training time in seconds."""
        import warnings

        warnings.filterwarnings("ignore")
        from statsmodels.tsa.statespace.sarimax import SARIMAX as SARIMAX_estimator

        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        regressors = list(train_df)
        regressors.remove(TS_VALUE_COL)
        # Bug fix: statsmodels' SARIMAX takes the seasonal component via the
        # `seasonal_order` keyword; the previous `seasonality_order` is not a
        # recognized parameter, so the tuned seasonal terms were not applied.
        seasonal_order = (
            self.params["P"],
            self.params["D"],
            self.params["Q"],
            self.params["s"],
        )
        if regressors:
            model = SARIMAX_estimator(
                train_df[[TS_VALUE_COL]],
                exog=train_df[regressors],
                order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=seasonal_order,
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        else:
            model = SARIMAX_estimator(
                train_df,
                order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=seasonal_order,
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        with suppress_stdout_stderr():
            model = model.fit()
        train_time = time.time() - current_time
        self._model = model
        return train_time
class TS_SKLearn(SKLearnEstimator):
    """The class for tuning SKLearn Regressors for time-series forecasting, using hcrystalball"""

    base_class = SKLearnEstimator

    @classmethod
    def search_space(cls, data_size, pred_horizon, **params):
        # The wrapped regressor's space, plus lag/horizon options.
        space = cls.base_class.search_space(data_size, **params)
        space.update(
            {
                "optimize_for_horizon": {
                    "domain": tune.choice([True, False]),
                    "init_value": False,
                    "low_cost_init_value": False,
                },
                "lags": {
                    "domain": tune.randint(
                        lower=1, upper=max(2, int(np.sqrt(data_size[0])))
                    ),
                    "init_value": 3,
                },
            }
        )
        return space

    def __init__(self, task="ts_forecast", **params):
        super().__init__(task, **params)
        self.hcrystaball_model = None
        self.ts_task = (
            "regression" if task in TS_FORECASTREGRESSION else "classification"
        )

    def transform_X(self, X):
        # First column is the timestamp; remaining columns (if any) become
        # exogenous features indexed by that timestamp.
        cols = list(X)
        if len(cols) == 1:
            ds_col = cols[0]
            X = DataFrame(index=X[ds_col])
        elif len(cols) > 1:
            ds_col = cols[0]
            exog_cols = cols[1:]
            X = X[exog_cols].set_index(X[ds_col])
        return X

    def _fit(self, X_train, y_train, budget=None, **kwargs):
        from hcrystalball.wrappers import get_sklearn_wrapper

        X_train = self.transform_X(X_train)
        X_train = self._preprocess(X_train)
        # lags / optimize_for_horizon belong to the wrapper, not the regressor.
        params = self.params.copy()
        lags = params.pop("lags")
        optimize_for_horizon = params.pop("optimize_for_horizon")
        estimator = self.base_class(task=self.ts_task, **params)
        self.hcrystaball_model = get_sklearn_wrapper(estimator.estimator_class)
        self.hcrystaball_model.lags = int(lags)
        self.hcrystaball_model.fit(X_train, y_train)
        if optimize_for_horizon:
            # Direct Multi-step Forecast Strategy - fit a separate model for each horizon
            model_list = []
            for i in range(1, kwargs["period"] + 1):
                (
                    X_fit,
                    y_fit,
                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                    X_train, y_train, i
                )
                self.hcrystaball_model.model.set_params(**estimator.params)
                model = self.hcrystaball_model.model.fit(X_fit, y_fit)
                model_list.append(model)
            self._model = model_list
        else:
            (
                X_fit,
                y_fit,
            ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                X_train, y_train, kwargs["period"]
            )
            self.hcrystaball_model.model.set_params(**estimator.params)
            model = self.hcrystaball_model.model.fit(X_fit, y_fit)
            self._model = model

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train the model; returns the training time in seconds."""
        current_time = time.time()
        self._fit(X_train, y_train, budget=budget, **kwargs)
        train_time = time.time() - current_time
        return train_time

    def predict(self, X, **kwargs):
        if self._model is not None:
            X = self.transform_X(X)
            X = self._preprocess(X)
            if isinstance(self._model, list):
                # One model per horizon step; model i predicts the i-th row.
                assert len(self._model) == len(
                    X
                ), "Model is optimized for horizon, length of X must be equal to `period`."
                preds = []
                for i in range(1, len(self._model) + 1):
                    (
                        X_pred,
                        _,
                    ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                        X.iloc[:i, :]
                    )
                    preds.append(self._model[i - 1].predict(X_pred)[-1])
                forecast = DataFrame(
                    data=np.asarray(preds).reshape(-1, 1),
                    columns=[self.hcrystaball_model.name],
                    index=X.index,
                )
            else:
                (
                    X_pred,
                    _,
                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(X)
                forecast = self._model.predict(X_pred)
            return forecast
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return np.ones(X.shape[0])
class LGBM_TS(TS_SKLearn):
    """The class for tuning LGBM Regressor for time-series forecasting."""

    base_class = LGBMEstimator
class XGBoost_TS(TS_SKLearn):
    """The class for tuning XGBoost Regressor for time-series forecasting."""

    base_class = XGBoostSklearnEstimator
# catboost regressor is invalid because it has a `name` parameter, making it incompatible with hcrystalball
# class CatBoost_TS_Regressor(TS_Regressor):
# base_class = CatBoostEstimator
class RF_TS(TS_SKLearn):
    """The class for tuning Random Forest Regressor for time-series forecasting."""

    base_class = RandomForestEstimator
class ExtraTrees_TS(TS_SKLearn):
    """The class for tuning Extra Trees Regressor for time-series forecasting."""

    base_class = ExtraTreesEstimator
class XGBoostLimitDepth_TS(TS_SKLearn):
    """The class for tuning XGBoost Regressor with limited depth for time-series forecasting."""

    base_class = XGBoostLimitDepthEstimator
class suppress_stdout_stderr(object):
    """Context manager that silences stdout and stderr at the OS level.

    Redirects file descriptors 1 and 2 to os.devnull, so output emitted by
    C extensions (e.g. prophet/statsmodels internals) is suppressed as well,
    not just Python-level writes to sys.stdout/sys.stderr.
    """

    def __init__(self):
        # Open a pair of null files
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
        # Save the actual stdout (1) and stderr (2) file descriptors.
        self.save_fds = (os.dup(1), os.dup(2))

    def __enter__(self):
        # Assign the null pointers to stdout and stderr.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)

    def __exit__(self, *_):
        # Re-assign the real stdout/stderr back to (1) and (2)
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # Close all duplicated descriptors. Bug fix: the saved fds were
        # previously never closed, leaking two file descriptors per use and
        # eventually exhausting the fd table under repeated training calls.
        for fd in self.null_fds + list(self.save_fds):
            os.close(fd)
| 36.646787 | 144 | 0.541121 |
from contextlib import contextmanager
from functools import partial
import signal
import os
from typing import Callable, List
import numpy as np
import time
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier, DummyRegressor
from scipy.sparse import issparse
import logging
import shutil
from pandas import DataFrame, Series, to_datetime
import sys
import math
from . import tune
from .data import (
group_counts,
CLASSIFICATION,
TS_FORECASTREGRESSION,
TS_TIMESTAMP_COL,
TS_VALUE_COL,
SEQCLASSIFICATION,
SEQREGRESSION,
TOKENCLASSIFICATION,
SUMMARIZATION,
NLG_TASKS,
MULTICHOICECLASSIFICATION,
)
try:
import psutil
except ImportError:
psutil = None
try:
import resource
except ImportError:
resource = None
logger = logging.getLogger("flaml.automl")
FREE_MEM_RATIO = 0.2
def TimeoutHandler(sig, frame):
    """Signal handler installed by `limit_resource`: turn SIGALRM into a
    TimeoutError carrying the signal number and interrupted frame."""
    raise TimeoutError(sig, frame)
@contextmanager
def limit_resource(memory_limit, time_limit):
    """Context manager that caps address-space memory and wall-clock time.

    Args:
        memory_limit: Soft cap in bytes for RLIMIT_AS; values <= 0 disable it.
        time_limit: Seconds until SIGALRM fires (raising TimeoutError via
            TimeoutHandler); None disables the alarm. Alarm installation only
            works in the main thread; failures are silently ignored.
    """
    if memory_limit > 0:
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        # Only apply the cap when it actually lowers the current soft limit
        # and does not exceed the hard limit.
        if soft < 0 and (hard < 0 or memory_limit <= hard) or memory_limit < soft:
            try:
                resource.setrlimit(resource.RLIMIT_AS, (int(memory_limit), hard))
            except ValueError:
                # setrlimit can fail on some platforms; proceed without the cap
                pass
    main_thread = False
    if time_limit is not None:
        try:
            signal.signal(signal.SIGALRM, TimeoutHandler)
            # alarm(0) would cancel, so schedule at least 1 second.
            signal.alarm(int(time_limit) or 1)
            main_thread = True
        except ValueError:
            # signal.signal only works in the main thread
            pass
    try:
        yield
    finally:
        if main_thread:
            signal.alarm(0)
        if memory_limit > 0:
            # Restore the original limits.
            resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
class BaseEstimator:
    """The abstract base class for all FLAML-tuned estimators.

    Subclasses set `estimator_class` and typically override `search_space`,
    `cost_relative2lgbm`, `config2params` and `_preprocess`.
    """

    def __init__(self, task="binary", **config):
        self._task = task
        self.params = self.config2params(config)
        self.estimator_class = self._model = None
        # `_estimator_type` may be injected via config (e.g. by sklearn
        # cloning); otherwise it is derived from the task.
        if "_estimator_type" in config:
            self._estimator_type = self.params.pop("_estimator_type")
        else:
            self._estimator_type = (
                "classifier" if task in CLASSIFICATION else "regressor"
            )

    def get_params(self, deep=False):
        # sklearn-compatible accessor for the constructor arguments.
        params = self.params.copy()
        params["task"] = self._task
        if hasattr(self, "_estimator_type"):
            params["_estimator_type"] = self._estimator_type
        return params

    @property
    def classes_(self):
        return self._model.classes_

    @property
    def n_features_in_(self):
        return self._model.n_features_in_

    @property
    def model(self):
        """Trained model after fit() is called, or None before fit()."""
        return self._model

    @property
    def estimator(self):
        """Same as `model`: the trained underlying estimator."""
        return self._model

    def _preprocess(self, X):
        # Hook for subclasses to transform X before fit/predict; identity here.
        return X

    def _fit(self, X_train, y_train, **kwargs):
        current_time = time.time()
        if "groups" in kwargs:
            kwargs = kwargs.copy()
            groups = kwargs.pop("groups")
            if self._task == "rank":
                # Ranking learners take per-query group sizes, not raw ids.
                kwargs["group"] = group_counts(groups)
        X_train = self._preprocess(X_train)
        model = self.estimator_class(**self.params)
        if logger.level == logging.DEBUG:
            # xgboost 1.6 doesn't display all the params in the model str
            logger.debug(f"flaml.model - {model} fit started with params {self.params}")
        model.fit(X_train, y_train, **kwargs)
        if logger.level == logging.DEBUG:
            logger.debug(f"flaml.model - {model} fit finished")
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train the model; returns the training time in seconds.

        When the subclass sets `limit_resource` and the `resource` module is
        available, training runs under a memory/time cap; on MemoryError or
        TimeoutError a Dummy model is fit instead so search can continue.
        """
        if (
            getattr(self, "limit_resource", None)
            and resource is not None
            and (budget is not None or psutil is not None)
        ):
            start_time = time.time()
            mem = psutil.virtual_memory() if psutil is not None else None
            try:
                # Cap memory at current usage plus the free share we may take.
                with limit_resource(
                    mem.available * (1 - FREE_MEM_RATIO)
                    + psutil.Process(os.getpid()).memory_info().rss
                    if mem is not None
                    else -1,
                    budget,
                ):
                    train_time = self._fit(X_train, y_train, **kwargs)
            except (MemoryError, TimeoutError) as e:
                logger.warning(f"{e.__class__} {e}")
                if self._task in CLASSIFICATION:
                    model = DummyClassifier()
                else:
                    model = DummyRegressor()
                X_train = self._preprocess(X_train)
                model.fit(X_train, y_train)
                self._model = model
                train_time = time.time() - start_time
        else:
            train_time = self._fit(X_train, y_train, **kwargs)
        return train_time

    def predict(self, X, **kwargs):
        if self._model is not None:
            X = self._preprocess(X)
            return self._model.predict(X)
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return np.ones(X.shape[0])

    def predict_proba(self, X, **kwargs):
        assert self._task in CLASSIFICATION, "predict_proba() only for classification."
        X = self._preprocess(X)
        return self._model.predict_proba(X)

    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        """Report the evaluation score of a trained estimator.

        With a `metric` kwarg, returns the metric loss for minimization
        metrics or 1 - loss otherwise; without one, delegates to the
        underlying model's own score().
        """
        from .ml import metric_loss_score
        from .ml import is_min_metric

        if self._model is not None:
            if self._task == "rank":
                raise NotImplementedError(
                    "AutoML.score() is not implemented for ranking"
                )
            else:
                X_val = self._preprocess(X_val)
                metric = kwargs.get("metric", None)
                if metric:
                    y_pred = self.predict(X_val, **kwargs)
                    if is_min_metric(metric):
                        return metric_loss_score(metric, y_pred, y_val)
                    else:
                        return 1.0 - metric_loss_score(metric, y_pred, y_val)
                else:
                    return self._model.score(X_val, y_val, **kwargs)
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return 0.0

    def cleanup(self):
        # Drop the trained model to free memory between trials.
        del self._model
        self._model = None

    @classmethod
    def search_space(cls, data_size, task, **params):
        """Return the hyperparameter search space; empty in the base class."""
        return {}

    @classmethod
    def size(cls, config: dict) -> float:
        """Estimated model size for a config; 1.0 in the base class."""
        return 1.0

    @classmethod
    def cost_relative2lgbm(cls) -> float:
        """Approximate training cost relative to LGBM; 1.0 in the base class."""
        return 1.0

    @classmethod
    def init(cls):
        # Hook for one-time class-level initialization; no-op by default.
        pass

    def config2params(self, config: dict) -> dict:
        # FLAML_sample_size is a search-control key, not a model parameter.
        params = config.copy()
        if "FLAML_sample_size" in params:
            params.pop("FLAML_sample_size")
        return params
class TransformersEstimator(BaseEstimator):
ITER_HP = "global_max_steps"
def __init__(self, task="seq-classification", **config):
super().__init__(task, **config)
import uuid
self.trial_id = str(uuid.uuid1().hex)[:8]
if task not in NLG_TASKS:
from .nlp.huggingface.training_args import (
TrainingArgumentsForAuto as TrainingArguments,
)
else:
from .nlp.huggingface.training_args import (
Seq2SeqTrainingArgumentsForAuto as TrainingArguments,
)
self._TrainingArguments = TrainingArguments
@staticmethod
def _join(X_train, y_train, task):
y_train = DataFrame(y_train, index=X_train.index)
y_train.columns = ["label"] if task != TOKENCLASSIFICATION else ["labels"]
train_df = X_train.join(y_train)
return train_df
@classmethod
def search_space(cls, data_size, task, **params):
search_space_dict = {
"learning_rate": {
"domain": tune.loguniform(lower=1e-6, upper=1e-3),
"init_value": 1e-5,
},
"num_train_epochs": {
"domain": tune.loguniform(lower=0.1, upper=10.0),
"init_value": 3.0,
},
"per_device_train_batch_size": {
"domain": tune.choice([4, 8, 16, 32]),
"init_value": 32,
},
"warmup_ratio": {
"domain": tune.uniform(lower=0.0, upper=0.3),
"init_value": 0.0,
},
"weight_decay": {
"domain": tune.uniform(lower=0.0, upper=0.3),
"init_value": 0.0,
},
"adam_epsilon": {
"domain": tune.loguniform(lower=1e-8, upper=1e-6),
"init_value": 1e-6,
},
"seed": {"domain": tune.choice(list(range(40, 45))), "init_value": 42},
"global_max_steps": {
"domain": sys.maxsize,
"init_value": sys.maxsize,
},
}
return search_space_dict
@property
def checkpoint_freq(self):
return (
int(
min(self._training_args.num_train_epochs, 1)
* len(self._X_train)
/ self._training_args.per_device_train_batch_size
/ self._training_args.ckpt_per_epoch
)
+ 1
)
@property
def fp16(self):
return self._kwargs.get("gpu_per_trial") and self._training_args.fp16
@property
def no_cuda(self):
return not self._kwargs.get("gpu_per_trial")
def _set_training_args(self, **kwargs):
from .nlp.utils import date_str, Counter
for (key, val) in kwargs.items():
assert key not in self.params, (
"Since {} is in the search space, it cannot exist in 'custom_fit_kwargs' at the same time."
"If you need to fix the value of {} to {}, the only way is to add a single-value domain in the search "
"space by adding:\n '{}': {{ 'domain': {} }} to 'custom_hp'. For example:"
'automl_settings["custom_hp"] = {{ "transformer": {{ "model_path": {{ "domain" : '
'"google/electra-small-discriminator" }} }} }}'.format(
key, key, val, key, val
)
)
self._training_args = self._TrainingArguments(**kwargs)
for key, val in self.params.items():
if hasattr(self._training_args, key):
setattr(self._training_args, key, val)
local_dir = os.path.join(
self._training_args.output_dir, "train_{}".format(date_str())
)
if self._use_ray is True:
import ray
self._training_args.output_dir = ray.tune.get_trial_dir()
else:
self._training_args.output_dir = Counter.get_trial_fold_name(
local_dir, self.params, self.trial_id
)
self._training_args.eval_steps = (
self._training_args.logging_steps
) = self._training_args.saving_steps = self.checkpoint_freq
self._training_args.fp16 = self.fp16
self._training_args.no_cuda = self.no_cuda
def _preprocess(self, X, y=None, **kwargs):
from .nlp.utils import tokenize_text, is_a_list_of_str
is_str = str(X.dtypes[0]) in ("string", "str")
is_list_of_str = is_a_list_of_str(X[list(X.keys())[0]].to_list()[0])
if is_str or is_list_of_str:
return tokenize_text(
X=X,
Y=y,
task=self._task,
hf_args=self._training_args,
tokenizer=self.tokenizer,
)
else:
return X, None
def _model_init(self):
from .nlp.utils import load_model
this_model = load_model(
checkpoint_path=self._training_args.model_path,
task=self._task,
num_labels=self.num_labels,
)
return this_model
def preprocess_data(self, X, y):
from datasets import Dataset
if (self._task not in NLG_TASKS) and (self._task != TOKENCLASSIFICATION):
processed_X, _ = self._preprocess(X=X, **self._kwargs)
processed_y = y
else:
processed_X, processed_y = self._preprocess(X=X, y=y, **self._kwargs)
processed_dataset = Dataset.from_pandas(
TransformersEstimator._join(processed_X, processed_y, self._task)
)
return processed_dataset, processed_X, processed_y
@property
def num_labels(self):
from .data import SEQCLASSIFICATION, SEQREGRESSION, TOKENCLASSIFICATION
if self._task == SEQREGRESSION:
return 1
elif self._task == SEQCLASSIFICATION:
return len(set(self._y_train))
elif self._task == TOKENCLASSIFICATION:
return len(set([a for b in self._y_train.tolist() for a in b]))
else:
return None
@property
def tokenizer(self):
from transformers import AutoTokenizer
if self._task == SUMMARIZATION:
return AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=self._training_args.model_path,
cache_dir=None,
use_fast=True,
revision="main",
use_auth_token=None,
)
else:
return AutoTokenizer.from_pretrained(
self._training_args.model_path,
use_fast=True,
add_prefix_space=True
if "roberta" in self._training_args.model_path
else False,
)
@property
def data_collator(self):
from .nlp.huggingface.data_collator import task_to_datacollator_class
return (
task_to_datacollator_class[self._task](
tokenizer=self.tokenizer,
pad_to_multiple_of=8,
)
if self._task in (MULTICHOICECLASSIFICATION, TOKENCLASSIFICATION)
else None
)
    def fit(
        self,
        X_train: DataFrame,
        y_train: Series,
        budget=None,
        X_val=None,
        y_val=None,
        gpu_per_trial=None,
        metric=None,
        **kwargs,
    ):
        """Fine-tune a transformer model on (X_train, y_train).

        Args:
            X_train, y_train: training data.
            budget: wall-clock seconds allowed; enforced by the inline
                early-stopping callback below.
            X_val, y_val: optional validation data for per-checkpoint eval.
            gpu_per_trial: GPUs to use; temporarily rewrites
                CUDA_VISIBLE_DEVICES when set.
            metric: metric name or callable for checkpoint evaluation.

        Returns:
            Elapsed training time in seconds (measured from just before
            ``trainer.train()``, not from entry).
        """
        import transformers

        transformers.logging.set_verbosity_error()

        from transformers import TrainerCallback
        from transformers.trainer_utils import set_seed
        from .nlp.huggingface.trainer import TrainerForAuto

        # Checkpoint deletion is skipped later when running under ray tune.
        try:
            from ray.tune import is_session_enabled

            self._use_ray = is_session_enabled()
        except ImportError:
            self._use_ray = False

        # Captured by the callback class below (closure over this_params/budget).
        this_params = self.params
        self._kwargs = kwargs

        self._X_train, self._y_train = X_train, y_train
        self._set_training_args(**kwargs)
        train_dataset, self._X_train, self._y_train = self.preprocess_data(
            X_train, y_train
        )
        if X_val is not None:
            eval_dataset, self._X_val, self._y_val = self.preprocess_data(X_val, y_val)
        else:
            eval_dataset, self._X_val, self._y_val = None, None, None

        set_seed(self.params.get("seed", self._training_args.seed))
        self._metric = metric

        class EarlyStoppingCallbackForAuto(TrainerCallback):
            # Stops training when either the time budget would be exceeded by
            # one more step, or the configured iteration cap is reached.
            def on_train_begin(self, args, state, control, **callback_kwargs):
                self.train_begin_time = time.time()

            def on_step_begin(self, args, state, control, **callback_kwargs):
                self.step_begin_time = time.time()

            def on_step_end(self, args, state, control, **callback_kwargs):
                if state.global_step == 1:
                    # duration of the first step is used as the per-step estimate
                    self.time_per_iter = time.time() - self.step_begin_time
                if (
                    budget
                    and (
                        time.time() + self.time_per_iter
                        > self.train_begin_time + budget
                    )
                    or state.global_step >= this_params[TransformersEstimator.ITER_HP]
                ):
                    control.should_training_stop = True
                    control.should_save = True
                    control.should_evaluate = True
                return control

            def on_epoch_end(self, args, state, control, **callback_kwargs):
                # Always save/evaluate at the final epoch or when stopping.
                if (
                    control.should_training_stop
                    or state.epoch + 1 >= args.num_train_epochs
                ):
                    control.should_save = True
                    control.should_evaluate = True

        self._trainer = TrainerForAuto(
            args=self._training_args,
            model_init=self._model_init,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            tokenizer=self.tokenizer,
            data_collator=self.data_collator,
            compute_metrics=self._compute_metrics_by_dataset_name,
            callbacks=[EarlyStoppingCallbackForAuto],
        )

        if self._task in NLG_TASKS:
            setattr(self._trainer, "_is_seq2seq", True)

        if gpu_per_trial is not None:
            # Save the current device mask so it can be restored after training.
            tmp_cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
            self._trainer.args._n_gpu = gpu_per_trial

            if tmp_cuda_visible_devices.count(",") != math.ceil(gpu_per_trial) - 1:
                os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
                    [str(x) for x in range(math.ceil(gpu_per_trial))]
                )

        # NOTE(review): local import shadows the module-level `time` already
        # used by the callbacks above; harmless but redundant.
        import time

        start_time = time.time()
        self._trainer.train()

        if gpu_per_trial is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = tmp_cuda_visible_devices

        self.params[self.ITER_HP] = self._trainer.state.global_step

        # Keep only the best checkpoint; remember the rest for cleanup().
        self._checkpoint_path = self._select_checkpoint(self._trainer)
        self._ckpt_remains = list(self._trainer.ckpt_to_metric.keys())

        if hasattr(self._trainer, "intermediate_results"):
            # Flatten {step: result} into a step-ordered list of results.
            self.intermediate_results = [
                x[1]
                for x in sorted(
                    self._trainer.intermediate_results.items(), key=lambda x: x[0]
                )
            ]
        self._trainer = None

        return time.time() - start_time
def _delete_one_ckpt(self, ckpt_location):
if self._use_ray is False:
try:
shutil.rmtree(ckpt_location)
except FileNotFoundError:
logger.warning("checkpoint {} not found".format(ckpt_location))
def cleanup(self):
super().cleanup()
if hasattr(self, "_ckpt_remains"):
for each_ckpt in self._ckpt_remains:
self._delete_one_ckpt(each_ckpt)
    def _select_checkpoint(self, trainer):
        """Pick the checkpoint with the lowest eval loss and prune the others.

        Falls back to the final-step checkpoint path when no per-checkpoint
        metrics were recorded. Also syncs the iteration hyperparameter to the
        chosen checkpoint's global step.

        Returns:
            Path of the selected checkpoint directory.
        """
        from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

        if trainer.ckpt_to_metric:
            # Best checkpoint == minimum eval_loss among recorded checkpoints.
            best_ckpt, _ = min(
                trainer.ckpt_to_metric.items(), key=lambda x: x[1]["eval_loss"]
            )
            best_ckpt_global_step = trainer.ckpt_to_global_step[best_ckpt]
            # Iterate over a snapshot (list(...)) since entries are deleted.
            for each_ckpt in list(trainer.ckpt_to_metric):
                if each_ckpt != best_ckpt:
                    del trainer.ckpt_to_metric[each_ckpt]
                    del trainer.ckpt_to_global_step[each_ckpt]
                    self._delete_one_ckpt(each_ckpt)
        else:
            # No recorded metrics: assume the last global step was checkpointed.
            best_ckpt_global_step = trainer.state.global_step
            best_ckpt = os.path.join(
                trainer.args.output_dir,
                f"{PREFIX_CHECKPOINT_DIR}-{best_ckpt_global_step}",
            )
        self.params[self.ITER_HP] = best_ckpt_global_step
        logger.debug(trainer.state.global_step)
        logger.debug(trainer.ckpt_to_global_step)
        return best_ckpt
    def _compute_metrics_by_dataset_name(self, eval_pred):
        """Compute the evaluation metric for the HF trainer.

        When ``self._metric`` is a string, the metric is computed from the raw
        (predictions, labels) pair; otherwise ``self._metric`` is a callable
        evaluated on the stored validation split.

        Returns:
            Dict containing at least the key "automl_metric" (a loss: lower
            is better).
        """
        if isinstance(self._metric, str):
            from .ml import metric_loss_score
            from .nlp.utils import postprocess_text

            predictions, labels = eval_pred
            if self._task in NLG_TASKS:
                if isinstance(predictions, tuple):
                    predictions = np.argmax(predictions[0], axis=2)
                decoded_preds = self.tokenizer.batch_decode(
                    predictions, skip_special_tokens=True
                )
                # -100 marks ignored label positions; replace before decoding.
                labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
                decoded_labels = self.tokenizer.batch_decode(
                    labels, skip_special_tokens=True
                )
                predictions, labels = postprocess_text(decoded_preds, decoded_labels)
            else:
                # Regression: squeeze logits; classification: argmax over the
                # class axis (axis 2 for per-token labels, axis 1 otherwise).
                predictions = (
                    np.squeeze(predictions)
                    if self._task == SEQREGRESSION
                    else np.argmax(predictions, axis=2)
                    if self._task == TOKENCLASSIFICATION
                    else np.argmax(predictions, axis=1)
                )
            metric_dict = {
                "automl_metric": metric_loss_score(
                    metric_name=self._metric,
                    y_predict=predictions,
                    y_true=labels,
                    labels=self._training_args.label_list,
                )
            }
        else:
            # Custom metric callable: evaluated against the held-out split.
            loss, metric_dict = self._metric(
                X_test=self._X_val,
                y_test=self._y_val,
                estimator=self,
                labels=None,
                X_train=self._X_train,
                y_train=self._y_train,
            )
            metric_dict["automl_metric"] = loss
        return metric_dict
    def _init_model_for_predict(self):
        """Build a fresh trainer loaded from the best checkpoint for inference.

        Re-creates the training-args object pointed at ``self._checkpoint_path``
        and copies every other field from the previous args, so prediction uses
        the tuned configuration with the selected weights.
        """
        from .nlp.huggingface.trainer import TrainerForAuto

        training_args = self._TrainingArguments(
            local_rank=-1, model_path=self._checkpoint_path, fp16=self.fp16
        )
        # Copy all remaining settings; the three keys above are already set.
        for key, val in self._training_args.__dict__.items():
            if key not in ("local_rank", "model_path", "fp16"):
                setattr(training_args, key, val)
        self._training_args = training_args

        new_trainer = TrainerForAuto(
            model=self._model_init(),
            args=self._training_args,
            data_collator=self.data_collator,
            compute_metrics=self._compute_metrics_by_dataset_name,
        )
        if self._task in NLG_TASKS:
            setattr(new_trainer, "_is_seq2seq", True)
        return new_trainer
def predict_proba(self, X, **pred_kwargs):
from datasets import Dataset
if pred_kwargs:
for key, val in pred_kwargs.items():
setattr(self._training_args, key, val)
assert (
self._task in CLASSIFICATION
), "predict_proba() only for classification tasks."
X_test, _ = self._preprocess(X, **self._kwargs)
test_dataset = Dataset.from_pandas(X_test)
new_trainer = self._init_model_for_predict()
predictions = new_trainer.predict(test_dataset)
return predictions.predictions
def score(self, X_val: DataFrame, y_val: Series, **kwargs):
import transformers
transformers.logging.set_verbosity_error()
self._metric = kwargs["metric"]
eval_dataset, X_val, y_val = self.preprocess_data(X_val, y_val)
new_trainer = self._init_model_for_predict()
return new_trainer.evaluate(eval_dataset)
    def predict(self, X, **pred_kwargs):
        """Predict labels/values/text for X, post-processed per task.

        Returns argmax class ids for (multi-choice/sequence) classification,
        a flat array for regression, per-token argmax for token
        classification, decoded strings for summarization — and implicitly
        None for any other task (no final else branch).
        """
        import transformers
        from datasets import Dataset

        transformers.logging.set_verbosity_error()

        if pred_kwargs:
            for key, val in pred_kwargs.items():
                setattr(self._training_args, key, val)

        X_test, _ = self._preprocess(X, **self._kwargs)
        test_dataset = Dataset.from_pandas(X_test)

        new_trainer = self._init_model_for_predict()

        if self._task not in NLG_TASKS:
            predictions = new_trainer.predict(test_dataset)
        else:
            # Generation tasks need the "predict" metric prefix.
            predictions = new_trainer.predict(
                test_dataset,
                metric_key_prefix="predict",
            )
        if self._task == SEQCLASSIFICATION:
            return np.argmax(predictions.predictions, axis=1)
        elif self._task == SEQREGRESSION:
            # flatten (n, 1) logits to shape (n,)
            return predictions.predictions.reshape((len(predictions.predictions),))
        elif self._task == TOKENCLASSIFICATION:
            return np.argmax(predictions.predictions, axis=2)
        elif self._task == SUMMARIZATION:
            decoded_preds = self.tokenizer.batch_decode(
                predictions.predictions, skip_special_tokens=True
            )
            return decoded_preds
        elif self._task == MULTICHOICECLASSIFICATION:
            return np.argmax(predictions.predictions, axis=1)
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params[TransformersEstimator.ITER_HP] = params.get(
TransformersEstimator.ITER_HP, sys.maxsize
)
return params
class TransformersEstimatorModelSelection(TransformersEstimator):
    """TransformersEstimator variant that also searches over model checkpoints."""

    def __init__(self, task="seq-classification", **config):
        super().__init__(task, **config)

    @classmethod
    def search_space(cls, data_size, task, **params):
        """Base transformer search space plus a categorical model_path choice."""
        space = TransformersEstimator.search_space(data_size, task, **params)
        candidate_checkpoints = [
            "google/electra-base-discriminator",
            "bert-base-uncased",
            "roberta-base",
            "facebook/muppet-roberta-base",
            "google/electra-small-discriminator",
        ]
        space["model_path"] = {
            "domain": tune.choice(candidate_checkpoints),
            "init_value": "facebook/muppet-roberta-base",
        }
        return space
class SKLearnEstimator(BaseEstimator):
    """Base class for scikit-learn style estimators.

    Adds preprocessing that encodes categorical features as integer codes,
    which most sklearn models require.
    """

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)

    def _preprocess(self, X):
        """Encode categorical data as integer codes.

        DataFrames get their 'category' columns replaced with codes (on a
        copy); non-numeric ndarrays are round-tripped through a DataFrame so
        string columns can be encoded, then returned as an ndarray.
        """
        if isinstance(X, DataFrame):
            categorical = X.select_dtypes(include=["category"]).columns
            if not categorical.empty:
                X = X.copy()
                X[categorical] = X[categorical].apply(lambda col: col.cat.codes)
            return X
        if isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            frame = DataFrame(X)
            for name in frame.columns:
                if isinstance(frame[name][0], str):
                    frame[name] = frame[name].astype("category").cat.codes
            return frame.to_numpy()
        return X
class LGBMEstimator(BaseEstimator):
    """LightGBM estimator with budget-aware fitting.

    With callbacks available, a time/memory-limiting callback stops boosting
    early; otherwise fit() estimates per-iteration time and memory cost with
    two probe fits and caps the iteration count accordingly.
    """

    # Name of the iteration hyperparameter this estimator tunes.
    ITER_HP = "n_estimators"
    # Whether per-iteration callbacks are available for early stopping.
    HAS_CALLBACK = True
    DEFAULT_ITER = 100

    @classmethod
    def search_space(cls, data_size, **params):
        """Hyperparameter search space; tree-size bounds scale with data rows."""
        upper = max(5, min(32768, int(data_size[0])))
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "num_leaves": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "min_child_samples": {
                "domain": tune.lograndint(lower=2, upper=2**7 + 1),
                "init_value": 20,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1.0),
                "init_value": 0.1,
            },
            # searched on a log2 scale; converted to max_bin in config2params
            "log_max_bin": {
                "domain": tune.lograndint(lower=3, upper=11),
                "init_value": 8,
            },
            "colsample_bytree": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "reg_alpha": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1 / 1024,
            },
            "reg_lambda": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1.0,
            },
        }

    def config2params(self, config: dict) -> dict:
        """Convert search config to LightGBM params (log_max_bin -> max_bin)."""
        params = super().config2params(config)
        if "log_max_bin" in params:
            # max_bin must be 2^k - 1
            params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
        return params

    @classmethod
    def size(cls, config):
        """Rough model size in bytes for the given config."""
        num_leaves = int(
            round(
                config.get("num_leaves")
                or config.get("max_leaves")
                or 1 << config.get("max_depth", 16)
            )
        )
        n_estimators = int(round(config["n_estimators"]))
        return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8

    def __init__(self, task="binary", **config):
        """Select the LightGBM class matching the task and init probe state."""
        super().__init__(task, **config)
        if "verbose" not in self.params:
            self.params["verbose"] = -1
        if "regression" == task:
            from lightgbm import LGBMRegressor

            self.estimator_class = LGBMRegressor
        elif "rank" == task:
            from lightgbm import LGBMRanker

            self.estimator_class = LGBMRanker
        else:
            from lightgbm import LGBMClassifier

            self.estimator_class = LGBMClassifier
        # State for the no-callback time/memory estimation in fit().
        self._time_per_iter = None
        self._train_size = 0
        self._mem_per_iter = -1
        # Downgrade to the probe-based path if callbacks are unavailable.
        self.HAS_CALLBACK = self.HAS_CALLBACK and self._callbacks(0, 0) is not None

    def _preprocess(self, X):
        """Prepare X: cast sparse int matrices to float, encode string arrays."""
        if (
            not isinstance(X, DataFrame)
            and issparse(X)
            and np.issubdtype(X.dtype, np.integer)
        ):
            X = X.astype(float)
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            X = DataFrame(X)
            for col in X.columns:
                if isinstance(X[col][0], str):
                    X[col] = X[col].astype("category").cat.codes
            X = X.to_numpy()
        return X

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit within the time budget; returns the elapsed training time.

        Without callbacks, two probe fits (1 iter, then up to 4 iters)
        estimate per-iteration time and memory, and n_estimators is capped
        to what the remaining budget and free memory allow.
        """
        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        n_iter = self.params.get(self.ITER_HP, self.DEFAULT_ITER)
        trained = False
        if not self.HAS_CALLBACK:
            mem0 = psutil.virtual_memory().available if psutil is not None else 1
            # Re-probe when timing info is stale (train size changed) with a
            # budget, or when memory cost is unknown and psutil is available.
            if (
                (
                    not self._time_per_iter
                    or abs(self._train_size - X_train.shape[0]) > 4
                )
                and budget is not None
                or self._mem_per_iter < 0
                and psutil is not None
            ) and n_iter > 1:
                self.params[self.ITER_HP] = 1
                self._t1 = self._fit(X_train, y_train, **kwargs)
                if budget is not None and self._t1 >= budget or n_iter == 1:
                    # One iteration already exhausts the budget.
                    return self._t1
                mem1 = psutil.virtual_memory().available if psutil is not None else 1
                self._mem1 = mem0 - mem1
                self.params[self.ITER_HP] = min(n_iter, 4)
                self._t2 = self._fit(X_train, y_train, **kwargs)
                mem2 = psutil.virtual_memory().available if psutil is not None else 1
                self._mem2 = max(mem0 - mem2, self._mem1)
                # Conservative per-iteration memory estimate from both probes.
                self._mem_per_iter = min(
                    self._mem1, self._mem2 / self.params[self.ITER_HP]
                )
                self._time_per_iter = (
                    (self._t2 - self._t1) / (self.params[self.ITER_HP] - 1)
                    if self._t2 > self._t1
                    else self._t1
                    if self._t1
                    else 0.001
                )
                self._train_size = X_train.shape[0]
                if (
                    budget is not None
                    and self._t1 + self._t2 >= budget
                    or n_iter == self.params[self.ITER_HP]
                ):
                    # Probes used up the budget (or already fit all iters).
                    return time.time() - start_time
                trained = True
            if n_iter > 1:
                # Cap iterations by remaining time and by free-memory headroom.
                max_iter = min(
                    n_iter,
                    int(
                        (budget - time.time() + start_time - self._t1)
                        / self._time_per_iter
                        + 1
                    )
                    if budget is not None
                    else n_iter,
                    int((1 - FREE_MEM_RATIO) * mem0 / self._mem_per_iter)
                    if psutil is not None and self._mem_per_iter > 0
                    else n_iter,
                )
                if trained and max_iter <= self.params[self.ITER_HP]:
                    return time.time() - start_time
                self.params[self.ITER_HP] = max(max_iter, 1)
        if self.HAS_CALLBACK:
            kwargs_callbacks = kwargs.get("callbacks")
            if kwargs_callbacks:
                callbacks = kwargs_callbacks + self._callbacks(start_time, deadline)
                kwargs.pop("callbacks")
            else:
                callbacks = self._callbacks(start_time, deadline)
            if isinstance(self, XGBoostSklearnEstimator):
                from xgboost import __version__

                # xgboost>=1.6.0 takes callbacks via params, not fit()
                if __version__ >= "1.6.0":
                    self.params["callbacks"] = callbacks
                    callbacks = None
            self._fit(
                X_train,
                y_train,
                callbacks=callbacks,
                **kwargs,
            )
            if callbacks is None:
                # for xgboost>=1.6.0, pop callbacks to enable pickle
                callbacks = self.params.pop("callbacks")
                self._model.set_params(callbacks=callbacks[:-1])
            best_iteration = (
                self._model.get_booster().best_iteration
                if isinstance(self, XGBoostSklearnEstimator)
                else self._model.best_iteration_
            )
            if best_iteration is not None:
                self._model.set_params(n_estimators=best_iteration + 1)
        else:
            self._fit(X_train, y_train, **kwargs)
        train_time = time.time() - start_time
        return train_time

    def _callbacks(self, start_time, deadline) -> List[Callable]:
        """Per-iteration callbacks enforcing the time/memory limits."""
        return [partial(self._callback, start_time, deadline)]

    def _callback(self, start_time, deadline, env) -> None:
        """LightGBM callback: stop early near the deadline or on low memory."""
        from lightgbm.callback import EarlyStopException

        now = time.time()
        if env.iteration == 0:
            self._time_per_iter = now - start_time
        if now + self._time_per_iter > deadline:
            raise EarlyStopException(env.iteration, env.evaluation_result_list)
        if psutil is not None:
            mem = psutil.virtual_memory()
            if mem.available / mem.total < FREE_MEM_RATIO:
                raise EarlyStopException(env.iteration, env.evaluation_result_list)
class XGBoostEstimator(SKLearnEstimator):
    """XGBoost estimator using the native (xgb.train / DMatrix) API."""

    DEFAULT_ITER = 10

    @classmethod
    def search_space(cls, data_size, **params):
        """Hyperparameter search space; tree-size bounds scale with data rows."""
        upper = max(5, min(32768, int(data_size[0])))  # upper must be larger than lower
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "max_leaves": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            # 0 means no depth limit (lossguide growth, see config2params)
            "max_depth": {
                "domain": tune.choice([0, 6, 12]),
                "init_value": 0,
            },
            "min_child_weight": {
                "domain": tune.loguniform(lower=0.001, upper=128),
                "init_value": 1.0,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1.0),
                "init_value": 0.1,
            },
            "subsample": {
                "domain": tune.uniform(lower=0.1, upper=1.0),
                "init_value": 1.0,
            },
            "colsample_bylevel": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "colsample_bytree": {
                "domain": tune.uniform(lower=0.01, upper=1.0),
                "init_value": 1.0,
            },
            "reg_alpha": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1 / 1024,
            },
            "reg_lambda": {
                "domain": tune.loguniform(lower=1 / 1024, upper=1024),
                "init_value": 1.0,
            },
        }

    @classmethod
    def size(cls, config):
        """Rough model size in bytes (same formula as LightGBM)."""
        return LGBMEstimator.size(config)

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 1.6

    def config2params(self, config: dict) -> dict:
        """Map search config to native-API params (n_jobs -> nthread, etc.)."""
        params = super().config2params(config)
        max_depth = params["max_depth"] = params.get("max_depth", 0)
        if max_depth == 0:
            # unlimited depth requires leaf-wise growth
            params["grow_policy"] = params.get("grow_policy", "lossguide")
            params["tree_method"] = params.get("tree_method", "hist")
        # params["booster"] = params.get("booster", "gbtree")
        params["use_label_encoder"] = params.get("use_label_encoder", False)
        if "n_jobs" in config:
            params["nthread"] = params.pop("n_jobs")
        return params

    def __init__(
        self,
        task="regression",
        **config,
    ):
        super().__init__(task, **config)
        self.params["verbosity"] = 0

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Train with xgb.train; returns the elapsed training time.

        A custom (non-string) objective is passed via ``obj=`` and removed
        from params for the call, then restored afterwards.
        """
        import xgboost as xgb

        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        if issparse(X_train):
            if xgb.__version__ < "1.6.0":
                # "auto" fails for sparse input since xgboost 1.6.0
                self.params["tree_method"] = "auto"
        else:
            X_train = self._preprocess(X_train)
        if "sample_weight" in kwargs:
            dtrain = xgb.DMatrix(X_train, label=y_train, weight=kwargs["sample_weight"])
        else:
            dtrain = xgb.DMatrix(X_train, label=y_train)

        objective = self.params.get("objective")
        if isinstance(objective, str):
            obj = None
        else:
            obj = objective
            if "objective" in self.params:
                del self.params["objective"]
        _n_estimators = self.params.pop("n_estimators")
        callbacks = XGBoostEstimator._callbacks(start_time, deadline)
        if callbacks:
            self._model = xgb.train(
                self.params,
                dtrain,
                _n_estimators,
                obj=obj,
                callbacks=callbacks,
            )
            # record the iteration count actually used under early stopping
            self.params["n_estimators"] = self._model.best_iteration + 1
        else:
            self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
            self.params["n_estimators"] = _n_estimators
        # restore the objective removed above
        self.params["objective"] = objective
        del dtrain
        train_time = time.time() - start_time
        return train_time

    def predict(self, X, **kwargs):
        """Predict on X after wrapping it in a DMatrix."""
        import xgboost as xgb

        if not issparse(X):
            X = self._preprocess(X)
        dtest = xgb.DMatrix(X)
        return super().predict(dtest)

    @classmethod
    def _callbacks(cls, start_time, deadline):
        """Build an xgboost callback list enforcing time/memory limits.

        Returns None when the callback API is unavailable (xgboost<1.3).
        """
        try:
            from xgboost.callback import TrainingCallback
        except ImportError:  # for xgboost<1.3
            return None

        class ResourceLimit(TrainingCallback):
            # Returning True from after_iteration stops training.
            def after_iteration(self, model, epoch, evals_log) -> bool:
                now = time.time()
                if epoch == 0:
                    self._time_per_iter = now - start_time
                if now + self._time_per_iter > deadline:
                    return True
                if psutil is not None:
                    mem = psutil.virtual_memory()
                    if mem.available / mem.total < FREE_MEM_RATIO:
                        return True
                return False

        return [ResourceLimit()]
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
    """XGBoost estimator using the scikit-learn wrapper API.

    Inherits LGBMEstimator's budget-aware fit(); uses XGBoost's callbacks
    and search space (minus max_depth).
    """

    DEFAULT_ITER = 10

    @classmethod
    def search_space(cls, data_size, **params):
        """XGBoost search space without the max_depth dimension."""
        space = XGBoostEstimator.search_space(data_size)
        space.pop("max_depth")
        return space

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return XGBoostEstimator.cost_relative2lgbm()

    def config2params(self, config: dict) -> dict:
        """Map search config to sklearn-wrapper params."""
        params = super().config2params(config)
        max_depth = params["max_depth"] = params.get("max_depth", 0)
        if max_depth == 0:
            # unlimited depth requires leaf-wise growth
            params["grow_policy"] = params.get("grow_policy", "lossguide")
            params["tree_method"] = params.get("tree_method", "hist")
        params["use_label_encoder"] = params.get("use_label_encoder", False)
        return params

    def __init__(
        self,
        task="binary",
        **config,
    ):
        super().__init__(task, **config)
        # xgboost uses "verbosity"; drop the lightgbm-style "verbose"
        del self.params["verbose"]
        self.params["verbosity"] = 0
        import xgboost as xgb

        self.estimator_class = xgb.XGBRegressor
        if "rank" == task:
            self.estimator_class = xgb.XGBRanker
        elif task in CLASSIFICATION:
            self.estimator_class = xgb.XGBClassifier
        self._xgb_version = xgb.__version__

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit with sparse/GPU handling, then defer to the budgeted base fit."""
        if issparse(X_train) and self._xgb_version < "1.6.0":
            # "auto" fails for sparse input since xgboost 1.6.0
            self.params["tree_method"] = "auto"
        if kwargs.get("gpu_per_trial"):
            self.params["tree_method"] = "gpu_hist"
            kwargs.pop("gpu_per_trial")
        return super().fit(X_train, y_train, budget, **kwargs)

    def _callbacks(self, start_time, deadline) -> List[Callable]:
        """Use the native XGBoost resource-limit callbacks."""
        return XGBoostEstimator._callbacks(start_time, deadline)
class XGBoostLimitDepthEstimator(XGBoostSklearnEstimator):
    """XGBoost (sklearn API) tuned via bounded max_depth instead of max_leaves."""

    @classmethod
    def search_space(cls, data_size, **params):
        """Replace max_leaves with a data-scaled max_depth dimension."""
        space = XGBoostEstimator.search_space(data_size)
        del space["max_leaves"]
        depth_upper = max(6, int(np.log2(data_size[0])))
        space["max_depth"] = {
            "domain": tune.randint(lower=1, upper=min(depth_upper, 16)),
            "init_value": 6,
            "low_cost_init_value": 1,
        }
        # different starting points for the depth-limited variant
        space["learning_rate"]["init_value"] = 0.3
        space["n_estimators"]["init_value"] = 10
        return space

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 64
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
    """Random forest estimator (no per-iteration callback support)."""

    HAS_CALLBACK = False
    nrows = 101  # last-seen training-row count; updated by search_space

    @classmethod
    def search_space(cls, data_size, task, **params):
        """Hyperparameter search space sized to the training data."""
        RandomForestEstimator.nrows = int(data_size[0])
        estimators_upper = min(2048, RandomForestEstimator.nrows)
        features_init = 1 / np.sqrt(data_size[1]) if task in CLASSIFICATION else 1
        features_lower = min(0.1, features_init)
        space = {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=max(5, estimators_upper)),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
            "max_features": {
                "domain": tune.loguniform(lower=features_lower, upper=1.0),
                "init_value": features_init,
            },
            "max_leaves": {
                "domain": tune.lograndint(
                    lower=4,
                    # at most half the training rows, capped at 32768
                    upper=max(5, min(32768, RandomForestEstimator.nrows >> 1)),
                ),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
        }
        if task in CLASSIFICATION:
            space["criterion"] = {
                "domain": tune.choice(["gini", "entropy"]),
            }
        return space

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 2

    def config2params(self, config: dict) -> dict:
        """Map search config to sklearn params (max_leaves -> max_leaf_nodes)."""
        params = super().config2params(config)
        if "max_leaves" in params:
            leaves = params.pop("max_leaves")
            params["max_leaf_nodes"] = params.get("max_leaf_nodes", leaves)
        # criterion only applies to classification forests
        if self._task not in CLASSIFICATION and "criterion" in config:
            params.pop("criterion")
        return params

    def __init__(
        self,
        task="binary",
        **params,
    ):
        super().__init__(task, **params)
        self.params["verbose"] = 0
        self.estimator_class = (
            RandomForestClassifier if task in CLASSIFICATION else RandomForestRegressor
        )
class ExtraTreesEstimator(RandomForestEstimator):
    """Extremely-randomized trees; shares the random-forest search space."""

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 1.9

    def __init__(self, task="binary", **params):
        super().__init__(task, **params)
        self.estimator_class = (
            ExtraTreesRegressor if "regression" in task else ExtraTreesClassifier
        )
class LRL1Classifier(SKLearnEstimator):
    """Logistic regression with L1 penalty (saga solver by default)."""

    @classmethod
    def search_space(cls, **params):
        """Only the inverse regularization strength C is tuned."""
        return {
            "C": {
                "domain": tune.loguniform(lower=0.03125, upper=32768.0),
                "init_value": 1.0,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 160

    def config2params(self, config: dict) -> dict:
        """Fill in solver/penalty defaults for the L1 variant."""
        params = super().config2params(config)
        params.setdefault("tol", 0.0001)
        params.setdefault("solver", "saga")
        params.setdefault("penalty", "l1")
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        assert task in CLASSIFICATION, "LogisticRegression for classification task only"
        self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
    """Logistic regression with L2 penalty (lbfgs solver by default)."""

    limit_resource = True

    @classmethod
    def search_space(cls, **params):
        """Same C-only search space as the L1 variant."""
        return LRL1Classifier.search_space(**params)

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 25

    def config2params(self, config: dict) -> dict:
        """Fill in solver/penalty defaults for the L2 variant."""
        params = super().config2params(config)
        params.setdefault("tol", 0.0001)
        params.setdefault("solver", "lbfgs")
        params.setdefault("penalty", "l2")
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        assert task in CLASSIFICATION, "LogisticRegression for classification task only"
        self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
    """CatBoost estimator with an internal holdout split for early stopping."""

    ITER_HP = "n_estimators"
    DEFAULT_ITER = 1000

    @classmethod
    def search_space(cls, data_size, **params):
        """Search space; early-stopping patience shrinks for larger datasets."""
        upper = max(min(round(1500000 / data_size[0]), 150), 12)
        return {
            "early_stopping_rounds": {
                "domain": tune.lograndint(lower=10, upper=upper),
                "init_value": 10,
                "low_cost_init_value": 10,
            },
            "learning_rate": {
                "domain": tune.loguniform(lower=0.005, upper=0.2),
                "init_value": 0.1,
            },
            # fixed cap; the effective count comes from early stopping
            "n_estimators": {
                "domain": 8192,
                "init_value": 8192,
            },
        }

    @classmethod
    def size(cls, config):
        """Rough model size in bytes for the given config."""
        n_estimators = config.get("n_estimators", 8192)
        max_leaves = 64
        return (max_leaves * 3 + (max_leaves - 1) * 4 + 1.0) * n_estimators * 8

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 15

    def _preprocess(self, X):
        """Prepare X for CatBoost.

        DataFrames keep their categorical columns (CatBoost handles them),
        but float category values are stringified; non-numeric ndarrays have
        string columns encoded as integer codes.
        """
        if isinstance(X, DataFrame):
            cat_columns = X.select_dtypes(include=["category"]).columns
            if not cat_columns.empty:
                X = X.copy()
                X[cat_columns] = X[cat_columns].apply(
                    lambda x: x.cat.rename_categories(
                        [
                            str(c) if isinstance(c, float) else c
                            for c in x.cat.categories
                        ]
                    )
                )
        elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # numpy array is not of numeric dtype
            X = DataFrame(X)
            for col in X.columns:
                if isinstance(X[col][0], str):
                    X[col] = X[col].astype("category").cat.codes
            X = X.to_numpy()
        return X

    def config2params(self, config: dict) -> dict:
        """Map search config to CatBoost params (n_jobs -> thread_count)."""
        params = super().config2params(config)
        params["n_estimators"] = params.get("n_estimators", 8192)
        if "n_jobs" in params:
            params["thread_count"] = params.pop("n_jobs")
        return params

    def __init__(
        self,
        task="binary",
        **config,
    ):
        super().__init__(task, **config)
        self.params.update(
            {
                "verbose": config.get("verbose", False),
                "random_seed": config.get("random_seed", 10242048),
            }
        )
        from catboost import CatBoostRegressor

        self.estimator_class = CatBoostRegressor
        if task in CLASSIFICATION:
            from catboost import CatBoostClassifier

            self.estimator_class = CatBoostClassifier

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit with an internal eval split; returns elapsed training time.

        Uses the first 90% of rows (or all but the last 1000, whichever is
        larger) for training and the remainder as the eval set. Resource
        limit callbacks are only supported from catboost 0.26 on.
        """
        start_time = time.time()
        deadline = start_time + budget if budget else np.inf
        train_dir = f"catboost_{str(start_time)}"
        X_train = self._preprocess(X_train)
        if isinstance(X_train, DataFrame):
            cat_features = list(X_train.select_dtypes(include="category").columns)
        else:
            cat_features = []
        # holdout split point: 90% of rows, but keep at most 1000 for eval
        n = max(int(len(y_train) * 0.9), len(y_train) - 1000)
        X_tr, y_tr = X_train[:n], y_train[:n]
        if "sample_weight" in kwargs:
            weight = kwargs["sample_weight"]
            if weight is not None:
                # truncate weights to the training portion; restored below
                kwargs["sample_weight"] = weight[:n]
        else:
            weight = None
        from catboost import Pool, __version__

        model = self.estimator_class(train_dir=train_dir, **self.params)
        if __version__ >= "0.26":
            model.fit(
                X_tr,
                y_tr,
                cat_features=cat_features,
                eval_set=Pool(
                    data=X_train[n:], label=y_train[n:], cat_features=cat_features
                ),
                callbacks=CatBoostEstimator._callbacks(start_time, deadline),
                **kwargs,
            )
        else:
            model.fit(
                X_tr,
                y_tr,
                cat_features=cat_features,
                eval_set=Pool(
                    data=X_train[n:], label=y_train[n:], cat_features=cat_features
                ),
                **kwargs,
            )
        shutil.rmtree(train_dir, ignore_errors=True)
        if weight is not None:
            kwargs["sample_weight"] = weight
        self._model = model
        # record the tree count actually built under early stopping
        self.params[self.ITER_HP] = self._model.tree_count_
        train_time = time.time() - start_time
        return train_time

    @classmethod
    def _callbacks(cls, start_time, deadline):
        """Build a CatBoost callback list enforcing time/memory limits."""

        class ResourceLimit:
            # Returning False from after_iteration stops training.
            def after_iteration(self, info) -> bool:
                now = time.time()
                if info.iteration == 1:
                    self._time_per_iter = now - start_time
                if now + self._time_per_iter > deadline:
                    return False
                if psutil is not None:
                    mem = psutil.virtual_memory()
                    if mem.available / mem.total < FREE_MEM_RATIO:
                        return False
                return True  # can continue

        return [ResourceLimit()]
class KNeighborsEstimator(BaseEstimator):
    """k-nearest-neighbors estimator; categorical features are dropped."""

    @classmethod
    def search_space(cls, data_size, **params):
        """Only n_neighbors is tuned, bounded by half the data size."""
        upper = min(512, int(data_size[0] / 2))
        return {
            "n_neighbors": {
                "domain": tune.lograndint(lower=1, upper=max(2, upper)),
                "init_value": 5,
                "low_cost_init_value": 1,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        """Relative training cost compared to LightGBM."""
        return 30

    def config2params(self, config: dict) -> dict:
        """Default to distance-weighted voting."""
        params = super().config2params(config)
        params.setdefault("weights", "distance")
        return params

    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        if task in CLASSIFICATION:
            from sklearn.neighbors import KNeighborsClassifier

            self.estimator_class = KNeighborsClassifier
        else:
            from sklearn.neighbors import KNeighborsRegressor

            self.estimator_class = KNeighborsRegressor

    def _preprocess(self, X):
        """Drop categorical columns; distance computation needs numeric data."""
        if isinstance(X, DataFrame):
            cat_columns = X.select_dtypes(["category"]).columns
            if X.shape[1] == len(cat_columns):
                raise ValueError("kneighbor requires at least one numeric feature")
            return X.drop(cat_columns, axis=1)
        if isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # drop categocial columns if any
            frame = DataFrame(X)
            string_cols = [c for c in frame.columns if isinstance(frame[c][0], str)]
            return frame.drop(string_cols, axis=1).to_numpy()
        return X
class Prophet(SKLearnEstimator):
    """Prophet estimator for time-series forecasting.

    Extra columns of X (beyond the timestamp) are added as regressors.
    """

    @classmethod
    def search_space(cls, **params):
        """Prophet hyperparameter search space (prior scales + mode)."""
        space = {
            "changepoint_prior_scale": {
                "domain": tune.loguniform(lower=0.001, upper=0.05),
                "init_value": 0.05,
                "low_cost_init_value": 0.001,
            },
            "seasonality_prior_scale": {
                "domain": tune.loguniform(lower=0.01, upper=10),
                "init_value": 10,
            },
            "holidays_prior_scale": {
                "domain": tune.loguniform(lower=0.01, upper=10),
                "init_value": 10,
            },
            "seasonality_mode": {
                "domain": tune.choice(["additive", "multiplicative"]),
                "init_value": "multiplicative",
            },
        }
        return space

    def __init__(self, task="ts_forecast", n_jobs=1, **params):
        super().__init__(task, **params)

    def _join(self, X_train, y_train):
        """Join features and target into a single training dataframe."""
        assert TS_TIMESTAMP_COL in X_train, (
            "Dataframe for training ts_forecast model must have column"
            f' "{TS_TIMESTAMP_COL}" with the dates in X_train.'
        )
        y_train = DataFrame(y_train, columns=[TS_VALUE_COL])
        train_df = X_train.join(y_train)
        return train_df

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit a Prophet model; returns elapsed training time.

        Every feature column except the timestamp/value columns is added
        as an extra regressor.
        """
        from prophet import Prophet

        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        cols = list(train_df)
        cols.remove(TS_TIMESTAMP_COL)
        cols.remove(TS_VALUE_COL)
        logging.getLogger("prophet").setLevel(logging.WARNING)
        model = Prophet(**self.params)
        for regressor in cols:
            model.add_regressor(regressor)
        with suppress_stdout_stderr():
            model.fit(train_df)
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def predict(self, X, **kwargs):
        """Forecast for the timestamps (and regressors) in dataframe X.

        Raises:
            ValueError: if X is an int (step-count prediction is only
                supported by the arima/sarimax estimators).
        """
        if isinstance(X, int):
            raise ValueError(
                "predict() with steps is only supported for arima/sarimax."
                " For Prophet, pass a dataframe with the first column containing"
                " the timestamp values."
            )
        if self._model is not None:
            X = self._preprocess(X)
            forecast = self._model.predict(X)
            return forecast["yhat"]
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return np.ones(X.shape[0])

    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        """Score predictions on a validation set.

        Uses the metric passed via kwargs if any, else falls back to R².
        """
        from sklearn.metrics import r2_score

        from .ml import metric_loss_score

        y_pred = self.predict(X_val)
        self._metric = kwargs.get("metric", None)
        if self._metric:
            return metric_loss_score(self._metric, y_pred, y_val)
        else:
            # BUGFIX: sklearn's signature is r2_score(y_true, y_pred); the
            # arguments were previously reversed, and R² is not symmetric.
            return r2_score(y_val, y_pred)
class ARIMA(Prophet):
    """ARIMA estimator for time-series forecasting (statsmodels backend).

    Extra feature columns are passed as exogenous regressors.
    """

    @classmethod
    def search_space(cls, **params):
        """Search space over the (p, d, q) order of the ARIMA model."""
        space = {
            "p": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "d": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
        }
        return space

    def _join(self, X_train, y_train):
        """Join X and y, then move the timestamp column into the index."""
        train_df = super()._join(X_train, y_train)
        train_df.index = to_datetime(train_df[TS_TIMESTAMP_COL])
        train_df = train_df.drop(TS_TIMESTAMP_COL, axis=1)
        return train_df

    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit an ARIMA model; returns elapsed training time.

        Any non-target columns are used as exogenous regressors.
        """
        import warnings

        warnings.filterwarnings("ignore")
        from statsmodels.tsa.arima.model import ARIMA as ARIMA_estimator

        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        regressors = list(train_df)
        regressors.remove(TS_VALUE_COL)
        if regressors:
            model = ARIMA_estimator(
                train_df[[TS_VALUE_COL]],
                exog=train_df[regressors],
                order=(self.params["p"], self.params["d"], self.params["q"]),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        else:
            model = ARIMA_estimator(
                train_df,
                order=(self.params["p"], self.params["d"], self.params["q"]),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        with suppress_stdout_stderr():
            model = model.fit()
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def predict(self, X, **kwargs):
        """Forecast X steps ahead (int X) or over the dates in a dataframe X.

        With a multi-column dataframe, the non-timestamp columns are passed
        as exogenous regressors. Returns an all-ones array when not fitted.
        """
        if self._model is not None:
            if isinstance(X, int):
                forecast = self._model.forecast(steps=X)
            elif isinstance(X, DataFrame):
                start = X[TS_TIMESTAMP_COL].iloc[0]
                end = X[TS_TIMESTAMP_COL].iloc[-1]
                if len(X.columns) > 1:
                    X = self._preprocess(X.drop(columns=TS_TIMESTAMP_COL))
                    regressors = list(X)
                    forecast = self._model.predict(
                        start=start, end=end, exog=X[regressors]
                    )
                else:
                    forecast = self._model.predict(start=start, end=end)
            else:
                raise ValueError(
                    "X needs to be either a pandas Dataframe with dates as the first column"
                    " or an int number of periods for predict()."
                )
            return forecast
        else:
            return np.ones(X if isinstance(X, int) else X.shape[0])
class SARIMAX(ARIMA):
    """Seasonal ARIMA with exogenous regressors (statsmodels SARIMAX).

    Extends ARIMA with a seasonal component parameterized by
    (P, D, Q, s) in addition to the non-seasonal (p, d, q) order.
    """
    @classmethod
    def search_space(cls, **params):
        # Non-seasonal (p, d, q) plus seasonal (P, D, Q) orders and the
        # seasonal period s (1 = no seasonality, 4 = quarterly, 12 = monthly).
        space = {
            "p": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "d": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 2,
                "low_cost_init_value": 0,
            },
            "q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "P": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "D": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "Q": {
                "domain": tune.qrandint(lower=0, upper=10, q=1),
                "init_value": 1,
                "low_cost_init_value": 0,
            },
            "s": {
                "domain": tune.choice([1, 4, 6, 12]),
                "init_value": 12,
            },
        }
        return space
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit a SARIMAX model; returns the wall-clock training time in seconds.

        Any columns beyond the target are passed to statsmodels as
        exogenous regressors.
        """
        import warnings
        # statsmodels emits many convergence warnings during tuning.
        warnings.filterwarnings("ignore")
        from statsmodels.tsa.statespace.sarimax import SARIMAX as SARIMAX_estimator
        current_time = time.time()
        train_df = self._join(X_train, y_train)
        train_df = self._preprocess(train_df)
        # Everything except the target column is treated as an exogenous regressor.
        regressors = list(train_df)
        regressors.remove(TS_VALUE_COL)
        # FIX: the statsmodels keyword is ``seasonal_order``; the previous
        # ``seasonality_order`` is not a recognized SARIMAX parameter, so the
        # seasonal component never took effect.
        if regressors:
            model = SARIMAX_estimator(
                train_df[[TS_VALUE_COL]],
                exog=train_df[regressors],
                order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=(
                    self.params["P"],
                    self.params["D"],
                    self.params["Q"],
                    self.params["s"],
                ),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        else:
            model = SARIMAX_estimator(
                train_df,
                order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=(
                    self.params["P"],
                    self.params["D"],
                    self.params["Q"],
                    self.params["s"],
                ),
                enforce_stationarity=False,
                enforce_invertibility=False,
            )
        # Silence statsmodels' fd-level output during optimization.
        with suppress_stdout_stderr():
            model = model.fit()
        train_time = time.time() - current_time
        self._model = model
        return train_time
class TS_SKLearn(SKLearnEstimator):
    """Adapter that turns a tabular sklearn-style estimator into a
    time-series forecaster via hcrystalball's sklearn wrapper.

    Subclasses override ``base_class`` to pick the underlying estimator.
    """
    # The tabular estimator class wrapped for forecasting; overridden by subclasses.
    base_class = SKLearnEstimator
    @classmethod
    def search_space(cls, data_size, pred_horizon, **params):
        # Start from the wrapped estimator's space and add the two
        # forecasting-specific knobs: number of lag features and whether to
        # fit one model per horizon step.
        space = cls.base_class.search_space(data_size, **params)
        space.update(
            {
                "optimize_for_horizon": {
                    "domain": tune.choice([True, False]),
                    "init_value": False,
                    "low_cost_init_value": False,
                },
                "lags": {
                    "domain": tune.randint(
                        lower=1, upper=max(2, int(np.sqrt(data_size[0])))
                    ),
                    "init_value": 3,
                },
            }
        )
        return space
    def __init__(self, task="ts_forecast", **params):
        super().__init__(task, **params)
        # hcrystalball wrapper around the sklearn estimator; set in _fit().
        self.hcrystaball_model = None
        # Map the ts task onto the underlying tabular task type.
        self.ts_task = (
            "regression" if task in TS_FORECASTREGRESSION else "classification"
        )
    def transform_X(self, X):
        # Move the timestamp (first) column into the index; remaining columns,
        # if any, are kept as exogenous features.  A single-column frame
        # becomes an empty frame indexed by the timestamps.
        cols = list(X)
        if len(cols) == 1:
            ds_col = cols[0]
            X = DataFrame(index=X[ds_col])
        elif len(cols) > 1:
            ds_col = cols[0]
            exog_cols = cols[1:]
            X = X[exog_cols].set_index(X[ds_col])
        return X
    def _fit(self, X_train, y_train, budget=None, **kwargs):
        # Build the hcrystalball wrapper and fit either one model for the
        # whole horizon or one model per horizon step.
        from hcrystalball.wrappers import get_sklearn_wrapper
        X_train = self.transform_X(X_train)
        X_train = self._preprocess(X_train)
        params = self.params.copy()
        # lags / optimize_for_horizon belong to this adapter, not to the
        # wrapped estimator, so strip them before constructing it.
        lags = params.pop("lags")
        optimize_for_horizon = params.pop("optimize_for_horizon")
        estimator = self.base_class(task=self.ts_task, **params)
        self.hcrystaball_model = get_sklearn_wrapper(estimator.estimator_class)
        self.hcrystaball_model.lags = int(lags)
        self.hcrystaball_model.fit(X_train, y_train)
        if optimize_for_horizon:
            # Direct multi-step forecast strategy - fit a separate model for each horizon
            model_list = []
            for i in range(1, kwargs["period"] + 1):
                (
                    X_fit,
                    y_fit,
                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                    X_train, y_train, i
                )
                self.hcrystaball_model.model.set_params(**estimator.params)
                model = self.hcrystaball_model.model.fit(X_fit, y_fit)
                model_list.append(model)
            self._model = model_list
        else:
            (
                X_fit,
                y_fit,
            ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                X_train, y_train, kwargs["period"]
            )
            self.hcrystaball_model.model.set_params(**estimator.params)
            model = self.hcrystaball_model.model.fit(X_fit, y_fit)
            self._model = model
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit the forecaster; returns the wall-clock training time in seconds."""
        current_time = time.time()
        self._fit(X_train, y_train, budget=budget, **kwargs)
        train_time = time.time() - current_time
        return train_time
    def predict(self, X, **kwargs):
        """Forecast for the timestamps (and exogenous features) in ``X``.

        When fitted with ``optimize_for_horizon`` the stored model is a list,
        one per horizon step; otherwise a single model.  Returns ones if the
        model is unfit.
        """
        if self._model is not None:
            X = self.transform_X(X)
            X = self._preprocess(X)
            if isinstance(self._model, list):
                assert len(self._model) == len(
                    X
                ), "Model is optimized for horizon, length of X must be equal to `period`."
                preds = []
                # Step i is predicted by the model trained for horizon i; take
                # the last prediction of each per-horizon model.
                for i in range(1, len(self._model) + 1):
                    (
                        X_pred,
                        _,
                    ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
                        X.iloc[:i, :]
                    )
                    preds.append(self._model[i - 1].predict(X_pred)[-1])
                forecast = DataFrame(
                    data=np.asarray(preds).reshape(-1, 1),
                    columns=[self.hcrystaball_model.name],
                    index=X.index,
                )
            else:
                (
                    X_pred,
                    _,
                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(X)
                forecast = self._model.predict(X_pred)
            return forecast
        else:
            logger.warning(
                "Estimator is not fit yet. Please run fit() before predict()."
            )
            return np.ones(X.shape[0])
class LGBM_TS(TS_SKLearn):
    """LightGBM wrapped as a time-series forecaster."""
    base_class = LGBMEstimator
class XGBoost_TS(TS_SKLearn):
    """XGBoost (sklearn API) wrapped as a time-series forecaster."""
    base_class = XGBoostSklearnEstimator
# catboost regressor is invalid because it has a `name` parameter, making it incompatible with hcrystalball
# class CatBoost_TS_Regressor(TS_Regressor):
# base_class = CatBoostEstimator
class RF_TS(TS_SKLearn):
    """Random forest wrapped as a time-series forecaster."""
    base_class = RandomForestEstimator
class ExtraTrees_TS(TS_SKLearn):
    """Extra-trees ensemble wrapped as a time-series forecaster."""
    base_class = ExtraTreesEstimator
class XGBoostLimitDepth_TS(TS_SKLearn):
    """Depth-limited XGBoost wrapped as a time-series forecaster."""
    base_class = XGBoostLimitDepthEstimator
class suppress_stdout_stderr(object):
    """Context manager that silences stdout and stderr at the file
    descriptor level, so output from C extensions is suppressed too.

    On exit the original descriptors are restored and every duplicated
    descriptor is closed.
    """
    def __init__(self):
        # Open a pair of null files (one for stdout, one for stderr).
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
        # Save the actual stdout (1) and stderr (2) file descriptors.
        self.save_fds = (os.dup(1), os.dup(2))
    def __enter__(self):
        # Point fds 1 and 2 at the null files.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)
    def __exit__(self, *_):
        # Re-assign the real stdout/stderr back to (1) and (2).
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # Close all duplicated descriptors.  The previous implementation
        # closed only null_fds, leaking the two save_fds per use.
        for fd in self.null_fds + list(self.save_fds):
            os.close(fd)
| true | true |
f731890bb291e3decda86f604a2ae15b9cc3c857 | 3,892 | py | Python | fishpi/ui/camera_view.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | 18 | 2015-01-17T17:03:07.000Z | 2020-10-17T06:38:26.000Z | fishpi/ui/camera_view.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | null | null | null | fishpi/ui/camera_view.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | 9 | 2015-02-14T01:42:46.000Z | 2019-08-26T20:24:36.000Z | #!/usr/bin/python
#
# FishPi - An autonomous drop in the ocean
#
# Simple viewer for onboard camera
#
import argparse
import io
import sys
import socket
import struct
# from StringIO import StringIO
import wx
class CameraPanel(wx.Panel):
    """wx panel that displays frames streamed over a TCP socket.

    The wire format is a 4-byte little-endian unsigned length followed by
    that many bytes of image data, repeated per frame.  Python 2 code.
    """
    def __init__(self, parent, server, port=8001, enabled=True):
        wx.Panel.__init__(self, parent,
                          size=(320, 240), style=wx.SUNKEN_BORDER)
        self.enabled = enabled
        if self.enabled:
            self.client_socket = socket.socket()
            self.client_socket.connect((server, port))
            # Make a file-like object out of the connection.
            # NOTE(review): mode 'wb' is write-only but read() is called on it
            # in update(); Python 2 socket file objects do not enforce the
            # mode -- confirm before porting to Python 3.
            self.connection = self.client_socket.makefile('wb')
            self.update()
    def update(self):
        """ Update panel with new image. """
        if self.enabled:
            try:
                # Read the length of the image as a 32-bit unsigned int. If the
                # length is zero, quit
                image_len = struct.unpack('<L', self.connection.read(4))[0]
                if not image_len:
                    print "Could not read image length. Skipping.."
                    self.enabled = False
                    return
                print("Image length: %s" % image_len)
                # Construct a stream to hold the image data and read the image
                # data from the connection
                image_stream = io.BytesIO()
                image_stream.write(self.connection.read(image_len))
                # Rewind the stream so wx decodes from the beginning.
                image_stream.seek(0)
                # this part is from the previous version of this.
                # let's see how it integrates with the new code
                # img = wx.ImageFromStream(StringIO(image_stream))
                img = wx.ImageFromStream(image_stream)
                bmp = wx.BitmapFromImage(img)
                ctrl = wx.StaticBitmap(self, -1, bmp)
                # Earlier PIL-based verification, kept for reference:
                # image = Image.open(image_stream)
                # print('Image is %dx%d' % image.size)
                # image.verify()
                # print('Image is verified')
            except Exception, e:
                # Any stream/decode error tears down the connection.
                print("Exception: %s, closing client" % e)
                self.shut_down()
    def shut_down(self):
        # Close the stream and the underlying socket.
        # NOTE(review): if constructed with enabled=False, self.connection
        # was never created, so this would raise AttributeError.
        self.connection.close()
        self.client_socket.close()
class CameraViewer(wx.Frame):
    """ Simple Frame containing CameraPanel and timer callback. """
    def __init__(self, parent, title, server, port):
        # initialise frame
        wx.Frame.__init__(self, parent, title=title, size=(320, 240))
        # add camera frame connected to the given stream endpoint
        print "using camera at %s:%s" % (server, port)
        self.camera_frame = CameraPanel(self, server, port)
        # setup callback timer (milliseconds between frame refreshes)
        interval_time = 250
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
        self.timer.Start(interval_time, False)
        # and go
        self.Show()
    def on_timer(self, event):
        # Timer tick: pull and display the next frame.
        self.camera_frame.update()
def main():
    """ Main entry point for application.
        Parses command line args for server and launches wx.
        Returns 0 on normal exit. """
    # parse cmd line args: stream host and port, with mDNS default host
    parser = argparse.ArgumentParser(description="raspberry pi - onboard view")
    parser.add_argument("-server", help="server for camera stream",
                        default="raspberrypi.local", type=str, action='store')
    parser.add_argument("-port", help="port for camera stream",
                        default=8000, type=int, action='store')
    selected_args = parser.parse_args()
    server = selected_args.server
    port = selected_args.port
    # run UI loop; MainLoop() blocks until the window is closed
    app = wx.App(False)
    frame = CameraViewer(None, "raspberry pi - onboard view", server, port)
    app.MainLoop()
    # tear down the socket/stream after the UI exits
    frame.shut_down()
    return 0
# Script entry point: run the viewer and propagate its exit status.
if __name__ == "__main__":
    status = main()
    sys.exit(status)
| 34.442478 | 79 | 0.591213 |
import argparse
import io
import sys
import socket
import struct
import wx
class CameraPanel(wx.Panel):
def __init__(self, parent, server, port=8001, enabled=True):
wx.Panel.__init__(self, parent,
size=(320, 240), style=wx.SUNKEN_BORDER)
self.enabled = enabled
if self.enabled:
self.client_socket = socket.socket()
self.client_socket.connect((server, port))
self.connection = self.client_socket.makefile('wb')
self.update()
def update(self):
""" Update panel with new image. """
if self.enabled:
try:
image_len = struct.unpack('<L', self.connection.read(4))[0]
if not image_len:
print "Could not read image length. Skipping.."
self.enabled = False
return
print("Image length: %s" % image_len)
image_stream = io.BytesIO()
image_stream.write(self.connection.read(image_len))
image_stream.seek(0)
# img = wx.ImageFromStream(StringIO(image_stream))
img = wx.ImageFromStream(image_stream)
bmp = wx.BitmapFromImage(img)
ctrl = wx.StaticBitmap(self, -1, bmp)
# image = Image.open(image_stream)
# print('Image is %dx%d' % image.size)
# image.verify()
# print('Image is verified')
except Exception, e:
print("Exception: %s, closing client" % e)
self.shut_down()
def shut_down(self):
self.connection.close()
self.client_socket.close()
class CameraViewer(wx.Frame):
""" Simple Frame containing CameraPanel and timer callback. """
def __init__(self, parent, title, server, port):
# initialise frame
wx.Frame.__init__(self, parent, title=title, size=(320, 240))
# add camera frame
print "using camera at %s:%s" % (server, port)
self.camera_frame = CameraPanel(self, server, port)
# setup callback timer
interval_time = 250
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.timer.Start(interval_time, False)
# and go
self.Show()
def on_timer(self, event):
self.camera_frame.update()
def main():
""" Main entry point for application.
Parses command line args for server and launches wx. """
# parse cmd line args
parser = argparse.ArgumentParser(description="raspberry pi - onboard view")
parser.add_argument("-server", help="server for camera stream",
default="raspberrypi.local", type=str, action='store')
parser.add_argument("-port", help="port for camera stream",
default=8000, type=int, action='store')
selected_args = parser.parse_args()
server = selected_args.server
port = selected_args.port
# run UI loop
app = wx.App(False)
frame = CameraViewer(None, "raspberry pi - onboard view", server, port)
app.MainLoop()
frame.shut_down()
return 0
if __name__ == "__main__":
status = main()
sys.exit(status)
| false | true |
f7318945e70a68298c48631812fa81322c407bc9 | 12,916 | py | Python | env/lib/python3.6/site-packages/nibabel/tests/test_image_load_save.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_image_load_save.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_image_load_save.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Tests for loader function '''
from __future__ import division, print_function, absolute_import
from io import BytesIO
import shutil
from os.path import dirname, join as pjoin
from tempfile import mkdtemp
import numpy as np
from .. import analyze as ana
from .. import spm99analyze as spm99
from .. import spm2analyze as spm2
from .. import nifti1 as ni1
from .. import loadsave as nils
from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair,
Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage,
AnalyzeImage, MGHImage, all_image_classes)
from ..tmpdirs import InTemporaryDirectory
from ..volumeutils import native_code, swapped_code
from ..optpkg import optional_package
from ..spatialimages import SpatialImage
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_true, assert_equal, assert_raises
_, have_scipy, _ = optional_package('scipy') # No scipy=>no SPM-format writing
DATA_PATH = pjoin(dirname(__file__), 'data')
MGH_DATA_PATH = pjoin(dirname(__file__), '..', 'freesurfer', 'tests', 'data')
def round_trip(img):
    """Serialize a single-file NIfTI image to memory and reload it."""
    buffer = BytesIO()
    img.file_map['image'].fileobj = buffer
    img.to_file_map()
    return Nifti1Image.from_file_map(img.file_map)
def test_conversion_spatialimages():
    """Round-trip data and affine through every writable spatial image class."""
    shape = (2, 4, 6)
    affine = np.diag([1, 2, 3, 1])
    # Read/write spatial classes that can be constructed directly.
    makeable = [k for k in all_image_classes
                if k.rw and issubclass(k, SpatialImage) and k.makeable]
    for dtype in (np.float32, np.int16):
        data = np.arange(np.prod(shape), dtype=dtype).reshape(shape)
        for src_class in makeable:
            src_img = src_class(data, affine)
            src_img.set_data_dtype(dtype)
            for dst_class in makeable:
                converted = dst_class.from_image(src_img)
                assert_array_equal(converted.get_data(), data)
                assert_array_equal(converted.affine, affine)
def test_save_load_endian():
    """Check endianness handling across round trips and image conversion."""
    shape = (2, 4, 6)
    affine = np.diag([1, 2, 3, 1])
    data = np.arange(np.prod(shape), dtype='f4').reshape(shape)
    # Native endian image round-trips with native byte order preserved.
    img = Nifti1Image(data, affine)
    assert_equal(img.header.endianness, native_code)
    img2 = round_trip(img)
    assert_equal(img2.header.endianness, native_code)
    assert_array_equal(img2.get_data(), data)
    # byte swapped endian image
    bs_hdr = img.header.as_byteswapped()
    bs_img = Nifti1Image(data, affine, bs_hdr)
    assert_equal(bs_img.header.endianness, swapped_code)
    # of course the data is the same because it's not written to disk
    assert_array_equal(bs_img.get_data(), data)
    # Check converting to another image class
    cbs_img = AnalyzeImage.from_image(bs_img)
    # this will make the header native by doing the header conversion
    cbs_hdr = cbs_img.header
    assert_equal(cbs_hdr.endianness, native_code)
    # and the byte order follows it back into another image
    cbs_img2 = Nifti1Image.from_image(cbs_img)
    cbs_hdr2 = cbs_img2.header
    assert_equal(cbs_hdr2.endianness, native_code)
    # Try byteswapped round trip
    bs_img2 = round_trip(bs_img)
    bs_data2 = bs_img2.get_data()
    # now the data dtype was swapped endian, so the read data is too
    assert_equal(bs_data2.dtype.byteorder, swapped_code)
    assert_equal(bs_img2.header.endianness, swapped_code)
    assert_array_equal(bs_data2, data)
    # Now mix up byteswapped data and non-byteswapped header: the new
    # image gets a fresh native header.
    mixed_img = Nifti1Image(bs_data2, affine)
    assert_equal(mixed_img.header.endianness, native_code)
    m_img2 = round_trip(mixed_img)
    assert_equal(m_img2.header.endianness, native_code)
    assert_array_equal(m_img2.get_data(), data)
def test_save_load():
    """Save and reload images on disk across NIfTI and SPM Analyze formats."""
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = 'an_image.nii'
        sifn = 'another_image.img'
        # NIfTI round trip through the filesystem.
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        assert_true(isinstance(re_img, ni1.Nifti1Image))
        assert_array_equal(re_img.get_data(), data)
        assert_array_equal(re_img.affine, affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        if have_scipy:  # skip if we cannot read .mat files
            # SPM2 Analyze round trip.
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            assert_array_equal(re_img2.get_data(), data)
            assert_array_equal(re_img2.affine, affine)
            del re_img2
            # SPM99 Analyze round trip, then re-save that as NIfTI.
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            assert_true(isinstance(re_img3,
                                   spm99.Spm99AnalyzeImage))
            assert_array_equal(re_img3.get_data(), data)
            assert_array_equal(re_img3.affine, affine)
            ni1.save(re_img3, nifn)
            del re_img3
            re_img = nils.load(nifn)
            assert_true(isinstance(re_img, ni1.Nifti1Image))
            assert_array_equal(re_img.get_data(), data)
            assert_array_equal(re_img.affine, affine)
            del re_img
def test_two_to_one():
    # test going from two to one file in save
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    # single file format: magic is 'n+1'
    img = ni1.Nifti1Image(data, affine)
    assert_equal(img.header['magic'], b'n+1')
    str_io = BytesIO()
    img.file_map['image'].fileobj = str_io
    # check that the single format vox offset stays at zero
    img.to_file_map()
    assert_equal(img.header['magic'], b'n+1')
    assert_equal(img.header['vox_offset'], 0)
    # make a new pair image, with the single image header
    pimg = ni1.Nifti1Pair(data, affine, img.header)
    isio = BytesIO()
    hsio = BytesIO()
    pimg.file_map['image'].fileobj = isio
    pimg.file_map['header'].fileobj = hsio
    pimg.to_file_map()
    # the offset stays at zero (but is 352 on disk); pair magic is 'ni1'
    assert_equal(pimg.header['magic'], b'ni1')
    assert_equal(pimg.header['vox_offset'], 0)
    assert_array_equal(pimg.get_data(), data)
    # same for from_image, going from single image to pair format
    ana_img = ana.AnalyzeImage.from_image(img)
    assert_equal(ana_img.header['vox_offset'], 0)
    # back to the single image, save it again to a stringio
    str_io = BytesIO()
    img.file_map['image'].fileobj = str_io
    img.to_file_map()
    assert_equal(img.header['vox_offset'], 0)
    # conversions to each Analyze flavor and to pair keep offset at zero
    aimg = ana.AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    aimg = spm99.Spm99AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    aimg = spm2.Spm2AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    nfimg = ni1.Nifti1Pair.from_image(img)
    assert_equal(nfimg.header['vox_offset'], 0)
    # now set the vox offset directly
    hdr = nfimg.header
    hdr['vox_offset'] = 16
    assert_equal(nfimg.header['vox_offset'], 16)
    # check it gets properly reset by the nifti single image conversion
    nfimg = ni1.Nifti1Image.from_image(img)
    assert_equal(nfimg.header['vox_offset'], 0)
def test_negative_load_save():
    """Negative floats survive a scaled int16 save/load round trip."""
    shape = (1, 2, 5)
    data = np.arange(10).reshape(shape) - 10.0
    header = ni1.Nifti1Header()
    header.set_data_dtype(np.int16)
    img = Nifti1Image(data, np.eye(4), header)
    stream = BytesIO()
    img.file_map['image'].fileobj = stream
    img.to_file_map()
    stream.seek(0)
    reloaded = Nifti1Image.from_file_map(img.file_map)
    assert_array_almost_equal(reloaded.get_data(), data, 4)
def test_filename_save():
    # This is to test the logic in the load and save routines, relating
    # extensions to filetypes
    # Tuples of (input class, save extension, expected class after reload)
    inklass_ext_loadklasses = (
        (Nifti1Image, '.nii', Nifti1Image),
        (Nifti2Image, '.nii', Nifti2Image),
        (Nifti1Pair, '.nii', Nifti1Image),
        (Nifti2Pair, '.nii', Nifti2Image),
        (Nifti1Image, '.img', Nifti1Pair),
        (Nifti2Image, '.img', Nifti2Pair),
        (Nifti1Pair, '.img', Nifti1Pair),
        (Nifti2Pair, '.img', Nifti2Pair),
        (Nifti1Image, '.hdr', Nifti1Pair),
        (Nifti2Image, '.hdr', Nifti2Pair),
        (Nifti1Pair, '.hdr', Nifti1Pair),
        (Nifti2Pair, '.hdr', Nifti2Pair),
        (Minc1Image, '.nii', Nifti1Image),
        (Minc1Image, '.img', Nifti1Pair),
        (Spm2AnalyzeImage, '.nii', Nifti1Image),
        (Spm2AnalyzeImage, '.img', Spm2AnalyzeImage),
        (Spm99AnalyzeImage, '.nii', Nifti1Image),
        (Spm99AnalyzeImage, '.img', Spm2AnalyzeImage),
        (AnalyzeImage, '.nii', Nifti1Image),
        (AnalyzeImage, '.img', Spm2AnalyzeImage),
    )
    shape = (2, 4, 6)
    affine = np.diag([1, 2, 3, 1])
    data = np.arange(np.prod(shape), dtype='f4').reshape(shape)
    for inklass, out_ext, loadklass in inklass_ext_loadklasses:
        if not have_scipy:
            # We can't load a SPM analyze type without scipy. These types have
            # a 'mat' file (the type we can't load)
            if ('mat', '.mat') in loadklass.files_types:
                continue
        img = inklass(data, affine)
        try:
            pth = mkdtemp()
            fname = pjoin(pth, 'image' + out_ext)
            nils.save(img, fname)
            rt_img = nils.load(fname)
            assert_array_almost_equal(rt_img.get_data(), data)
            assert_true(type(rt_img) is loadklass)
            # delete image to allow file close. Otherwise windows
            # raises an error when trying to delete the directory
            del rt_img
        finally:
            # clean up the temporary directory created for this case
            shutil.rmtree(pth)
def test_analyze_detection():
    # Test detection of Analyze, Nifti1 and Nifti2
    # Algorithm is as described in loadsave:which_analyze_type
    def wat(hdr):
        # Shorthand: run detection on the header's raw bytes.
        return nils.which_analyze_type(hdr.binaryblock)
    # All-zero header: not detectable as any type.
    n1_hdr = Nifti1Header(b'\0' * 348, check=False)
    assert_equal(wat(n1_hdr), None)
    # sizeof_hdr == 540 identifies nifti2, in either byte order.
    n1_hdr['sizeof_hdr'] = 540
    assert_equal(wat(n1_hdr), 'nifti2')
    assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti2')
    # sizeof_hdr == 348 without a nifti magic is plain analyze.
    n1_hdr['sizeof_hdr'] = 348
    assert_equal(wat(n1_hdr), 'analyze')
    assert_equal(wat(n1_hdr.as_byteswapped()), 'analyze')
    # Either nifti1 magic ('n+1' single-file, 'ni1' pair) wins.
    n1_hdr['magic'] = b'n+1'
    assert_equal(wat(n1_hdr), 'nifti1')
    assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')
    n1_hdr['magic'] = b'ni1'
    assert_equal(wat(n1_hdr), 'nifti1')
    assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')
    # Doesn't matter what magic is if it's not a nifti1 magic
    n1_hdr['magic'] = b'ni2'
    assert_equal(wat(n1_hdr), 'analyze')
    # Zeroed sizeof_hdr and empty magic: undetectable again.
    n1_hdr['sizeof_hdr'] = 0
    n1_hdr['magic'] = b''
    assert_equal(wat(n1_hdr), None)
    # Magic set from str (not bytes) must detect the same way.
    n1_hdr['magic'] = 'n+1'
    assert_equal(wat(n1_hdr), 'nifti1')
    n1_hdr['magic'] = 'ni1'
    assert_equal(wat(n1_hdr), 'nifti1')
def test_guessed_image_type():
    """Guess the image class from each example data file."""
    cases = (
        ('example4d.nii.gz', Nifti1Image),
        ('nifti1.hdr', Nifti1Pair),
        ('example_nifti2.nii.gz', Nifti2Image),
        ('nifti2.hdr', Nifti2Pair),
        ('tiny.mnc', Minc1Image),
        ('small.mnc', Minc2Image),
        ('test.mgz', MGHImage),
        ('analyze.hdr', Spm2AnalyzeImage),
    )
    for fname, expected_class in cases:
        assert_equal(nils.guessed_image_type(pjoin(DATA_PATH, fname)),
                     expected_class)
def test_fail_save():
    """Saving an image with an unsupported dtype (float16) must fail."""
    with InTemporaryDirectory():
        dataobj = np.ones((10, 10, 10), dtype=np.float16)
        affine = np.eye(4, dtype=np.float32)
        img = SpatialImage(dataobj, affine)
        # float16 has no on-disk representation here, so save() raises.
        with assert_raises(AttributeError):
            nils.save(img, 'foo.nii.gz')
        del img
| 38.440476 | 79 | 0.650743 |
)
assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
assert_array_equal(re_img2.get_data(), data)
assert_array_equal(re_img2.affine, affine)
del re_img2
spm99.save(img, sifn)
re_img3 = nils.load(sifn)
assert_true(isinstance(re_img3,
spm99.Spm99AnalyzeImage))
assert_array_equal(re_img3.get_data(), data)
assert_array_equal(re_img3.affine, affine)
ni1.save(re_img3, nifn)
del re_img3
re_img = nils.load(nifn)
assert_true(isinstance(re_img, ni1.Nifti1Image))
assert_array_equal(re_img.get_data(), data)
assert_array_equal(re_img.affine, affine)
del re_img
def test_two_to_one():
# test going from two to one file in save
shape = (2, 4, 6)
npt = np.float32
data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
affine = np.diag([1, 2, 3, 1])
affine[:3, 3] = [3, 2, 1]
# single file format
img = ni1.Nifti1Image(data, affine)
assert_equal(img.header['magic'], b'n+1')
str_io = BytesIO()
img.file_map['image'].fileobj = str_io
# check that the single format vox offset stays at zero
img.to_file_map()
assert_equal(img.header['magic'], b'n+1')
assert_equal(img.header['vox_offset'], 0)
# make a new pair image, with the single image header
pimg = ni1.Nifti1Pair(data, affine, img.header)
isio = BytesIO()
hsio = BytesIO()
pimg.file_map['image'].fileobj = isio
pimg.file_map['header'].fileobj = hsio
pimg.to_file_map()
# the offset stays at zero (but is 352 on disk)
assert_equal(pimg.header['magic'], b'ni1')
assert_equal(pimg.header['vox_offset'], 0)
assert_array_equal(pimg.get_data(), data)
# same for from_image, going from single image to pair format
ana_img = ana.AnalyzeImage.from_image(img)
assert_equal(ana_img.header['vox_offset'], 0)
# back to the single image, save it again to a stringio
str_io = BytesIO()
img.file_map['image'].fileobj = str_io
img.to_file_map()
assert_equal(img.header['vox_offset'], 0)
aimg = ana.AnalyzeImage.from_image(img)
assert_equal(aimg.header['vox_offset'], 0)
aimg = spm99.Spm99AnalyzeImage.from_image(img)
assert_equal(aimg.header['vox_offset'], 0)
aimg = spm2.Spm2AnalyzeImage.from_image(img)
assert_equal(aimg.header['vox_offset'], 0)
nfimg = ni1.Nifti1Pair.from_image(img)
assert_equal(nfimg.header['vox_offset'], 0)
# now set the vox offset directly
hdr = nfimg.header
hdr['vox_offset'] = 16
assert_equal(nfimg.header['vox_offset'], 16)
# check it gets properly set by the nifti single image
nfimg = ni1.Nifti1Image.from_image(img)
assert_equal(nfimg.header['vox_offset'], 0)
def test_negative_load_save():
shape = (1, 2, 5)
data = np.arange(10).reshape(shape) - 10.0
affine = np.eye(4)
hdr = ni1.Nifti1Header()
hdr.set_data_dtype(np.int16)
img = Nifti1Image(data, affine, hdr)
str_io = BytesIO()
img.file_map['image'].fileobj = str_io
img.to_file_map()
str_io.seek(0)
re_img = Nifti1Image.from_file_map(img.file_map)
assert_array_almost_equal(re_img.get_data(), data, 4)
def test_filename_save():
# This is to test the logic in the load and save routines, relating
# extensions to filetypes
# Tuples of class, ext, loadedclass
inklass_ext_loadklasses = (
(Nifti1Image, '.nii', Nifti1Image),
(Nifti2Image, '.nii', Nifti2Image),
(Nifti1Pair, '.nii', Nifti1Image),
(Nifti2Pair, '.nii', Nifti2Image),
(Nifti1Image, '.img', Nifti1Pair),
(Nifti2Image, '.img', Nifti2Pair),
(Nifti1Pair, '.img', Nifti1Pair),
(Nifti2Pair, '.img', Nifti2Pair),
(Nifti1Image, '.hdr', Nifti1Pair),
(Nifti2Image, '.hdr', Nifti2Pair),
(Nifti1Pair, '.hdr', Nifti1Pair),
(Nifti2Pair, '.hdr', Nifti2Pair),
(Minc1Image, '.nii', Nifti1Image),
(Minc1Image, '.img', Nifti1Pair),
(Spm2AnalyzeImage, '.nii', Nifti1Image),
(Spm2AnalyzeImage, '.img', Spm2AnalyzeImage),
(Spm99AnalyzeImage, '.nii', Nifti1Image),
(Spm99AnalyzeImage, '.img', Spm2AnalyzeImage),
(AnalyzeImage, '.nii', Nifti1Image),
(AnalyzeImage, '.img', Spm2AnalyzeImage),
)
shape = (2, 4, 6)
affine = np.diag([1, 2, 3, 1])
data = np.arange(np.prod(shape), dtype='f4').reshape(shape)
for inklass, out_ext, loadklass in inklass_ext_loadklasses:
if not have_scipy:
# We can't load a SPM analyze type without scipy. These types have
if ('mat', '.mat') in loadklass.files_types:
continue
img = inklass(data, affine)
try:
pth = mkdtemp()
fname = pjoin(pth, 'image' + out_ext)
nils.save(img, fname)
rt_img = nils.load(fname)
assert_array_almost_equal(rt_img.get_data(), data)
assert_true(type(rt_img) is loadklass)
# delete image to allow file close. Otherwise windows
# raises an error when trying to delete the directory
del rt_img
finally:
shutil.rmtree(pth)
def test_analyze_detection():
# Test detection of Analyze, Nifti1 and Nifti2
# Algorithm is as described in loadsave:which_analyze_type
def wat(hdr):
return nils.which_analyze_type(hdr.binaryblock)
n1_hdr = Nifti1Header(b'\0' * 348, check=False)
assert_equal(wat(n1_hdr), None)
n1_hdr['sizeof_hdr'] = 540
assert_equal(wat(n1_hdr), 'nifti2')
assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti2')
n1_hdr['sizeof_hdr'] = 348
assert_equal(wat(n1_hdr), 'analyze')
assert_equal(wat(n1_hdr.as_byteswapped()), 'analyze')
n1_hdr['magic'] = b'n+1'
assert_equal(wat(n1_hdr), 'nifti1')
assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')
n1_hdr['magic'] = b'ni1'
assert_equal(wat(n1_hdr), 'nifti1')
assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')
# Doesn't matter what magic is if it's not a nifti1 magic
n1_hdr['magic'] = b'ni2'
assert_equal(wat(n1_hdr), 'analyze')
n1_hdr['sizeof_hdr'] = 0
n1_hdr['magic'] = b''
assert_equal(wat(n1_hdr), None)
n1_hdr['magic'] = 'n+1'
assert_equal(wat(n1_hdr), 'nifti1')
n1_hdr['magic'] = 'ni1'
assert_equal(wat(n1_hdr), 'nifti1')
def test_guessed_image_type():
# Test whether we can guess the image type from example files
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'example4d.nii.gz')),
Nifti1Image)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'nifti1.hdr')),
Nifti1Pair)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'example_nifti2.nii.gz')),
Nifti2Image)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'nifti2.hdr')),
Nifti2Pair)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'tiny.mnc')),
Minc1Image)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'small.mnc')),
Minc2Image)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'test.mgz')),
MGHImage)
assert_equal(nils.guessed_image_type(
pjoin(DATA_PATH, 'analyze.hdr')),
Spm2AnalyzeImage)
def test_fail_save():
with InTemporaryDirectory():
dataobj = np.ones((10, 10, 10), dtype=np.float16)
affine = np.eye(4, dtype=np.float32)
img = SpatialImage(dataobj, affine)
# Fails because float16 is not supported.
with assert_raises(AttributeError):
nils.save(img, 'foo.nii.gz')
del img
| true | true |
f7318a7c9b9961d14143aebdcada5b709c5812c1 | 2,945 | py | Python | tests/integration/qbs/test_qbs_submission_data.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | null | null | null | tests/integration/qbs/test_qbs_submission_data.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | 1 | 2018-11-05T12:00:51.000Z | 2018-11-05T12:00:51.000Z | tests/integration/qbs/test_qbs_submission_data.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | null | null | null | from tests.integration.integration_test_case import IntegrationTestCase
class TestQbsSubmissionData(IntegrationTestCase):
    """Walk the Quarterly Business Survey end-to-end and verify the dumped payload."""

    def test_submission_data_2_0001(self):
        self.submission_data('2', '0001')

    def submission_data(self, eq_id, form_type_id):
        """Complete the questionnaire and compare the submission JSON field by field."""
        self.launchSurvey(eq_id, form_type_id, roles=['dumper'])

        # Introduction page
        self.assertInPage('>Start survey<')
        self.assertInPage('Quarterly Business Survey')

        # Enter the questionnaire proper
        self.post(action='start_questionnaire')
        self.assertInPage('>Quarterly Business Survey</')
        self.assertInPage('what was the number of employees for Integration Tests?')
        self.assertInPage('>Save and continue<')

        # Answer both question pages: the total, then the breakdown
        self.post(post_data={'number-of-employees-total': '10'})
        breakdown = {
            'number-of-employees-male-more-30-hours': '1',
            'number-of-employees-male-less-30-hours': '2',
            'number-of-employees-female-more-30-hours': '3',
            'number-of-employees-female-less-30-hours': '4',
        }
        self.post(post_data=breakdown)

        # No validation errors: we land on the summary screen
        self.assertInUrl('summary')
        self.assertInPage('>Quarterly Business Survey</')
        self.assertInPage('>Check your answers and submit<')
        self.assertInPage('You can check your answers below')
        self.assertInPage('>Submit answers<')

        # The dumped JSON must echo exactly what was submitted
        actual = self.dumpSubmission()
        submission = actual['submission']
        expected = {
            'submission': {
                'origin': 'uk.gov.ons.edc.eq',
                # Timestamps and ids are generated server-side; copy them over
                # so the comparison pins everything else.
                'started_at': submission['started_at'],
                'submitted_at': submission['submitted_at'],
                'case_id': submission['case_id'],
                'collection': {
                    'exercise_sid': '789',
                    'period': '201604',
                    'instrument_id': '0001',
                },
                'survey_id': '139',
                'flushed': False,
                'tx_id': submission['tx_id'],
                'data': {
                    '50': '10',
                    '51': '1',
                    '52': '2',
                    '53': '3',
                    '54': '4',
                },
                'type': 'uk.gov.ons.edc.eq:surveyresponse',
                'version': '0.0.1',
                'metadata': {
                    'ref_period_end_date': '2016-04-30',
                    'ref_period_start_date': '2016-04-01',
                    'ru_ref': '123456789012A',
                    'user_id': 'integration-test',
                },
            }
        }

        # Show full dictionary diffs on failure
        self.maxDiff = None
        self.assertDictEqual(actual, expected)
| 38.75 | 84 | 0.529372 | from tests.integration.integration_test_case import IntegrationTestCase
class TestQbsSubmissionData(IntegrationTestCase):
    """Integration test: submit the QBS (survey 139) questionnaire and check the payload."""

    def test_submission_data_2_0001(self):
        # Exercise eq_id '2' with form type '0001'.
        self.submission_data('2', '0001')

    def submission_data(self, eq_id, form_type_id):
        # Launch with the 'dumper' role so the submission JSON can be read back later.
        self.launchSurvey(eq_id, form_type_id, roles=['dumper'])

        # Introduction page is shown.
        self.assertInPage('>Start survey<')
        self.assertInPage('Quarterly Business Survey')

        # Enter the questionnaire.
        self.post(action='start_questionnaire')

        self.assertInPage('>Quarterly Business Survey</')
        self.assertInPage('what was the number of employees for Integration Tests?')
        self.assertInPage('>Save and continue<')

        # Submit the total, then the detailed hours breakdown.
        self.post(post_data={'number-of-employees-total': '10'})
        self.post(post_data={'number-of-employees-male-more-30-hours': '1',
                             'number-of-employees-male-less-30-hours': '2',
                             'number-of-employees-female-more-30-hours': '3',
                             'number-of-employees-female-less-30-hours': '4'})

        # No validation errors: we should now be on the summary page.
        self.assertInUrl('summary')
        self.assertInPage('>Quarterly Business Survey</')
        self.assertInPage('>Check your answers and submit<')
        self.assertInPage('You can check your answers below')
        self.assertInPage('>Submit answers<')

        # And the JSON response contains the data I submitted
        actual = self.dumpSubmission()

        # Server-generated fields (timestamps, ids) are copied from `actual`
        # so the comparison only pins the data we control.
        expected = {
            'submission': {
                'origin': 'uk.gov.ons.edc.eq',
                'started_at': actual['submission']['started_at'],
                'submitted_at': actual['submission']['submitted_at'],
                'case_id': actual['submission']['case_id'],
                'collection': {
                    'exercise_sid': '789',
                    'period': '201604',
                    'instrument_id': '0001'
                },
                'survey_id': '139',
                'flushed': False,
                'tx_id': actual['submission']['tx_id'],
                'data': {
                    '50': '10',
                    '51': '1',
                    '52': '2',
                    '53': '3',
                    '54': '4'
                },
                'type': 'uk.gov.ons.edc.eq:surveyresponse',
                'version': '0.0.1',
                'metadata': {
                    'ref_period_end_date': '2016-04-30',
                    'ref_period_start_date': '2016-04-01',
                    'ru_ref': '123456789012A',
                    'user_id': 'integration-test'
                }
            }
        }

        # Enable full dictionary diffs on test failure
        self.maxDiff = None
        self.assertDictEqual(actual, expected)
| true | true |
f7318bf8ab84bb950ae4d28f761bd4399b07f385 | 46,765 | py | Python | src/transformers/configuration_utils.py | elusenji/transformers | af14c61973effd8b8077ac61b3f24bdd4a632f25 | [
"Apache-2.0"
] | 3 | 2022-01-15T08:06:07.000Z | 2022-03-10T07:13:18.000Z | src/transformers/configuration_utils.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | null | null | null | src/transformers/configuration_utils.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
CONFIG_NAME,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
logging,
)
# Module-level logger for this file.
logger = logging.get_logger(__name__)

# Matches versioned configuration file names such as `config.4.0.0.json`;
# the capture group holds the version part of the name.
_re_configuration_file = re.compile(r"config\.(.*)\.json")
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
[`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
- **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
Arg:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
[`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not the model should returns all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
        cross_attention_hidden_size (`int`, *optional*):
            The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
            setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for sequence generation
max_length (`int`, *optional*, defaults to 20):
Maximum length that will be used by default in the `generate` method of the model.
        min_length (`int`, *optional*, defaults to 0):
            Minimum length that will be used by default in the `generate` method of the model.
do_sample (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;
use greedy decoding otherwise.
early_stopping (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
no beam search.
num_beam_groups (`int`, *optional*, defaults to 1):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
that will be used by default in the `generate` method of the model. 1 means no group beam search.
diversity_penalty (`float`, *optional*, defaults to 0.0):
Value to control diversity for group beam search. that will be used by default in the `generate` method of
the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
temperature (`float`, *optional*, defaults to 1):
The value used to module the next token probabilities that will be used by default in the `generate` method
of the model. Must be strictly positive.
top_k (`int`, *optional*, defaults to 50):
Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
the `generate` method of the model.
top_p (`float`, *optional*, defaults to 1):
Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
repetition_penalty (`float`, *optional*, defaults to 1):
Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
means no penalty.
length_penalty (`float`, *optional*, defaults to 1):
Exponential penalty to the length that will be used by default in the `generate` method of the model.
no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the
`generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can
only occur once.
encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by
default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all
ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
bad_words_ids (`List[int]`, *optional*):
List of token ids that are not allowed to be generated that will be used by default in the `generate`
method of the model. In order to get the tokens of the words that should not appear in the generated text,
use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences (`int`, *optional*, defaults to 1):
Number of independently computed returned sequences for each element in the batch that will be used by
default in the `generate` method of the model.
output_scores (`bool`, *optional*, defaults to `False`):
Whether the model should return the logits when used for generation.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
forced_bos_token_id (`int`, *optional*):
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
language token.
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached.
remove_invalid_values (`bool`, *optional*):
Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.
Note that using `remove_invalid_values` can slow down generation.
> Parameters for fine-tuning tasks
architectures (`List[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
or PyTorch) checkpoint.
id2label (`Dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`Dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*): The id of the _padding_ token.
eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*): The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has a output word embedding layer.
torch_dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights. Since the config object is stored in plain text, this attribute contains just the
            floating type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
`"float16"` string.
This attribute is currently not being used during model loading time, but this may change in the future
versions. But we can already start preparing for the future by saving the dtype with save_pretrained.
> TensorFlow specific parameters
use_bfloat16 (`bool`, *optional*, defaults to `False`):
Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
    def __init__(self, **kwargs):
        """
        Populate every common configuration attribute from `kwargs`, consuming recognized keys as it goes; any keys
        left over at the end are set on the instance verbatim (see the loop at the bottom).
        """
        # Attributes with defaults
        self.return_dict = kwargs.pop("return_dict", True)
        self.output_hidden_states = kwargs.pop("output_hidden_states", False)
        self.output_attentions = kwargs.pop("output_attentions", False)
        self.torchscript = kwargs.pop("torchscript", False)  # Only used by PyTorch models
        self.torch_dtype = kwargs.pop("torch_dtype", None)  # Only used by PyTorch models
        self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
        self.pruned_heads = kwargs.pop("pruned_heads", {})
        self.tie_word_embeddings = kwargs.pop(
            "tie_word_embeddings", True
        )  # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.

        # Is decoder is used in encoder-decoder models to differentiate encoder from decoder
        self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
        self.is_decoder = kwargs.pop("is_decoder", False)
        self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
        self.add_cross_attention = kwargs.pop("add_cross_attention", False)
        self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)

        # Parameters for sequence generation
        self.max_length = kwargs.pop("max_length", 20)
        self.min_length = kwargs.pop("min_length", 0)
        self.do_sample = kwargs.pop("do_sample", False)
        self.early_stopping = kwargs.pop("early_stopping", False)
        self.num_beams = kwargs.pop("num_beams", 1)
        self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
        self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
        self.temperature = kwargs.pop("temperature", 1.0)
        self.top_k = kwargs.pop("top_k", 50)
        self.top_p = kwargs.pop("top_p", 1.0)
        self.typical_p = kwargs.pop("typical_p", 1.0)
        self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
        self.length_penalty = kwargs.pop("length_penalty", 1.0)
        self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
        self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
        self.bad_words_ids = kwargs.pop("bad_words_ids", None)
        self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
        self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
        self.output_scores = kwargs.pop("output_scores", False)
        self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
        self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
        self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
        self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
        self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)

        # Fine-tuning task arguments
        self.architectures = kwargs.pop("architectures", None)
        self.finetuning_task = kwargs.pop("finetuning_task", None)
        self.id2label = kwargs.pop("id2label", None)
        self.label2id = kwargs.pop("label2id", None)
        if self.id2label is not None:
            # An explicit id2label wins over num_labels, so discard the latter.
            kwargs.pop("num_labels", None)
            self.id2label = dict((int(key), value) for key, value in self.id2label.items())
            # Keys are always strings in JSON so convert ids to int here.
        else:
            # No label map given: the num_labels property setter builds default LABEL_i maps.
            self.num_labels = kwargs.pop("num_labels", 2)

        if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
            # we will start using self.torch_dtype in v5, but to be consistent with
            # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
            if is_torch_available():
                import torch

                self.torch_dtype = getattr(torch, self.torch_dtype)

        # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
        self.tokenizer_class = kwargs.pop("tokenizer_class", None)
        self.prefix = kwargs.pop("prefix", None)
        self.bos_token_id = kwargs.pop("bos_token_id", None)
        self.pad_token_id = kwargs.pop("pad_token_id", None)
        self.eos_token_id = kwargs.pop("eos_token_id", None)
        self.sep_token_id = kwargs.pop("sep_token_id", None)

        self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)

        # task specific arguments
        self.task_specific_params = kwargs.pop("task_specific_params", None)

        # regression / multi-label classification
        self.problem_type = kwargs.pop("problem_type", None)
        allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
        if self.problem_type is not None and self.problem_type not in allowed_problem_types:
            raise ValueError(
                f"The config parameter `problem_type` was not understood: received {self.problem_type} "
                "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
            )

        # TPU arguments
        if kwargs.pop("xla_device", None) is not None:
            logger.warning(
                "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
                "safely remove it from your `config.json` file."
            )

        # Name or path to the pretrained checkpoint
        self._name_or_path = str(kwargs.pop("name_or_path", ""))

        # Drop the transformers version info
        self.transformers_version = kwargs.pop("transformers_version", None)

        # Deal with gradient checkpointing (deprecated here; only warn, don't consume the key)
        if kwargs.get("gradient_checkpointing", False):
            warnings.warn(
                "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
                "Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
                "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
            )

        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err
@property
def name_or_path(self) -> str:
return getattr(self, "_name_or_path", None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
    @property
    def use_return_dict(self) -> bool:
        """
        `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.
        """
        # If torchscript is set, force `return_dict=False` to avoid jit errors
        # (so this is True only when return_dict is on AND torchscript is off).
        return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """
        Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
        [`~PretrainedConfig.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the configuration JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                <Tip warning={true}>

                Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
                which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
                folder. Pass along `temp_dir=True` to use a temporary directory instead.

                </Tip>

            kwargs:
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            # The repo is resolved/cloned before `save_directory` is (re)created below.
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_config_file = os.path.join(save_directory, CONFIG_NAME)

        # `use_diff=True` serializes only attributes that differ from the defaults.
        self.to_json_file(output_config_file, use_diff=True)
        logger.info(f"Configuration saved in {output_config_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Configuration pushed to the hub in this commit: {url}")
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        r"""
        Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
                  huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
                  namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
                - a path to a *directory* containing a configuration file saved using the
                  [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
                - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the configuration files and override the cached versions if
                they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received file. Attempts to resume the download if such a file
                exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `transformers-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final configuration object.

                If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
                dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
                part of `kwargs` which has not been used to update `config` and is otherwise ignored.
            kwargs (`Dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are configuration attributes will be used to override the loaded
                values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
                by the `return_unused_kwargs` keyword parameter.

        <Tip>

        Passing `use_auth_token=True` is required when you want to use a private model.

        </Tip>

        Returns:
            [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.

        Examples:

        ```python
        # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
        # derived class: BertConfig
        config = BertConfig.from_pretrained(
            "bert-base-uncased"
        )  # Download configuration from huggingface.co and cache.
        config = BertConfig.from_pretrained(
            "./test/saved_model/"
        )  # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
        config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
        config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
        assert config.output_attentions == True
        config, unused_kwargs = BertConfig.from_pretrained(
            "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
        )
        assert config.output_attentions == True
        assert unused_kwargs == {"foo": False}
        ```"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # Loading a checkpoint through a mismatched config class is allowed but
        # usually a user error, so warn instead of raising.
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
original_kwargs = copy.deepcopy(kwargs)
# Get config dict associated with the base config file
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
# That config file may point us toward another config file to use.
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, configuration_file)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=configuration_file, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on "
"'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having "
"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass "
"`use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for "
"available revisions."
)
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}."
)
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
)
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
"offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a {configuration_file} file"
)
try:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_diff_dict(self) -> Dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability and
        serializes to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = PretrainedConfig().to_dict()
        # get class specific config dict
        # NOTE: composition configs (e.g. encoder-decoder) cannot be default-constructed,
        # so the class-specific diff is skipped for them.
        class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        # A key is kept when any of the following holds:
        #   1. it is not a base-config attribute at all,
        #   2. it is `transformers_version` (always recorded),
        #   3. its value differs from the base-config default,
        #   4. its value differs from this class's own default (so a value equal to the
        #      base default but overridden by the subclass default is still written out).
        for key, value in config_dict.items():
            if (
                key not in default_config_dict
                or key == "transformers_version"
                or value != default_config_dict[key]
                or (key in class_config_dict and value != class_config_dict[key])
            ):
                serializable_config_dict[key] = value
        # Make torch dtypes JSON-friendly in place (e.g. torch.float32 -> "float32").
        self.dict_torch_dtype_to_str(serializable_config_dict)
        return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"*
string, which can then be stored in the json format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
    @classmethod
    def register_for_auto_class(cls, auto_class="AutoConfig"):
        """
        Register this class with a given auto class. This should only be used for custom configurations as the ones in
        the library are already mapped with `AutoConfig`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
                The auto class to register this new configuration with.
        """
        # Accept a class object as well as its name; only the name is stored.
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__
        # Imported lazily inside the method to avoid a circular import at module load time.
        import transformers.models.auto as auto_module
        # Validate against the auto module before recording the registration.
        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")
        cls._auto_class = auto_class
def get_configuration_file(configuration_files: List[str]) -> str:
    """
    Get the configuration file to use for this version of transformers.

    Args:
        configuration_files (`List[str]`): The list of available configuration files.

    Returns:
        `str`: The configuration file to use.
    """
    # Map version string -> file name for every file matching `config.<version>.json`.
    configuration_files_map = {}
    for file_name in configuration_files:
        search = _re_configuration_file.search(file_name)
        if search is not None:
            v = search.groups()[0]
            configuration_files_map[v] = file_name
    # BUGFIX: sort by actual version precedence, not lexically -- a plain string sort
    # orders "4.10.0" before "4.9.0", which breaks the early-exit scan below.
    available_versions = sorted(configuration_files_map.keys(), key=version.parse)

    # Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions.
    configuration_file = CONFIG_NAME
    transformers_version = version.parse(__version__)
    for v in available_versions:
        if version.parse(v) <= transformers_version:
            configuration_file = configuration_files_map[v]
        else:
            # No point going further since the versions are sorted.
            break

    return configuration_file
# `push_to_hub` is inherited from `PushToHubMixin`; copy the function object first so the
# docstring can be specialized for configs without mutating the shared mixin method.
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
    object="config", object_class="AutoConfig", object_files="configuration file"
)
| 50.284946 | 129 | 0.653459 |
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
CONFIG_NAME,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
logging,
)
logger = logging.get_logger(__name__)
_re_configuration_file = re.compile(r"config\.(.*)\.json")
class PretrainedConfig(PushToHubMixin):
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
def __init__(self, **kwargs):
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False)
self.torch_dtype = kwargs.pop("torch_dtype", None)
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
)
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type} "
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return getattr(self, "_name_or_path", None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value)
@property
def use_return_dict(self) -> bool:
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
original_kwargs = copy.deepcopy(kwargs)
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, configuration_file)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=configuration_file, revision=revision, mirror=None
)
try:
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on "
"'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having "
"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass "
"`use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for "
"available revisions."
)
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}."
)
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
)
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
"offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a {configuration_file} file"
)
try:
config_dict = cls._dict_from_json_file(resolved_config_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
config_dict = self.to_dict()
default_config_dict = PretrainedConfig().to_dict()
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
@classmethod
def register_for_auto_class(cls, auto_class="AutoConfig"):
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def get_configuration_file(configuration_files: List[str]) -> str:
configuration_files_map = {}
for file_name in configuration_files:
search = _re_configuration_file.search(file_name)
if search is not None:
v = search.groups()[0]
configuration_files_map[v] = file_name
available_versions = sorted(configuration_files_map.keys())
configuration_file = CONFIG_NAME
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
configuration_file = configuration_files_map[v]
else:
break
return configuration_file
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
| true | true |
f7318c2b9fdaa9f536243ae29bdfe008ac0eb2a0 | 22,340 | py | Python | addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py | inaber0420/glTF-Modo-IO | 57f99aee4e9b6177d25b465b87d731b54a625532 | [
"Apache-2.0"
] | 1,084 | 2018-07-14T07:09:50.000Z | 2022-03-30T16:34:05.000Z | addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py | inaber0420/glTF-Modo-IO | 57f99aee4e9b6177d25b465b87d731b54a625532 | [
"Apache-2.0"
] | 1,375 | 2018-07-13T22:09:24.000Z | 2022-03-31T00:36:36.000Z | addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py | inaber0420/glTF-Modo-IO | 57f99aee4e9b6177d25b465b87d731b54a625532 | [
"Apache-2.0"
] | 235 | 2018-07-13T22:04:28.000Z | 2022-03-30T09:15:53.000Z | # Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mathutils import Vector
from . import gltf2_blender_export_keys
from ...io.com.gltf2_io_debug import print_console
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
def extract_primitives(glTF, blender_mesh, library, blender_object, blender_vertex_groups, modifiers, export_settings):
    """Extract primitives from a mesh.

    Splits the Blender mesh into one glTF primitive per material (plus
    optional LINES/POINTS primitives for loose edges/points) and returns
    them as a list of dicts with 'attributes', optional 'indices'/'mode',
    and 'material' keys.
    """
    print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)
    use_normals = export_settings[gltf2_blender_export_keys.NORMALS]
    if use_normals:
        blender_mesh.calc_normals_split()
    use_tangents = False
    if use_normals and export_settings[gltf2_blender_export_keys.TANGENTS]:
        # Tangents need an active UV layer; calc_tangents can still fail
        # (e.g. on n-gons), in which case tangent export is skipped.
        if blender_mesh.uv_layers.active and len(blender_mesh.uv_layers) > 0:
            try:
                blender_mesh.calc_tangents()
                use_tangents = True
            except Exception:
                print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')
    tex_coord_max = 0
    if export_settings[gltf2_blender_export_keys.TEX_COORDS]:
        if blender_mesh.uv_layers.active:
            tex_coord_max = len(blender_mesh.uv_layers)
    color_max = 0
    if export_settings[gltf2_blender_export_keys.COLORS]:
        color_max = len(blender_mesh.vertex_colors)
    armature = None
    skin = None
    if blender_vertex_groups and export_settings[gltf2_blender_export_keys.SKINS]:
        if modifiers is not None:
            modifiers_dict = {m.type: m for m in modifiers}
            if "ARMATURE" in modifiers_dict:
                modifier = modifiers_dict["ARMATURE"]
                armature = modifier.object
    # Skin must be ignored if the object is parented to a bone of the armature
    # (This creates an infinite recursive error)
    # So ignoring skin in that case
    is_child_of_arma = (
        armature and
        blender_object and
        blender_object.parent_type == "BONE" and
        blender_object.parent.name == armature.name
    )
    if is_child_of_arma:
        armature = None
    if armature:
        skin = gltf2_blender_gather_skins.gather_skin(armature, export_settings)
        if not skin:
            armature = None
    use_morph_normals = use_normals and export_settings[gltf2_blender_export_keys.MORPH_NORMAL]
    use_morph_tangents = use_morph_normals and use_tangents and export_settings[gltf2_blender_export_keys.MORPH_TANGENT]
    key_blocks = []
    if blender_mesh.shape_keys and export_settings[gltf2_blender_export_keys.MORPH]:
        # Only export shape keys that are actual morphs: skip the basis
        # (relative to itself) and muted keys.
        key_blocks = [
            key_block
            for key_block in blender_mesh.shape_keys.key_blocks
            if not (key_block == key_block.relative_key or key_block.mute)
        ]
    use_materials = export_settings[gltf2_blender_export_keys.MATERIALS]
    # Fetch vert positions and bone data (joint,weights)
    locs, morph_locs = __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings)
    if skin:
        vert_bones, num_joint_sets = __get_bone_data(blender_mesh, skin, blender_vertex_groups)
    # In Blender there is both per-vert data, like position, and also per-loop
    # (loop=corner-of-poly) data, like normals or UVs. glTF only has per-vert
    # data, so we need to split Blender verts up into potentially-multiple glTF
    # verts.
    #
    # First, we'll collect a "dot" for every loop: a struct that stores all the
    # attributes at that loop, namely the vertex index (which determines all
    # per-vert data), and all the per-loop data like UVs, etc.
    #
    # Each unique dot will become one unique glTF vert.
    # List all fields the dot struct needs.
    dot_fields = [('vertex_index', np.uint32)]
    if use_normals:
        dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)]
    if use_tangents:
        dot_fields += [('tx', np.float32), ('ty', np.float32), ('tz', np.float32), ('tw', np.float32)]
    for uv_i in range(tex_coord_max):
        dot_fields += [('uv%dx' % uv_i, np.float32), ('uv%dy' % uv_i, np.float32)]
    for col_i in range(color_max):
        dot_fields += [
            ('color%dr' % col_i, np.float32),
            ('color%dg' % col_i, np.float32),
            ('color%db' % col_i, np.float32),
            ('color%da' % col_i, np.float32),
        ]
    if use_morph_normals:
        for morph_i, _ in enumerate(key_blocks):
            dot_fields += [
                ('morph%dnx' % morph_i, np.float32),
                ('morph%dny' % morph_i, np.float32),
                ('morph%dnz' % morph_i, np.float32),
            ]
    dots = np.empty(len(blender_mesh.loops), dtype=np.dtype(dot_fields))
    # float64 scratch buffer; values are cast on assignment into the
    # uint32 'vertex_index' field of the structured array.
    vidxs = np.empty(len(blender_mesh.loops))
    blender_mesh.loops.foreach_get('vertex_index', vidxs)
    dots['vertex_index'] = vidxs
    del vidxs
    if use_normals:
        kbs = key_blocks if use_morph_normals else []
        normals, morph_normals = __get_normals(
            blender_mesh, kbs, armature, blender_object, export_settings
        )
        dots['nx'] = normals[:, 0]
        dots['ny'] = normals[:, 1]
        dots['nz'] = normals[:, 2]
        del normals
        for morph_i, ns in enumerate(morph_normals):
            dots['morph%dnx' % morph_i] = ns[:, 0]
            dots['morph%dny' % morph_i] = ns[:, 1]
            dots['morph%dnz' % morph_i] = ns[:, 2]
        del morph_normals
    if use_tangents:
        tangents = __get_tangents(blender_mesh, armature, blender_object, export_settings)
        dots['tx'] = tangents[:, 0]
        dots['ty'] = tangents[:, 1]
        dots['tz'] = tangents[:, 2]
        del tangents
        signs = __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings)
        dots['tw'] = signs
        del signs
    for uv_i in range(tex_coord_max):
        uvs = __get_uvs(blender_mesh, uv_i)
        dots['uv%dx' % uv_i] = uvs[:, 0]
        dots['uv%dy' % uv_i] = uvs[:, 1]
        del uvs
    for col_i in range(color_max):
        colors = __get_colors(blender_mesh, col_i)
        dots['color%dr' % col_i] = colors[:, 0]
        dots['color%dg' % col_i] = colors[:, 1]
        dots['color%db' % col_i] = colors[:, 2]
        dots['color%da' % col_i] = colors[:, 3]
        del colors
    # Calculate triangles and sort them into primitives.
    blender_mesh.calc_loop_triangles()
    loop_indices = np.empty(len(blender_mesh.loop_triangles) * 3, dtype=np.uint32)
    blender_mesh.loop_triangles.foreach_get('loops', loop_indices)
    prim_indices = {} # maps material index to TRIANGLES-style indices into dots
    if use_materials == "NONE": # Only for None. For placeholder and export, keep primitives
        # Put all vertices into one primitive
        prim_indices[-1] = loop_indices
    else:
        # Bucket by material index.
        tri_material_idxs = np.empty(len(blender_mesh.loop_triangles), dtype=np.uint32)
        blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs)
        loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop
        unique_material_idxs = np.unique(tri_material_idxs)
        del tri_material_idxs
        for material_idx in unique_material_idxs:
            prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx]
    # Create all the primitives.
    primitives = []
    for material_idx, dot_indices in prim_indices.items():
        # Extract just dots used by this primitive, deduplicate them, and
        # calculate indices into this deduplicated list.
        prim_dots = dots[dot_indices]
        prim_dots, indices = np.unique(prim_dots, return_inverse=True)
        if len(prim_dots) == 0:
            continue
        # Now just move all the data for prim_dots into attribute arrays
        attributes = {}
        blender_idxs = prim_dots['vertex_index']
        attributes['POSITION'] = locs[blender_idxs]
        for morph_i, vs in enumerate(morph_locs):
            attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
        if use_normals:
            normals = np.empty((len(prim_dots), 3), dtype=np.float32)
            normals[:, 0] = prim_dots['nx']
            normals[:, 1] = prim_dots['ny']
            normals[:, 2] = prim_dots['nz']
            attributes['NORMAL'] = normals
        if use_tangents:
            tangents = np.empty((len(prim_dots), 4), dtype=np.float32)
            tangents[:, 0] = prim_dots['tx']
            tangents[:, 1] = prim_dots['ty']
            tangents[:, 2] = prim_dots['tz']
            tangents[:, 3] = prim_dots['tw']
            attributes['TANGENT'] = tangents
        if use_morph_normals:
            for morph_i, _ in enumerate(key_blocks):
                ns = np.empty((len(prim_dots), 3), dtype=np.float32)
                ns[:, 0] = prim_dots['morph%dnx' % morph_i]
                ns[:, 1] = prim_dots['morph%dny' % morph_i]
                ns[:, 2] = prim_dots['morph%dnz' % morph_i]
                attributes['MORPH_NORMAL_%d' % morph_i] = ns
                if use_morph_tangents:
                    attributes['MORPH_TANGENT_%d' % morph_i] = __calc_morph_tangents(normals, ns, tangents)
        for tex_coord_i in range(tex_coord_max):
            uvs = np.empty((len(prim_dots), 2), dtype=np.float32)
            uvs[:, 0] = prim_dots['uv%dx' % tex_coord_i]
            uvs[:, 1] = prim_dots['uv%dy' % tex_coord_i]
            attributes['TEXCOORD_%d' % tex_coord_i] = uvs
        for color_i in range(color_max):
            colors = np.empty((len(prim_dots), 4), dtype=np.float32)
            colors[:, 0] = prim_dots['color%dr' % color_i]
            colors[:, 1] = prim_dots['color%dg' % color_i]
            colors[:, 2] = prim_dots['color%db' % color_i]
            colors[:, 3] = prim_dots['color%da' % color_i]
            attributes['COLOR_%d' % color_i] = colors
        if skin:
            # Pad per-vert influences to a multiple of 4 (one glTF joint set
            # holds 4 influences); unused slots get joint 0 with weight 0.
            joints = [[] for _ in range(num_joint_sets)]
            weights = [[] for _ in range(num_joint_sets)]
            for vi in blender_idxs:
                bones = vert_bones[vi]
                for j in range(0, 4 * num_joint_sets):
                    if j < len(bones):
                        joint, weight = bones[j]
                    else:
                        joint, weight = 0, 0.0
                    joints[j//4].append(joint)
                    weights[j//4].append(weight)
            for i, (js, ws) in enumerate(zip(joints, weights)):
                attributes['JOINTS_%d' % i] = js
                attributes['WEIGHTS_%d' % i] = ws
        primitives.append({
            'attributes': attributes,
            'indices': indices,
            'material': material_idx,
        })
    if export_settings['gltf_loose_edges']:
        # Find loose edges
        loose_edges = [e for e in blender_mesh.edges if e.is_loose]
        blender_idxs = [vi for e in loose_edges for vi in e.vertices]
        if blender_idxs:
            # Export one glTF vert per unique Blender vert in a loose edge
            blender_idxs = np.array(blender_idxs, dtype=np.uint32)
            blender_idxs, indices = np.unique(blender_idxs, return_inverse=True)
            attributes = {}
            attributes['POSITION'] = locs[blender_idxs]
            for morph_i, vs in enumerate(morph_locs):
                attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
            if skin:
                joints = [[] for _ in range(num_joint_sets)]
                weights = [[] for _ in range(num_joint_sets)]
                for vi in blender_idxs:
                    bones = vert_bones[vi]
                    for j in range(0, 4 * num_joint_sets):
                        if j < len(bones):
                            joint, weight = bones[j]
                        else:
                            joint, weight = 0, 0.0
                        joints[j//4].append(joint)
                        weights[j//4].append(weight)
                for i, (js, ws) in enumerate(zip(joints, weights)):
                    attributes['JOINTS_%d' % i] = js
                    attributes['WEIGHTS_%d' % i] = ws
            primitives.append({
                'attributes': attributes,
                'indices': indices,
                'mode': 1, # LINES
                'material': 0,
            })
    if export_settings['gltf_loose_points']:
        # Find loose points
        verts_in_edge = set(vi for e in blender_mesh.edges for vi in e.vertices)
        blender_idxs = [
            vi for vi, _ in enumerate(blender_mesh.vertices)
            if vi not in verts_in_edge
        ]
        if blender_idxs:
            blender_idxs = np.array(blender_idxs, dtype=np.uint32)
            attributes = {}
            attributes['POSITION'] = locs[blender_idxs]
            for morph_i, vs in enumerate(morph_locs):
                attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
            if skin:
                joints = [[] for _ in range(num_joint_sets)]
                weights = [[] for _ in range(num_joint_sets)]
                for vi in blender_idxs:
                    bones = vert_bones[vi]
                    for j in range(0, 4 * num_joint_sets):
                        if j < len(bones):
                            joint, weight = bones[j]
                        else:
                            joint, weight = 0, 0.0
                        joints[j//4].append(joint)
                        weights[j//4].append(weight)
                for i, (js, ws) in enumerate(zip(joints, weights)):
                    attributes['JOINTS_%d' % i] = js
                    attributes['WEIGHTS_%d' % i] = ws
            primitives.append({
                'attributes': attributes,
                'mode': 0, # POINTS
                'material': 0,
            })
    print_console('INFO', 'Primitives created: %d' % len(primitives))
    return primitives
def __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings):
    """Get vertex positions, plus per-shape-key morph positions as deltas.

    Returns (locs, morph_locs): locs is a (num_verts, 3) float32 array of
    base positions; morph_locs holds one same-shaped delta array per key
    block in key_blocks.
    """
    locs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
    # When shape keys are exported, the base positions come from the relative
    # (basis) key so that morph targets are deltas against the right basis.
    source = key_blocks[0].relative_key.data if key_blocks else blender_mesh.vertices
    source.foreach_get('co', locs)
    locs = locs.reshape(len(blender_mesh.vertices), 3)
    morph_locs = []
    for key_block in key_blocks:
        vs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
        key_block.data.foreach_get('co', vs)
        morph_locs.append(vs.reshape(len(blender_mesh.vertices), 3))
    # Transform for skinning.
    # (The previous armature.matrix_world @ (armature.matrix_world^-1 @
    # object.matrix_world) computation simplified to object.matrix_world and
    # was immediately overwritten anyway — dead code removed.)
    if armature and blender_object:
        loc_transform = blender_object.matrix_world
        locs[:] = __apply_mat_to_all(loc_transform, locs)
        for vs in morph_locs:
            vs[:] = __apply_mat_to_all(loc_transform, vs)
    # glTF stores deltas in morph targets
    for vs in morph_locs:
        vs -= locs
    if export_settings[gltf2_blender_export_keys.YUP]:
        __zup2yup(locs)
        for vs in morph_locs:
            __zup2yup(vs)
    return locs, morph_locs
def __get_normals(blender_mesh, key_blocks, armature, blender_object, export_settings):
    """Get normal for each loop, plus per-shape-key morph normals as deltas.

    Returns (normals, morph_normals): both are (num_loops, 3) float32 arrays;
    morph_normals holds one delta array per key block in key_blocks.
    """
    if key_blocks:
        # With shape keys, base normals come from the relative (basis) key.
        normals = key_blocks[0].relative_key.normals_split_get()
        normals = np.array(normals, dtype=np.float32)
    else:
        normals = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)
        blender_mesh.calc_normals_split()
        blender_mesh.loops.foreach_get('normal', normals)
    normals = normals.reshape(len(blender_mesh.loops), 3)
    morph_normals = []
    for key_block in key_blocks:
        ns = np.array(key_block.normals_split_get(), dtype=np.float32)
        ns = ns.reshape(len(blender_mesh.loops), 3)
        morph_normals.append(ns)
    # Transform for skinning
    if armature and blender_object:
        # Normals transform with the inverse-transpose of the linear part,
        # then get re-normalized.
        apply_matrix = (armature.matrix_world.inverted_safe() @ blender_object.matrix_world)
        apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed()
        normal_transform = armature.matrix_world.to_3x3() @ apply_matrix
        normals[:] = __apply_mat_to_all(normal_transform, normals)
        __normalize_vecs(normals)
        for ns in morph_normals:
            ns[:] = __apply_mat_to_all(normal_transform, ns)
            __normalize_vecs(ns)
    for ns in [normals, *morph_normals]:
        # Replace zero normals with the unit UP vector.
        # Seems to happen sometimes with degenerate tris?
        is_zero = ~ns.any(axis=1)
        ns[is_zero, 2] = 1
    # glTF stores deltas in morph targets
    for ns in morph_normals:
        ns -= normals
    if export_settings[gltf2_blender_export_keys.YUP]:
        __zup2yup(normals)
        for ns in morph_normals:
            __zup2yup(ns)
    return normals, morph_normals
def __get_tangents(blender_mesh, armature, blender_object, export_settings):
    """Get an array of the tangent for each loop, as a (num_loops, 3) float32 array."""
    num_loops = len(blender_mesh.loops)
    tangents = np.empty(num_loops * 3, dtype=np.float32)
    blender_mesh.loops.foreach_get('tangent', tangents)
    tangents = tangents.reshape(num_loops, 3)
    # Transform for skinning: only the rotation part of the object-to-armature
    # transform applies to tangents, followed by re-normalization.
    if armature and blender_object:
        apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
        rotation_only = apply_matrix.to_quaternion().to_matrix()
        tangents = __apply_mat_to_all(rotation_only, tangents)
        __normalize_vecs(tangents)
    if export_settings[gltf2_blender_export_keys.YUP]:
        __zup2yup(tangents)
    return tangents
def __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings):
    """Get a (num_loops,) float32 array of per-loop bitangent signs."""
    signs = np.empty(len(blender_mesh.loops), dtype=np.float32)
    blender_mesh.loops.foreach_get('bitangent_sign', signs)
    # Transform for skinning
    if armature and blender_object:
        # Bitangent signs should flip when handedness changes
        # TODO: confirm
        apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
        rotation_part = apply_matrix.to_quaternion().to_matrix()
        if rotation_part.determinant() < 0:
            signs *= -1
    # No change for Zup -> Yup
    return signs
def __calc_morph_tangents(normals, morph_normal_deltas, tangents):
    """Compute morph-target tangent deltas by rotating each base tangent with
    the rotation that carries the base normal onto the morphed normal.
    """
    # TODO: check if this works
    morph_tangent_deltas = np.empty((len(normals), 3), dtype=np.float32)
    for i, (n_row, delta_row) in enumerate(zip(normals, morph_normal_deltas)):
        base_normal = Vector(n_row)
        morph_normal = base_normal + Vector(delta_row)  # convert back to non-delta
        base_tangent = Vector(tangents[i, :3])
        rot = morph_normal.rotation_difference(base_normal)
        morphed_tangent = Vector(base_tangent)
        morphed_tangent.rotate(rot)
        morph_tangent_deltas[i] = morphed_tangent - base_tangent  # back to delta
    return morph_tangent_deltas
def __get_uvs(blender_mesh, uv_i):
    """Get a (num_loops, 2) float32 array of UVs in glTF convention."""
    layer = blender_mesh.uv_layers[uv_i]
    num_loops = len(blender_mesh.loops)
    uvs = np.empty(num_loops * 2, dtype=np.float32)
    layer.data.foreach_get('uv', uvs)
    uvs = uvs.reshape(num_loops, 2)
    # Blender UV space -> glTF UV space: the v axis is flipped, (u, v) -> (u, 1 - v)
    uvs[:, 1] = 1.0 - uvs[:, 1]
    return uvs
def __get_colors(blender_mesh, color_i):
    """Get a (num_loops, 4) float32 array of vertex colors, RGB converted
    from sRGB to linear; alpha is passed through unchanged.
    """
    layer = blender_mesh.vertex_colors[color_i]
    num_loops = len(blender_mesh.loops)
    colors = np.empty(num_loops * 4, dtype=np.float32)
    layer.data.foreach_get('color', colors)
    colors = colors.reshape(num_loops, 4)
    # sRGB -> Linear (piecewise EOTF); operates on a view, writing back in place.
    rgb = colors[:, :-1]
    is_large = rgb >= 0.04045
    linear_low = np.where(rgb < 0.0, 0.0, rgb * (1.0 / 12.92))
    linear_high = np.power((rgb + 0.055) * (1.0 / 1.055), 2.4, where=is_large)
    rgb[:] = np.where(is_large, linear_high, linear_low)
    return colors
def __get_bone_data(blender_mesh, skin, blender_vertex_groups):
    """Collect per-vertex (joint, weight) influences, heaviest first.

    Returns (vert_bones, num_joint_sets): vert_bones has one sequence of
    (joint, weight) pairs per vertex; num_joint_sets is how many groups of
    4 influences are needed to hold the largest influence count.
    """
    joint_index_by_name = {joint.name: i for i, joint in enumerate(skin.joints)}
    group_to_joint = [joint_index_by_name.get(group.name) for group in blender_vertex_groups]
    # List of (joint, weight) pairs for each vert
    vert_bones = []
    max_influences = 0
    for vert in blender_mesh.vertices:
        influences = []
        for elem in vert.groups:
            if elem.weight <= 0.0:
                continue
            try:
                joint = group_to_joint[elem.group]
            except Exception:
                continue
            if joint is None:
                continue
            influences.append((joint, elem.weight))
        influences.sort(key=lambda pair: pair[1], reverse=True)
        if not influences:
            influences = ((0, 1.0),) # HACK for verts with zero weight (#308)
        vert_bones.append(influences)
        max_influences = max(max_influences, len(influences))
    # How many joint sets do we need? 1 set = 4 influences
    num_joint_sets = (max_influences + 3) // 4
    return vert_bones, num_joint_sets
def __zup2yup(array):
    """In-place conversion of an (n, 3) array from Blender's +Z-up to glTF's
    +Y-up convention: (x, y, z) -> (x, z, -y).
    """
    old_y = array[:, 1].copy()
    array[:, 1] = array[:, 2]
    array[:, 2] = -old_y
def __apply_mat_to_all(matrix, vectors):
    """Given matrix m and vectors [v1,v2,...], computes [m@v1,m@v2,...]"""
    is_4x4 = len(matrix) == 4
    linear = matrix.to_3x3() if is_4x4 else matrix
    # Right-multiplying each row by the transpose applies the linear part.
    transformed = vectors @ np.array(linear.transposed())
    # A 4x4 matrix also contributes its translation.
    if is_4x4:
        transformed += np.array(matrix.translation)
    return transformed
def __normalize_vecs(vectors):
    """Normalize each row of *vectors* in place; zero-length rows are left as-is."""
    lengths = np.sqrt(np.sum(vectors * vectors, axis=1, keepdims=True))
    np.divide(vectors, lengths, out=vectors, where=lengths != 0)
| 37.35786 | 120 | 0.616025 |
import numpy as np
from mathutils import Vector
from . import gltf2_blender_export_keys
from ...io.com.gltf2_io_debug import print_console
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
def extract_primitives(glTF, blender_mesh, library, blender_object, blender_vertex_groups, modifiers, export_settings):
print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)
use_normals = export_settings[gltf2_blender_export_keys.NORMALS]
if use_normals:
blender_mesh.calc_normals_split()
use_tangents = False
if use_normals and export_settings[gltf2_blender_export_keys.TANGENTS]:
if blender_mesh.uv_layers.active and len(blender_mesh.uv_layers) > 0:
try:
blender_mesh.calc_tangents()
use_tangents = True
except Exception:
print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')
tex_coord_max = 0
if export_settings[gltf2_blender_export_keys.TEX_COORDS]:
if blender_mesh.uv_layers.active:
tex_coord_max = len(blender_mesh.uv_layers)
color_max = 0
if export_settings[gltf2_blender_export_keys.COLORS]:
color_max = len(blender_mesh.vertex_colors)
armature = None
skin = None
if blender_vertex_groups and export_settings[gltf2_blender_export_keys.SKINS]:
if modifiers is not None:
modifiers_dict = {m.type: m for m in modifiers}
if "ARMATURE" in modifiers_dict:
modifier = modifiers_dict["ARMATURE"]
armature = modifier.object
is_child_of_arma = (
armature and
blender_object and
blender_object.parent_type == "BONE" and
blender_object.parent.name == armature.name
)
if is_child_of_arma:
armature = None
if armature:
skin = gltf2_blender_gather_skins.gather_skin(armature, export_settings)
if not skin:
armature = None
use_morph_normals = use_normals and export_settings[gltf2_blender_export_keys.MORPH_NORMAL]
use_morph_tangents = use_morph_normals and use_tangents and export_settings[gltf2_blender_export_keys.MORPH_TANGENT]
key_blocks = []
if blender_mesh.shape_keys and export_settings[gltf2_blender_export_keys.MORPH]:
key_blocks = [
key_block
for key_block in blender_mesh.shape_keys.key_blocks
if not (key_block == key_block.relative_key or key_block.mute)
]
use_materials = export_settings[gltf2_blender_export_keys.MATERIALS]
locs, morph_locs = __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings)
if skin:
vert_bones, num_joint_sets = __get_bone_data(blender_mesh, skin, blender_vertex_groups)
# attributes at that loop, namely the vertex index (which determines all
# per-vert data), and all the per-loop data like UVs, etc.
#
# Each unique dot will become one unique glTF vert.
# List all fields the dot struct needs.
dot_fields = [('vertex_index', np.uint32)]
if use_normals:
dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)]
if use_tangents:
dot_fields += [('tx', np.float32), ('ty', np.float32), ('tz', np.float32), ('tw', np.float32)]
for uv_i in range(tex_coord_max):
dot_fields += [('uv%dx' % uv_i, np.float32), ('uv%dy' % uv_i, np.float32)]
for col_i in range(color_max):
dot_fields += [
('color%dr' % col_i, np.float32),
('color%dg' % col_i, np.float32),
('color%db' % col_i, np.float32),
('color%da' % col_i, np.float32),
]
if use_morph_normals:
for morph_i, _ in enumerate(key_blocks):
dot_fields += [
('morph%dnx' % morph_i, np.float32),
('morph%dny' % morph_i, np.float32),
('morph%dnz' % morph_i, np.float32),
]
dots = np.empty(len(blender_mesh.loops), dtype=np.dtype(dot_fields))
vidxs = np.empty(len(blender_mesh.loops))
blender_mesh.loops.foreach_get('vertex_index', vidxs)
dots['vertex_index'] = vidxs
del vidxs
if use_normals:
kbs = key_blocks if use_morph_normals else []
normals, morph_normals = __get_normals(
blender_mesh, kbs, armature, blender_object, export_settings
)
dots['nx'] = normals[:, 0]
dots['ny'] = normals[:, 1]
dots['nz'] = normals[:, 2]
del normals
for morph_i, ns in enumerate(morph_normals):
dots['morph%dnx' % morph_i] = ns[:, 0]
dots['morph%dny' % morph_i] = ns[:, 1]
dots['morph%dnz' % morph_i] = ns[:, 2]
del morph_normals
if use_tangents:
tangents = __get_tangents(blender_mesh, armature, blender_object, export_settings)
dots['tx'] = tangents[:, 0]
dots['ty'] = tangents[:, 1]
dots['tz'] = tangents[:, 2]
del tangents
signs = __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings)
dots['tw'] = signs
del signs
for uv_i in range(tex_coord_max):
uvs = __get_uvs(blender_mesh, uv_i)
dots['uv%dx' % uv_i] = uvs[:, 0]
dots['uv%dy' % uv_i] = uvs[:, 1]
del uvs
for col_i in range(color_max):
colors = __get_colors(blender_mesh, col_i)
dots['color%dr' % col_i] = colors[:, 0]
dots['color%dg' % col_i] = colors[:, 1]
dots['color%db' % col_i] = colors[:, 2]
dots['color%da' % col_i] = colors[:, 3]
del colors
# Calculate triangles and sort them into primitives.
blender_mesh.calc_loop_triangles()
loop_indices = np.empty(len(blender_mesh.loop_triangles) * 3, dtype=np.uint32)
blender_mesh.loop_triangles.foreach_get('loops', loop_indices)
prim_indices = {} # maps material index to TRIANGLES-style indices into dots
if use_materials == "NONE": # Only for None. For placeholder and export, keep primitives
# Put all vertices into one primitive
prim_indices[-1] = loop_indices
else:
# Bucket by material index.
tri_material_idxs = np.empty(len(blender_mesh.loop_triangles), dtype=np.uint32)
blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs)
loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop
unique_material_idxs = np.unique(tri_material_idxs)
del tri_material_idxs
for material_idx in unique_material_idxs:
prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx]
# Create all the primitives.
primitives = []
for material_idx, dot_indices in prim_indices.items():
# Extract just dots used by this primitive, deduplicate them, and
# calculate indices into this deduplicated list.
prim_dots = dots[dot_indices]
prim_dots, indices = np.unique(prim_dots, return_inverse=True)
if len(prim_dots) == 0:
continue
# Now just move all the data for prim_dots into attribute arrays
attributes = {}
blender_idxs = prim_dots['vertex_index']
attributes['POSITION'] = locs[blender_idxs]
for morph_i, vs in enumerate(morph_locs):
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
if use_normals:
normals = np.empty((len(prim_dots), 3), dtype=np.float32)
normals[:, 0] = prim_dots['nx']
normals[:, 1] = prim_dots['ny']
normals[:, 2] = prim_dots['nz']
attributes['NORMAL'] = normals
if use_tangents:
tangents = np.empty((len(prim_dots), 4), dtype=np.float32)
tangents[:, 0] = prim_dots['tx']
tangents[:, 1] = prim_dots['ty']
tangents[:, 2] = prim_dots['tz']
tangents[:, 3] = prim_dots['tw']
attributes['TANGENT'] = tangents
if use_morph_normals:
for morph_i, _ in enumerate(key_blocks):
ns = np.empty((len(prim_dots), 3), dtype=np.float32)
ns[:, 0] = prim_dots['morph%dnx' % morph_i]
ns[:, 1] = prim_dots['morph%dny' % morph_i]
ns[:, 2] = prim_dots['morph%dnz' % morph_i]
attributes['MORPH_NORMAL_%d' % morph_i] = ns
if use_morph_tangents:
attributes['MORPH_TANGENT_%d' % morph_i] = __calc_morph_tangents(normals, ns, tangents)
for tex_coord_i in range(tex_coord_max):
uvs = np.empty((len(prim_dots), 2), dtype=np.float32)
uvs[:, 0] = prim_dots['uv%dx' % tex_coord_i]
uvs[:, 1] = prim_dots['uv%dy' % tex_coord_i]
attributes['TEXCOORD_%d' % tex_coord_i] = uvs
for color_i in range(color_max):
colors = np.empty((len(prim_dots), 4), dtype=np.float32)
colors[:, 0] = prim_dots['color%dr' % color_i]
colors[:, 1] = prim_dots['color%dg' % color_i]
colors[:, 2] = prim_dots['color%db' % color_i]
colors[:, 3] = prim_dots['color%da' % color_i]
attributes['COLOR_%d' % color_i] = colors
if skin:
joints = [[] for _ in range(num_joint_sets)]
weights = [[] for _ in range(num_joint_sets)]
for vi in blender_idxs:
bones = vert_bones[vi]
for j in range(0, 4 * num_joint_sets):
if j < len(bones):
joint, weight = bones[j]
else:
joint, weight = 0, 0.0
joints[j//4].append(joint)
weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)):
attributes['JOINTS_%d' % i] = js
attributes['WEIGHTS_%d' % i] = ws
primitives.append({
'attributes': attributes,
'indices': indices,
'material': material_idx,
})
if export_settings['gltf_loose_edges']:
# Find loose edges
loose_edges = [e for e in blender_mesh.edges if e.is_loose]
blender_idxs = [vi for e in loose_edges for vi in e.vertices]
if blender_idxs:
# Export one glTF vert per unique Blender vert in a loose edge
blender_idxs = np.array(blender_idxs, dtype=np.uint32)
blender_idxs, indices = np.unique(blender_idxs, return_inverse=True)
attributes = {}
attributes['POSITION'] = locs[blender_idxs]
for morph_i, vs in enumerate(morph_locs):
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
if skin:
joints = [[] for _ in range(num_joint_sets)]
weights = [[] for _ in range(num_joint_sets)]
for vi in blender_idxs:
bones = vert_bones[vi]
for j in range(0, 4 * num_joint_sets):
if j < len(bones):
joint, weight = bones[j]
else:
joint, weight = 0, 0.0
joints[j//4].append(joint)
weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)):
attributes['JOINTS_%d' % i] = js
attributes['WEIGHTS_%d' % i] = ws
primitives.append({
'attributes': attributes,
'indices': indices,
'mode': 1, # LINES
'material': 0,
})
if export_settings['gltf_loose_points']:
# Find loose points
verts_in_edge = set(vi for e in blender_mesh.edges for vi in e.vertices)
blender_idxs = [
vi for vi, _ in enumerate(blender_mesh.vertices)
if vi not in verts_in_edge
]
if blender_idxs:
blender_idxs = np.array(blender_idxs, dtype=np.uint32)
attributes = {}
attributes['POSITION'] = locs[blender_idxs]
for morph_i, vs in enumerate(morph_locs):
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
if skin:
joints = [[] for _ in range(num_joint_sets)]
weights = [[] for _ in range(num_joint_sets)]
for vi in blender_idxs:
bones = vert_bones[vi]
for j in range(0, 4 * num_joint_sets):
if j < len(bones):
joint, weight = bones[j]
else:
joint, weight = 0, 0.0
joints[j//4].append(joint)
weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)):
attributes['JOINTS_%d' % i] = js
attributes['WEIGHTS_%d' % i] = ws
primitives.append({
'attributes': attributes,
'mode': 0, # POINTS
'material': 0,
})
print_console('INFO', 'Primitives created: %d' % len(primitives))
return primitives
def __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings):
locs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
source = key_blocks[0].relative_key.data if key_blocks else blender_mesh.vertices
source.foreach_get('co', locs)
locs = locs.reshape(len(blender_mesh.vertices), 3)
morph_locs = []
for key_block in key_blocks:
vs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
key_block.data.foreach_get('co', vs)
vs = vs.reshape(len(blender_mesh.vertices), 3)
morph_locs.append(vs)
# Transform for skinning
if armature and blender_object:
apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
loc_transform = armature.matrix_world @ apply_matrix
loc_transform = blender_object.matrix_world
locs[:] = __apply_mat_to_all(loc_transform, locs)
for vs in morph_locs:
vs[:] = __apply_mat_to_all(loc_transform, vs)
# glTF stores deltas in morph targets
for vs in morph_locs:
vs -= locs
if export_settings[gltf2_blender_export_keys.YUP]:
__zup2yup(locs)
for vs in morph_locs:
__zup2yup(vs)
return locs, morph_locs
def __get_normals(blender_mesh, key_blocks, armature, blender_object, export_settings):
if key_blocks:
normals = key_blocks[0].relative_key.normals_split_get()
normals = np.array(normals, dtype=np.float32)
else:
normals = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)
blender_mesh.calc_normals_split()
blender_mesh.loops.foreach_get('normal', normals)
normals = normals.reshape(len(blender_mesh.loops), 3)
morph_normals = []
for key_block in key_blocks:
ns = np.array(key_block.normals_split_get(), dtype=np.float32)
ns = ns.reshape(len(blender_mesh.loops), 3)
morph_normals.append(ns)
# Transform for skinning
if armature and blender_object:
apply_matrix = (armature.matrix_world.inverted_safe() @ blender_object.matrix_world)
apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed()
normal_transform = armature.matrix_world.to_3x3() @ apply_matrix
normals[:] = __apply_mat_to_all(normal_transform, normals)
__normalize_vecs(normals)
for ns in morph_normals:
ns[:] = __apply_mat_to_all(normal_transform, ns)
__normalize_vecs(ns)
for ns in [normals, *morph_normals]:
# Replace zero normals with the unit UP vector.
# Seems to happen sometimes with degenerate tris?
is_zero = ~ns.any(axis=1)
ns[is_zero, 2] = 1
# glTF stores deltas in morph targets
for ns in morph_normals:
ns -= normals
if export_settings[gltf2_blender_export_keys.YUP]:
__zup2yup(normals)
for ns in morph_normals:
__zup2yup(ns)
return normals, morph_normals
def __get_tangents(blender_mesh, armature, blender_object, export_settings):
tangents = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)
blender_mesh.loops.foreach_get('tangent', tangents)
tangents = tangents.reshape(len(blender_mesh.loops), 3)
# Transform for skinning
if armature and blender_object:
apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
tangent_transform = apply_matrix.to_quaternion().to_matrix()
tangents = __apply_mat_to_all(tangent_transform, tangents)
__normalize_vecs(tangents)
if export_settings[gltf2_blender_export_keys.YUP]:
__zup2yup(tangents)
return tangents
def __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings):
signs = np.empty(len(blender_mesh.loops), dtype=np.float32)
blender_mesh.loops.foreach_get('bitangent_sign', signs)
# Transform for skinning
if armature and blender_object:
# Bitangent signs should flip when handedness changes
# TODO: confirm
apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
tangent_transform = apply_matrix.to_quaternion().to_matrix()
flipped = tangent_transform.determinant() < 0
if flipped:
signs *= -1
# No change for Zup -> Yup
return signs
def __calc_morph_tangents(normals, morph_normal_deltas, tangents):
# TODO: check if this works
morph_tangent_deltas = np.empty((len(normals), 3), dtype=np.float32)
for i in range(len(normals)):
n = Vector(normals[i])
morph_n = n + Vector(morph_normal_deltas[i]) # convert back to non-delta
t = Vector(tangents[i, :3])
rotation = morph_n.rotation_difference(n)
t_morph = Vector(t)
t_morph.rotate(rotation)
morph_tangent_deltas[i] = t_morph - t # back to delta
return morph_tangent_deltas
def __get_uvs(blender_mesh, uv_i):
    """Read the uv_i-th UV layer as an (n_loops, 2) float32 array in glTF space."""
    layer = blender_mesh.uv_layers[uv_i]
    num_loops = len(blender_mesh.loops)
    uvs = np.empty(num_loops * 2, dtype=np.float32)
    layer.data.foreach_get('uv', uvs)
    uvs = uvs.reshape(num_loops, 2)
    # Blender's V axis points up while glTF's points down: v -> 1 - v.
    uvs[:, 1] = 1.0 - uvs[:, 1]
    return uvs
def __get_colors(blender_mesh, color_i):
    """Read the color_i-th vertex-color layer as an (n_loops, 4) float32 array,
    converting the RGB channels from sRGB to linear (alpha is left untouched).
    """
    layer = blender_mesh.vertex_colors[color_i]
    colors = np.empty(len(blender_mesh.loops) * 4, dtype=np.float32)
    layer.data.foreach_get('color', colors)
    colors = colors.reshape(len(blender_mesh.loops), 4)
    # sRGB -> Linear (piecewise sRGB EOTF, threshold 0.04045)
    # ``rgb`` is a view into ``colors``, so writing rgb[:] updates colors in place.
    rgb = colors[:, :-1]
    not_small = rgb >= 0.04045
    # Linear segment; negative inputs are clamped to 0.
    small_result = np.where(rgb < 0.0, 0.0, rgb * (1.0 / 12.92))
    # ``where=not_small`` skips small entries: those output slots are left
    # uninitialized, but the np.where below only reads the not_small ones.
    large_result = np.power((rgb + 0.055) * (1.0 / 1.055), 2.4, where=not_small)
    rgb[:] = np.where(not_small, large_result, small_result)
    return colors
def __get_bone_data(blender_mesh, skin, blender_vertex_groups):
    """Collect per-vertex (joint, weight) influences for the given skin.

    Returns (vert_bones, num_joint_sets) where vert_bones holds one sequence
    of (joint, weight) pairs per vertex, sorted by descending weight, and
    num_joint_sets is how many JOINTS_n/WEIGHTS_n sets are needed (4 per set).
    """
    joint_name_to_index = {joint.name: index for index, joint in enumerate(skin.joints)}
    group_to_joint = [joint_name_to_index.get(g.name) for g in blender_vertex_groups]
    vert_bones = []
    max_num_influences = 0
    for vertex in blender_mesh.vertices:
        influences = []
        for group_element in vertex.groups:
            if group_element.weight <= 0.0:
                continue
            try:
                joint = group_to_joint[group_element.group]
            except Exception:
                continue
            if joint is not None:
                influences.append((joint, group_element.weight))
        influences.sort(key=lambda pair: pair[1], reverse=True)
        if not influences:
            influences = ((0, 1.0),)  # HACK for verts with zero weight (#308)
        vert_bones.append(influences)
        max_num_influences = max(max_num_influences, len(influences))
    # How many joint sets do we need? 1 set = 4 influences.
    num_joint_sets = (max_num_influences + 3) // 4
    return vert_bones, num_joint_sets
def __zup2yup(array):
    """In-place Blender Z-up -> glTF Y-up conversion: (x, y, z) -> (x, z, -y)."""
    old_y = array[:, 1].copy()
    array[:, 1] = array[:, 2]
    array[:, 2] = -old_y
def __apply_mat_to_all(matrix, vectors):
    """Apply a 3x3 or 4x4 mathutils matrix to every row vector; returns a new array."""
    is_4x4 = len(matrix) == 4
    # Linear part: row vectors times the transposed matrix.
    linear = matrix.to_3x3() if is_4x4 else matrix
    transformed = np.matmul(vectors, np.array(linear.transposed()))
    # Translation part only exists on a 4x4 matrix.
    if is_4x4:
        transformed += np.array(matrix.translation)
    return transformed
def __normalize_vecs(vectors):
    """Normalize each row vector in place; zero-length rows are left as-is."""
    lengths = np.linalg.norm(vectors, axis=1, keepdims=True)
    nonzero = lengths != 0
    np.divide(vectors, lengths, out=vectors, where=nonzero)
| true | true |
f7318c381216a7a0a5e30f0ca35e0f3c326d7ead | 184 | py | Python | beecrowd exercises/beecrowd-1145.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1145.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1145.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | n1,n2 = list(map(int,input().split()))
cont = 1
for i in range(1,(int((n2/n1))+1)):
r = ""
for y in range(n1):
r += str(cont) + " "
cont += 1
print(r[:-1])
| 20.444444 | 38 | 0.451087 | n1,n2 = list(map(int,input().split()))
cont = 1
for i in range(1,(int((n2/n1))+1)):
r = ""
for y in range(n1):
r += str(cont) + " "
cont += 1
print(r[:-1])
| true | true |
f7318c8d58133c98fa8052706364e5c9c4e4f7bb | 116 | py | Python | ComStream/tf_idf/__init__.py | alimpfard/ComStream | 1bfde3a01ba0b996b4c41e0e9112089618789469 | [
"MIT"
] | null | null | null | ComStream/tf_idf/__init__.py | alimpfard/ComStream | 1bfde3a01ba0b996b4c41e0e9112089618789469 | [
"MIT"
] | null | null | null | ComStream/tf_idf/__init__.py | alimpfard/ComStream | 1bfde3a01ba0b996b4c41e0e9112089618789469 | [
"MIT"
] | null | null | null | from . import Agent
from . import Coordinator
from . import DataManager
from . import DataPoint
from . import Utils
| 19.333333 | 25 | 0.784483 | from . import Agent
from . import Coordinator
from . import DataManager
from . import DataPoint
from . import Utils
| true | true |
f7318c9eae78d9ecd4097a5f77c09245d4f5b3b0 | 14,635 | py | Python | appmetrics/statistics.py | avalente/appmetrics | 366fc7e1ca897e49a2227cbfa43bfa02a47f1acc | [
"Apache-2.0"
] | 60 | 2015-01-19T05:32:32.000Z | 2021-06-08T07:35:02.000Z | appmetrics/statistics.py | alexKeleon/appmetrics | 366fc7e1ca897e49a2227cbfa43bfa02a47f1acc | [
"Apache-2.0"
] | 6 | 2015-03-02T19:25:02.000Z | 2021-03-27T17:26:07.000Z | appmetrics/statistics.py | alexKeleon/appmetrics | 366fc7e1ca897e49a2227cbfa43bfa02a47f1acc | [
"Apache-2.0"
] | 12 | 2015-01-19T05:04:14.000Z | 2020-09-08T07:49:54.000Z | ## Module statistics.py
##
## Copyright (c) 2014 Antonio Valente <y3sman@gmail.com>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
Statistics module.
The basic functions are stolen from python 3.4 stdlib
"""
from __future__ import division
import collections
import math
import operator
import functools
from fractions import Fraction
from decimal import Decimal
from .exceptions import StatisticsError
from .py3comp import xrange, iteritems
def isfinite(n):
    """Return True when ``n`` is a regular finite number, False otherwise.

    Infinities and NaNs are not finite; 0.0 is.  Backported from Python 3.
    """
    return not math.isinf(n) and not math.isnan(n)
def sum(data, start=0):
    """sum(data [, start]) -> value

    Return a high-precision sum of the given numeric data. If optional
    argument ``start`` is given, it is added to the total. If ``data`` is
    empty, ``start`` (defaulting to 0) is returned.

    Values are accumulated as exact integer ratios grouped by denominator,
    so float rounding error does not build up; the result is converted back
    to the coerced common type at the end.  NOTE: this deliberately shadows
    the builtin ``sum`` inside this module.
    """
    n, d = exact_ratio(start)
    T = type(start)
    partials = {d: n}  # map {denominator: sum of numerators}
    # Micro-optimizations: bind frequently used globals/methods to locals.
    coerce_types_ = coerce_types
    exact_ratio_ = exact_ratio
    partials_get = partials.get
    # Add numerators for each denominator, and track the "current" type.
    for x in data:
        T = coerce_types_(T, type(x))
        n, d = exact_ratio_(x)
        partials[d] = partials_get(d, 0) + n
    if None in partials:
        # exact_ratio() maps INF/NAN to denominator None; propagate the
        # non-finite value in the coerced type.
        assert issubclass(T, (float, Decimal))
        assert not isfinite(partials[None])
        return T(partials[None])
    # Exact accumulation of all per-denominator partial sums.
    total = Fraction()
    for d, n in sorted(partials.items()):
        total += Fraction(n, d)
    if issubclass(T, int):
        assert total.denominator == 1
        return T(total.numerator)
    if issubclass(T, Decimal):
        # Divide inside Decimal so the result is a Decimal.
        return T(total.numerator) / total.denominator
    return T(total)
def exact_ratio(x):
    """Convert Real number x exactly to a (numerator, denominator) pair.

    x is expected to be an int, Fraction, Decimal or float.  Infinities
    and NaNs are reported as ``(x, None)``.
    """
    try:
        # int and Fraction expose the ratio directly.
        if hasattr(x, 'numerator') and hasattr(x, 'denominator'):
            return x.numerator, x.denominator
        # float (and Decimal on Python >= 3.6)
        if hasattr(x, 'as_integer_ratio'):
            return x.as_integer_ratio()
        # older Decimal
        if hasattr(x, 'as_tuple'):
            return decimal_to_ratio(x)
    except (OverflowError, ValueError):
        # INF or NAN cannot be expressed as an integer ratio.
        return (x, None)
    msg = "can't convert type '{}' to numerator/denominator"
    raise TypeError(msg.format(type(x).__name__))
# FIXME This is faster than Fraction.from_decimal, but still too slow.
def decimal_to_ratio(d):
    """Convert Decimal d to an exact integer ratio (numerator, denominator).

    The ratio is not reduced to lowest terms.  Raises ValueError for
    infinities and NaNs.
    """
    sign, digits, exp = d.as_tuple()
    if exp in ('F', 'n', 'N'):  # INF, NAN, sNAN
        assert not d.is_finite()
        raise ValueError
    # Fold the digit tuple into a single integer.
    num = functools.reduce(lambda acc, digit: acc * 10 + digit, digits, 0)
    if sign:
        num = -num
    return (num, 10 ** -exp)
def coerce_types(T1, T2):
    """Coerce types T1 and T2 to a common type.

    int defers to any other numeric type, float trumps everything,
    subclasses trump their parent class, and two subclasses of the same
    base class coerce to the second one.  Incompatible pairs (such as
    Fraction with Decimal) raise TypeError.
    """
    # Fast paths: identical types, or int deferring to the other type.
    if T1 is T2 or T2 is int:
        return T1
    if T1 is int:
        return T2
    # A subclass wins over its parent class.
    for winner, loser in ((T2, T1), (T1, T2)):
        if issubclass(winner, loser):
            return winner
    # float wins over any remaining numeric type.
    if issubclass(T2, float):
        return T2
    if issubclass(T1, float):
        return T1
    # Sibling subclasses of a common base: the second one wins.
    if T1.__base__ is T2.__base__:
        return T2
    raise TypeError('cannot coerce types %r and %r' % (T1, T2))
def counts(data):
    """Return sorted (value, frequency) pairs for the most common values.

    Only the values tied for the highest frequency are kept.  An empty
    iterable produces an empty list; None raises TypeError.
    """
    if data is None:
        raise TypeError('None is not iterable')
    table = collections.Counter(data).most_common()
    if not table:
        return table
    # most_common() sorts by descending frequency, so the values tied for
    # the top frequency form a prefix of the table.
    top_frequency = table[0][1]
    return [pair for pair in table if pair[1] == top_frequency]
# === Measures of central tendency (averages) ===
def mean(data):
    """Return the sample arithmetic mean of ``data``.

    ``data`` may be any iterable of numbers; an empty input raises
    StatisticsError.
    """
    # One-shot iterators must be materialized so len() works.
    values = list(data) if iter(data) is data else data
    size = len(values)
    if size < 1:
        raise StatisticsError('mean requires at least one data point')
    return sum(values) / size
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
    """Return the median (middle value) of numeric data.

    With an odd number of points the middle point is returned; with an
    even number the two middle values are averaged.
    """
    ordered = sorted(data)
    size = len(ordered)
    if size == 0:
        raise StatisticsError("no median for empty data")
    middle, odd = divmod(size, 2)
    if odd:
        return ordered[middle]
    return (ordered[middle - 1] + ordered[middle]) / 2
def median_low(data):
    """Return the low median of numeric data.

    For an odd count this is the middle value; for an even count it is
    the smaller of the two middle values.
    """
    ordered = sorted(data)
    size = len(ordered)
    if size == 0:
        raise StatisticsError("no median for empty data")
    middle = size // 2
    return ordered[middle] if size % 2 else ordered[middle - 1]
def median_high(data):
    """Return the high median of data.

    For an odd count this is the middle value; for an even count it is
    the larger of the two middle values.
    """
    ordered = sorted(data)
    if not ordered:
        raise StatisticsError("no median for empty data")
    return ordered[len(ordered) // 2]
def mode(data):
    """Return the single most common data point from discrete data.

    Raises StatisticsError when the input is empty or when several
    values are tied for most common.
    """
    # counts() keeps only the values tied for the highest frequency.
    table = counts(data)
    if not table:
        raise StatisticsError('no mode for empty data')
    if len(table) > 1:
        raise StatisticsError(
            'no unique mode; found %d equally common values' % len(table)
        )
    return table[0][0]
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
    """Return the sum of square deviations of ``data`` about ``c``.

    When ``c`` is None the mean is computed and used.  Passing an
    arbitrary ``c`` can lead to garbage results.
    """
    if c is None:
        c = mean(data)
    total = sum((x - c) ** 2 for x in data)
    # The sum of deviations is zero in exact arithmetic; subtract the
    # squared, scaled float residue to compensate for rounding error.
    total -= sum((x - c) for x in data) ** 2 / len(data)
    assert not total < 0, 'negative sum of square deviations: %f' % total
    return total
def variance(data, xbar=None):
    """Return the sample variance of ``data`` (n - 1 denominator).

    Requires at least two data points.  ``xbar``, when supplied, must be
    the true mean of ``data``; it is not validated.  Decimals and
    Fractions are supported.
    """
    if iter(data) is data:
        data = list(data)
    size = len(data)
    if size < 2:
        raise StatisticsError('variance requires at least two data points')
    return _ss(data, xbar) / (size - 1)
def pvariance(data, mu=None):
    """Return the population variance of ``data`` (n denominator).

    Requires at least one data point.  ``mu``, when supplied, must be
    the true mean of ``data``; it is not validated.  Decimals and
    Fractions are supported.
    """
    if iter(data) is data:
        data = list(data)
    size = len(data)
    if size < 1:
        raise StatisticsError('pvariance requires at least one data point')
    return _ss(data, mu) / size
def stdev(data, xbar=None):
    """Return the square root of the sample variance.

    See ``variance`` for arguments and other details.
    """
    var = variance(data, xbar)
    # Decimal provides its own sqrt(); everything else goes through math.
    if hasattr(var, 'sqrt'):
        return var.sqrt()
    return math.sqrt(var)
def pstdev(data, mu=None):
    """Return the square root of the population variance.

    See ``pvariance`` for arguments and other details.
    """
    var = pvariance(data, mu)
    # Decimal provides its own sqrt(); everything else goes through math.
    if hasattr(var, 'sqrt'):
        return var.sqrt()
    return math.sqrt(var)
def geometric_mean(data):
    """Return the geometric mean of ``data``.

    Non-positive values are substituted before multiplying: zeros become
    ``math.e`` and negatives become 1.0, so the product stays meaningful.
    """
    if not data:
        raise StatisticsError('geometric_mean requires at least one data point')
    adjusted = []
    for x in data:
        if x > 0:
            adjusted.append(x)
        elif x == 0:
            adjusted.append(math.e)
        else:
            adjusted.append(1.0)
    product = functools.reduce(operator.mul, adjusted)
    return math.pow(math.fabs(product), 1.0 / len(adjusted))
def harmonic_mean(data):
    """Return the harmonic mean of ``data``.

    Zero values contribute nothing to the reciprocal sum; an input whose
    reciprocal sum is zero yields 0.0.
    """
    if not data:
        raise StatisticsError('harmonic_mean requires at least one data point')
    reciprocal_sum = sum((1.0 / x) if x else 0.0 for x in data)
    if not reciprocal_sum:
        return 0.0
    return len(data) / reciprocal_sum
def skewness(data):
    """Return the skewness of the data's distribution.

    Returns 0.0 when the standard deviation is zero.
    """
    if not data:
        raise StatisticsError('skewness requires at least one data point')
    spread = stdev(data) ** 3
    if not spread:
        return 0.0
    center = mean(data)
    return sum(((x - center) ** 3 / spread) for x in data) / len(data)
def kurtosis(data):
    """Return the (excess) kurtosis of the data's distribution.

    Returns 0.0 when the standard deviation is zero; otherwise the
    fourth standardized moment minus 3.
    """
    if not data:
        raise StatisticsError('kurtosis requires at least one data point')
    spread = stdev(data) ** 4
    if not spread:
        return 0.0
    center = mean(data)
    return sum(((x - center) ** 4 / spread) for x in data) / len(data) - 3
def percentile(data, n):
    """Return the n-th percentile of ``data``.

    ``data`` must already be sorted.  Raises StatisticsError when the
    computed index falls outside the data.
    """
    size = len(data)
    index = (n / 100.0) * size - 0.5
    if not 0 <= index <= size:
        raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
    return data[int(index)]
def get_histogram(data):
    """Return the histogram of ``data`` as sorted (bin_upper_bound, count) pairs.

    ``data`` must already be sorted; at least two points are required.
    """
    size = len(data)
    if size < 2:
        raise StatisticsError('Too few data points ({}) for get_histogram'.format(size))
    bins = get_histogram_bins(data[0], data[-1], stdev(data), size)
    histogram = {bin_: 0 for bin_ in bins}
    # Each value is counted in the first bin whose upper bound covers it.
    for value in data:
        for bin_ in bins:
            if value <= bin_:
                histogram[bin_] += 1
                break
    return sorted(iteritems(histogram))
def get_histogram_bins(min_, max_, std, count):
    """Return histogram bin upper bounds for the given range and spread."""
    width = _get_bin_width(std, count)
    bin_count = int(round((max_ - min_) / width) + 1)
    if not bin_count:
        return [min_]
    return [min_ + step * width for step in xrange(1, bin_count + 1)]
def _get_bin_width(stdev, count):
    """Return the histogram's optimal bin width based on Sturges.

    http://www.jstor.org/pss/2965501
    NOTE: the ``stdev`` parameter shadows the module-level function of the
    same name; harmless here, since the function is not called in this body.
    """
    width = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
    # Never return a zero width.
    return width or 1
| 28.362403 | 99 | 0.624804 | partials[d] = partials_get(d, 0) + n
if None in partials:
assert issubclass(T, (float, Decimal))
assert not isfinite(partials[None])
return T(partials[None])
total = Fraction()
for d, n in sorted(partials.items()):
total += Fraction(n, d)
if issubclass(T, int):
assert total.denominator == 1
return T(total.numerator)
if issubclass(T, Decimal):
return T(total.numerator) / total.denominator
return T(total)
def exact_ratio(x):
try:
try:
return x.numerator, x.denominator
except AttributeError:
try:
return x.as_integer_ratio()
except AttributeError:
try:
return decimal_to_ratio(x)
except AttributeError:
msg = "can't convert type '{}' to numerator/denominator"
raise TypeError(msg.format(type(x).__name__))
except (OverflowError, ValueError):
# INF or NAN
return (x, None)
# FIXME This is faster than Fraction.from_decimal, but still too slow.
def decimal_to_ratio(d):
sign, digits, exp = d.as_tuple()
if exp in ('F', 'n', 'N'): # INF, NAN, sNAN
assert not d.is_finite()
raise ValueError
num = 0
for digit in digits:
num = num * 10 + digit
if sign:
num = -num
den = 10 ** -exp
return (num, den)
def coerce_types(T1, T2):
# Get the common/fast cases out of the way first.
if T1 is T2: return T1
if T1 is int: return T2
if T2 is int: return T1
# Subclasses trump their parent class.
if issubclass(T2, T1): return T2
if issubclass(T1, T2): return T1
# Floats trump everything else.
if issubclass(T2, float): return T2
if issubclass(T1, float): return T1
# Subclasses of the same base class give priority to the second.
if T1.__base__ is T2.__base__: return T2
# Otherwise, just give up.
raise TypeError('cannot coerce types %r and %r' % (T1, T2))
def counts(data):
if data is None:
raise TypeError('None is not iterable')
table = collections.Counter(data).most_common()
if not table:
return table
# Extract the values with the highest frequency.
maxfreq = table[0][1]
for i in range(1, len(table)):
if table[i][1] != maxfreq:
table = table[:i]
break
return table
# === Measures of central tendency (averages) ===
def mean(data):
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
return sum(data) / n
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
def median_low(data):
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
return data[n // 2 - 1]
def median_high(data):
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n // 2]
def mode(data):
# Generate a table of sorted (value, frequency) pairs.
table = counts(data)
if len(table) == 1:
return table[0][0]
elif table:
raise StatisticsError(
'no unique mode; found %d equally common values' % len(table)
)
else:
raise StatisticsError('no mode for empty data')
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
if c is None:
c = mean(data)
ss = sum((x - c) ** 2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
ss -= sum((x - c) for x in data) ** 2 / len(data)
assert not ss < 0, 'negative sum of square deviations: %f' % ss
return ss
def variance(data, xbar=None):
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
ss = _ss(data, xbar)
return ss / (n - 1)
def pvariance(data, mu=None):
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
ss = _ss(data, mu)
return ss / n
def stdev(data, xbar=None):
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def pstdev(data, mu=None):
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def geometric_mean(data):
if not data:
raise StatisticsError('geometric_mean requires at least one data point')
# in order to support negative or null values
data = [x if x > 0 else math.e if x == 0 else 1.0 for x in data]
return math.pow(math.fabs(functools.reduce(operator.mul, data)), 1.0 / len(data))
def harmonic_mean(data):
if not data:
raise StatisticsError('harmonic_mean requires at least one data point')
divisor = sum(map(lambda x: 1.0 / x if x else 0.0, data))
return len(data) / divisor if divisor else 0.0
def skewness(data):
if not data:
raise StatisticsError('skewness requires at least one data point')
size = len(data)
sd = stdev(data) ** 3
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 3 / sd), data)) / size
def kurtosis(data):
if not data:
raise StatisticsError('kurtosis requires at least one data point')
size = len(data)
sd = stdev(data) ** 4
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 4 / sd), data)) / size - 3
def percentile(data, n):
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)]
def get_histogram(data):
count = len(data)
if count < 2:
raise StatisticsError('Too few data points ({}) for get_histogram'.format(count))
min_ = data[0]
max_ = data[-1]
std = stdev(data)
bins = get_histogram_bins(min_, max_, std, count)
res = {x: 0 for x in bins}
for value in data:
for bin_ in bins:
if value <= bin_:
res[bin_] += 1
break
return sorted(iteritems(res))
def get_histogram_bins(min_, max_, std, count):
width = _get_bin_width(std, count)
count = int(round((max_ - min_) / width) + 1)
if count:
bins = [i * width + min_ for i in xrange(1, count + 1)]
else:
bins = [min_]
return bins
def _get_bin_width(stdev, count):
w = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
if w:
return w
else:
return 1
| true | true |
f7318cbf69b4247d05d82297d5c1cf2c37526fa4 | 6,137 | py | Python | lavaplayer/websocket.py | xArty4/lavaplayer | 1fae7c74e5e3c1eaf45b06fc05fc5fb16a2e4d3c | [
"MIT"
] | null | null | null | lavaplayer/websocket.py | xArty4/lavaplayer | 1fae7c74e5e3c1eaf45b06fc05fc5fb16a2e4d3c | [
"MIT"
] | null | null | null | lavaplayer/websocket.py | xArty4/lavaplayer | 1fae7c74e5e3c1eaf45b06fc05fc5fb16a2e4d3c | [
"MIT"
] | null | null | null | import asyncio
import aiohttp
import logging
from lavaplayer.exceptions import NodeError
from .objects import (
Info,
PlayerUpdateEvent,
TrackStartEvent,
TrackEndEvent,
TrackExceptionEvent,
TrackStuckEvent,
WebSocketClosedEvent,
)
from .emitter import Emitter
import typing as t
if t.TYPE_CHECKING:
from .client import LavalinkClient
_LOGGER = logging.getLogger("lavaplayer.ws")
class WS:
    """Lavalink websocket client.

    Opens the node websocket, retries on transient connection failures,
    and dispatches incoming payloads to the client's event emitter.
    """
    def __init__(
        self,
        client: "LavalinkClient",
        host: str,
        port: int,
        is_ssl: bool = False,
    ) -> None:
        # aiohttp ClientWebSocketResponse once connected; None until then.
        self.ws = None
        self.ws_url = f"{'wss' if is_ssl else 'ws'}://{host}:{port}"
        self.client = client
        self._headers = client._headers
        self._loop = client._loop
        self.emitter: Emitter = client.event_manager
        # Set True only after a successful handshake.
        self.is_connect: bool = False
    async def _connect(self):
        """Open the websocket and pump messages until the socket closes.

        Transient errors (connection refused, server disconnect) retry every
        10 seconds; authentication failures (401/403) and other handshake
        errors give up.
        """
        async with aiohttp.ClientSession(headers=self._headers, loop=self._loop) as session:
            self.session = session
            try:
                self.ws = await self.session.ws_connect(self.ws_url)
                # NOTE(review): ``session`` can never be None inside this
                # ``async with`` block, so this branch looks unreachable —
                # confirm the intended condition.
                if session is None:
                    await self.check_connection()
            except (aiohttp.ClientConnectorError, aiohttp.WSServerHandshakeError, aiohttp.ServerDisconnectedError) as error:
                if isinstance(error, aiohttp.ClientConnectorError):
                    _LOGGER.error(f"Could not connect to websocket: {error}")
                    _LOGGER.warning("Reconnecting to websocket after 10 seconds")
                    await asyncio.sleep(10)
                    await self._connect()
                    return
                elif isinstance(error, aiohttp.WSServerHandshakeError):
                    if error.status in (403, 401):  # Unauthorized or Forbidden
                        _LOGGER.warning("Password authentication failed - closing websocket")
                        return
                    # NOTE(review): this branch does not return, so execution
                    # falls through to the "Connected" log below even though
                    # the handshake failed — confirm this is intended.
                    _LOGGER.warning("Please check your websocket port - closing websocket")
                elif isinstance(error, aiohttp.ServerDisconnectedError):
                    _LOGGER.error(f"Could not connect to websocket: {error}")
                    _LOGGER.warning("Reconnecting to websocket after 10 seconds")
                    await asyncio.sleep(10)
                    await self._connect()
                    return
            _LOGGER.info("Connected to websocket")
            self.is_connect = True
            # Message pump: dispatch TEXT frames, stop on CLOSED/ERROR.
            async for msg in self.ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    await self.callback(msg.json())
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    _LOGGER.error("Websocket closed")
                    break
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    _LOGGER.error(msg.data)
                    break
    async def check_connection(self):
        """Reconnect loop: clears cached nodes and re-dials every 10 seconds.

        NOTE(review): ``not self.ws.closed`` is True while the socket is
        still OPEN, so this condition appears inverted (it loops while the
        connection is healthy) — confirm the intended predicate.
        """
        while self.ws.closed is None or not self.ws.closed or not self.is_connected:
            _LOGGER.warning("Websocket closed unexpectedly - reconnecting in 10 seconds")
            if self.client.nodes:
                self.client.nodes.clear()
            await asyncio.sleep(10)
            await self._connect()
    async def callback(self, payload: dict):
        """Dispatch a decoded Lavalink payload based on its ``op`` field.

        ``stats`` updates the client info snapshot, ``playerUpdate`` emits a
        player-state event, and ``event`` fans out to the per-track events,
        advancing the guild queue on TrackEndEvent.
        """
        if payload["op"] == "stats":
            self.client.info = Info(
                playing_players=payload["playingPlayers"],
                memory_used=payload["memory"]["used"],
                memory_free=payload["memory"]["free"],
                players=payload["players"],
                uptime=payload["uptime"]
            )
        elif payload["op"] == "playerUpdate":
            data = PlayerUpdateEvent(
                guild_id=payload["guildId"],
                time=payload["state"]["time"],
                position=payload["state"].get("position"),
                connected=payload["state"]["connected"],
            )
            self.emitter.emit("playerUpdate", data)
        elif payload["op"] == "event":
            # Events without a track payload carry nothing to dispatch.
            if not payload.get("track"):
                return
            track = await self.client._decodetrack(payload["track"])
            guild_id = int(payload["guildId"])
            try:
                node = await self.client.get_guild_node(guild_id)
            except NodeError:
                node = None
            if payload["type"] == "TrackStartEvent":
                self.emitter.emit("TrackStartEvent", TrackStartEvent(track, guild_id))
            elif payload["type"] == "TrackEndEvent":
                self.emitter.emit("TrackEndEvent", TrackEndEvent(track, guild_id, payload["reason"]))
                # Queue management: replay on repeat, otherwise pop the
                # finished track and start the next one if any.
                if not node:
                    return
                if not node.queue:
                    return
                if node.repeat:
                    await self.client.play(guild_id, track, node.queue[0].requester, True)
                    return
                del node.queue[0]
                await self.client.set_guild_node(guild_id, node)
                if len(node.queue) != 0:
                    await self.client.play(guild_id, node.queue[0], node.queue[0].requester, True)
            elif payload["type"] == "TrackExceptionEvent":
                self.emitter.emit("TrackExceptionEvent", TrackExceptionEvent(track, guild_id, payload["exception"], payload["message"], payload["severity"], payload["cause"]))
            elif payload["type"] == "TrackStuckEvent":
                self.emitter.emit("TrackStuckEvent", TrackStuckEvent(track, guild_id, payload["thresholdMs"]))
            elif payload["type"] == "WebSocketClosedEvent":
                self.emitter.emit("WebSocketClosedEvent", WebSocketClosedEvent(track, guild_id, payload["code"], payload["reason"], payload["byRemote"]))
    @property
    def is_connected(self) -> bool:
        # Connected means the handshake succeeded and the socket is open.
        return self.is_connect and self.ws.closed is False
    async def send(self, payload):
        """Send a dict payload as JSON over the websocket.

        When disconnected, logs an error and enters the reconnect loop
        instead of sending; the payload is dropped, not queued.
        """
        if not self.is_connected:
            _LOGGER.error("Not connected to websocket")
            await self.check_connection()
            return
        await self.ws.send_json(payload)
| 40.111111 | 175 | 0.570311 | import asyncio
import aiohttp
import logging
from lavaplayer.exceptions import NodeError
from .objects import (
Info,
PlayerUpdateEvent,
TrackStartEvent,
TrackEndEvent,
TrackExceptionEvent,
TrackStuckEvent,
WebSocketClosedEvent,
)
from .emitter import Emitter
import typing as t
if t.TYPE_CHECKING:
from .client import LavalinkClient
_LOGGER = logging.getLogger("lavaplayer.ws")
class WS:
def __init__(
self,
client: "LavalinkClient",
host: str,
port: int,
is_ssl: bool = False,
) -> None:
self.ws = None
self.ws_url = f"{'wss' if is_ssl else 'ws'}://{host}:{port}"
self.client = client
self._headers = client._headers
self._loop = client._loop
self.emitter: Emitter = client.event_manager
self.is_connect: bool = False
async def _connect(self):
async with aiohttp.ClientSession(headers=self._headers, loop=self._loop) as session:
self.session = session
try:
self.ws = await self.session.ws_connect(self.ws_url)
if session is None:
await self.check_connection()
except (aiohttp.ClientConnectorError, aiohttp.WSServerHandshakeError, aiohttp.ServerDisconnectedError) as error:
if isinstance(error, aiohttp.ClientConnectorError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
elif isinstance(error, aiohttp.WSServerHandshakeError):
if error.status in (403, 401):
_LOGGER.warning("Password authentication failed - closing websocket")
return
_LOGGER.warning("Please check your websocket port - closing websocket")
elif isinstance(error, aiohttp.ServerDisconnectedError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
_LOGGER.info("Connected to websocket")
self.is_connect = True
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
await self.callback(msg.json())
elif msg.type == aiohttp.WSMsgType.CLOSED:
_LOGGER.error("Websocket closed")
break
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(msg.data)
break
async def check_connection(self):
while self.ws.closed is None or not self.ws.closed or not self.is_connected:
_LOGGER.warning("Websocket closed unexpectedly - reconnecting in 10 seconds")
if self.client.nodes:
self.client.nodes.clear()
await asyncio.sleep(10)
await self._connect()
async def callback(self, payload: dict):
if payload["op"] == "stats":
self.client.info = Info(
playing_players=payload["playingPlayers"],
memory_used=payload["memory"]["used"],
memory_free=payload["memory"]["free"],
players=payload["players"],
uptime=payload["uptime"]
)
elif payload["op"] == "playerUpdate":
data = PlayerUpdateEvent(
guild_id=payload["guildId"],
time=payload["state"]["time"],
position=payload["state"].get("position"),
connected=payload["state"]["connected"],
)
self.emitter.emit("playerUpdate", data)
elif payload["op"] == "event":
if not payload.get("track"):
return
track = await self.client._decodetrack(payload["track"])
guild_id = int(payload["guildId"])
try:
node = await self.client.get_guild_node(guild_id)
except NodeError:
node = None
if payload["type"] == "TrackStartEvent":
self.emitter.emit("TrackStartEvent", TrackStartEvent(track, guild_id))
elif payload["type"] == "TrackEndEvent":
self.emitter.emit("TrackEndEvent", TrackEndEvent(track, guild_id, payload["reason"]))
if not node:
return
if not node.queue:
return
if node.repeat:
await self.client.play(guild_id, track, node.queue[0].requester, True)
return
del node.queue[0]
await self.client.set_guild_node(guild_id, node)
if len(node.queue) != 0:
await self.client.play(guild_id, node.queue[0], node.queue[0].requester, True)
elif payload["type"] == "TrackExceptionEvent":
self.emitter.emit("TrackExceptionEvent", TrackExceptionEvent(track, guild_id, payload["exception"], payload["message"], payload["severity"], payload["cause"]))
elif payload["type"] == "TrackStuckEvent":
self.emitter.emit("TrackStuckEvent", TrackStuckEvent(track, guild_id, payload["thresholdMs"]))
elif payload["type"] == "WebSocketClosedEvent":
self.emitter.emit("WebSocketClosedEvent", WebSocketClosedEvent(track, guild_id, payload["code"], payload["reason"], payload["byRemote"]))
    @property
    def is_connected(self) -> bool:
        """True once the handshake flag is set and the websocket is open.

        ``self.ws.closed is False`` (rather than ``not self.ws.closed``) is
        deliberate: it treats any non-False value of ``closed`` as "not
        connected".
        """
        return self.is_connect and self.ws.closed is False
    async def send(self, payload):
        """Serialise *payload* as JSON and send it over the websocket.

        If the connection is down, the payload is dropped: an error is
        logged and a reconnect check is triggered instead of sending.
        """
        if not self.is_connected:
            _LOGGER.error("Not connected to websocket")
            await self.check_connection()
            return
        await self.ws.send_json(payload)
| true | true |
f7318d4e874e77b247071f0e8f618b2b791be9d7 | 629 | py | Python | skl2onnx/operator_converters/id_op.py | xiaowuhu/sklearn-onnx | e85674a67a0a043e19c2ffe181e5d31eca8ce40b | [
"Apache-2.0"
] | 323 | 2018-12-18T20:23:19.000Z | 2022-03-25T09:47:31.000Z | skl2onnx/operator_converters/id_op.py | xiaowuhu/sklearn-onnx | e85674a67a0a043e19c2ffe181e5d31eca8ce40b | [
"Apache-2.0"
] | 408 | 2019-01-02T12:16:10.000Z | 2022-03-21T14:01:28.000Z | skl2onnx/operator_converters/id_op.py | xiaowuhu/sklearn-onnx | e85674a67a0a043e19c2ffe181e5d31eca8ce40b | [
"Apache-2.0"
] | 70 | 2018-12-20T19:36:07.000Z | 2022-03-14T06:41:36.000Z | # SPDX-License-Identifier: Apache-2.0
from ..common._apply_operation import apply_identity
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
def convert_sklearn_identity(scope: Scope, operator: Operator,
                             container: ModelComponentContainer):
    """Map a scikit-learn identity operator onto a single ONNX Identity node.

    The node simply forwards the operator's first input to its first output.
    """
    source = operator.inputs[0].full_name
    target = operator.outputs[0].full_name
    apply_identity(
        scope, source, target, container,
        operator_name=scope.get_unique_operator_name('CIdentity'))


register_converter('SklearnIdentity', convert_sklearn_identity)
| 33.105263 | 66 | 0.761526 |
from ..common._apply_operation import apply_identity
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
def convert_sklearn_identity(scope: Scope, operator: Operator,
                             container: ModelComponentContainer):
    """Convert a scikit-learn identity operator into an ONNX Identity node
    that forwards the first input straight to the first output."""
    apply_identity(
        scope, operator.inputs[0].full_name,
        operator.outputs[0].full_name, container,
        operator_name=scope.get_unique_operator_name('CIdentity'))


# Make the converter discoverable by the skl2onnx registration machinery.
register_converter('SklearnIdentity', convert_sklearn_identity)
| true | true |
f7318d62eadf54fe70b5a57ab76350ff07135c26 | 177 | py | Python | tests/cases/core/layer3_pubsub/rpc/timeout/bad_call.py | AaronJGaut/pyspoke | 37bbe8b42c5abe129a3736b255c8a2ee17a4fb59 | [
"MIT"
] | null | null | null | tests/cases/core/layer3_pubsub/rpc/timeout/bad_call.py | AaronJGaut/pyspoke | 37bbe8b42c5abe129a3736b255c8a2ee17a4fb59 | [
"MIT"
] | null | null | null | tests/cases/core/layer3_pubsub/rpc/timeout/bad_call.py | AaronJGaut/pyspoke | 37bbe8b42c5abe129a3736b255c8a2ee17a4fb59 | [
"MIT"
] | null | null | null | import spoke
try:
spoke.call("junk", None, timeout=2)
except TimeoutError:
print("Got expected TimeoutError")
else:
raise TestFailure("Didn't get a TimeoutError")
| 17.7 | 50 | 0.711864 | import spoke
try:
spoke.call("junk", None, timeout=2)
except TimeoutError:
print("Got expected TimeoutError")
else:
raise TestFailure("Didn't get a TimeoutError")
| true | true |
f7318e0ce7bdc535165b2bde29c259ad0a74e64c | 9,150 | py | Python | tests/providers/test_internet.py | shirakia/faker | 4eb4ef24f5edbcbadd38f941025b671f5b6ebe60 | [
"MIT"
] | 1 | 2019-01-16T14:02:54.000Z | 2019-01-16T14:02:54.000Z | tests/providers/test_internet.py | shirakia/faker | 4eb4ef24f5edbcbadd38f941025b671f5b6ebe60 | [
"MIT"
] | null | null | null | tests/providers/test_internet.py | shirakia/faker | 4eb4ef24f5edbcbadd38f941025b671f5b6ebe60 | [
"MIT"
] | 1 | 2019-11-07T03:33:43.000Z | 2019-11-07T03:33:43.000Z | # coding=utf-8
from __future__ import unicode_literals
from itertools import cycle
import unittest
import mock
import pytest
import six
from email_validator import validate_email
from faker import Faker
from faker.providers.person.ja_JP import Provider as JaProvider
from faker.utils import text
class TestInternetProvider(unittest.TestCase):
    """Tests for the default-locale internet provider."""

    def setUp(self):
        self.factory = Faker()

    def test_email(self):
        # A generated address must use the explicitly requested domain.
        email = self.factory.email(domain='example.com')
        assert email.split('@')[1] == 'example.com'

    @mock.patch(
        'faker.providers.internet.Provider.image_placeholder_services',
        {'https://dummyimage.com/{width}x{height}'},
    )
    def test_image_url(self):
        # With a single placeholder service patched in, the URL built from
        # explicit dimensions is fully deterministic.
        my_width = 500
        my_height = 1024
        url = self.factory.image_url(my_width, my_height)
        assert 'https://dummyimage.com/{}x{}'.format(my_width, my_height) == url
        # Without dimensions, random ones are chosen; only the service base
        # URL can be asserted.
        url = self.factory.image_url()
        assert 'https://dummyimage.com/' in url

    def test_hostname(self):
        # levels=1 -> "<host>.<domain-word>.<tld>", three dot-separated parts.
        hostname_1_level = self.factory.hostname(levels=1)
        hostname_parts = hostname_1_level.split(".")
        assert hostname_1_level
        self.assertIsInstance(hostname_1_level, six.string_types)
        assert len(hostname_parts) == 3
        # levels=0 still yields a non-empty hostname string.
        hostname_0_level = self.factory.hostname(levels=0)
        assert hostname_0_level
        self.assertIsInstance(hostname_0_level, six.string_types)
class TestInternetProviderUrl(unittest.TestCase):
    """Tests for URL generation and its ``schemes`` parameter."""

    def setUp(self):
        self.factory = Faker()

    @staticmethod
    def is_correct_scheme(url, schemes):
        # True when the URL starts with any "<scheme>://" prefix.
        return any(url.startswith('{}://'.format(scheme)) for scheme in schemes)

    def test_url_default_schemes(self):
        # Without arguments only http/https URLs may be produced.
        for _ in range(100):
            url = self.factory.url()
            assert self.is_correct_scheme(url, ['http', 'https'])

    def test_url_custom_schemes(self):
        schemes_sets = [
            ['usb'],
            ['ftp', 'file'],
            ['usb', 'telnet', 'http'],
        ]
        # Cycle through the scheme sets across 100 generated URLs.
        for _, schemes in zip(range(100), cycle(schemes_sets)):
            url = self.factory.url(schemes=schemes)
            assert self.is_correct_scheme(url, schemes)

    def test_url_empty_schemes_list_generate_schemeless_urls(self):
        # An explicit empty list means "no scheme", not "default schemes".
        for _ in range(100):
            url = self.factory.url(schemes=[])
            assert not url.startswith('http')
            assert url.startswith('://')
class TestJaJP(unittest.TestCase):
    """Tests for the ja_JP internet provider."""

    def setUp(self):
        self.factory = Faker('ja')

    def test_internet(self):
        # A domain word must be the slug of one of the provider's romanized
        # last names.
        names = JaProvider.last_romanized_names
        domain_word = self.factory.domain_word()
        self.assertIsInstance(domain_word, six.string_types)
        assert any(domain_word == text.slugify(name) for name in names)
        domain_name = self.factory.domain_name()
        deep_domain_name = self.factory.domain_name(3)
        self.assertIsInstance(domain_name, six.string_types)
        self.assertIsInstance(deep_domain_name, six.string_types)
        # domain_name(3) -> three levels, i.e. exactly three dots.
        assert deep_domain_name.count('.') == 3
        # Negative level counts are rejected.
        with pytest.raises(ValueError):
            self.factory.domain_name(-1)
        user_name = self.factory.user_name()
        self.assertIsInstance(user_name, six.string_types)
        tld = self.factory.tld()
        self.assertIsInstance(tld, six.string_types)


class TestZhCN(unittest.TestCase):
    """Tests for the zh_CN internet provider."""

    def setUp(self):
        self.factory = Faker(locale='zh_CN')

    def test_email(self):
        email = self.factory.email()
        # Syntax-only check; deliverability would require network access.
        validate_email(email, check_deliverability=False)

    def test_domain_word(self):
        domain_word = self.factory.domain_word()
        assert len(domain_word) > 1

    @mock.patch(
        'faker.providers.internet.Provider.tld',
        lambda x: 'cn',
    )
    def test_domain_name(self):
        # With the TLD pinned to "cn", levels=1 yields "<word>.cn".
        domain_name_1_level = self.factory.domain_name(levels=1)
        domain_parts = domain_name_1_level.split(".")
        assert len(domain_parts) == 2
        assert domain_parts[-1] == 'cn'
        # levels=2 inserts a second-level zone (generic or province code)
        # between the word and the TLD.
        domain_name_2_level = self.factory.domain_name(levels=2)
        domain_parts = domain_name_2_level.split(".")
        assert len(domain_parts) == 3
        assert domain_parts[-1] == 'cn'
        assert domain_parts[1] in ['ac', 'com', 'edu', 'gov', 'mil',
                                   'net', 'org', 'ah', 'bj', 'cq',
                                   'fj', 'gd', 'gs', 'gz', 'gx', 'ha',
                                   'hb', 'he', 'hi', 'hk', 'hl', 'hn',
                                   'jl', 'js', 'jx', 'ln', 'mo', 'nm',
                                   'nx', 'qh', 'sc', 'sd', 'sh', 'sn',
                                   'sx', 'tj', 'xj', 'xz', 'yn', 'zj']
class TestZhTW(unittest.TestCase):
    """Tests for the zh_TW internet provider."""

    def setUp(self):
        self.factory = Faker(locale='zh_TW')

    def test_email(self):
        email = self.factory.email()
        validate_email(email, check_deliverability=False)


class TestHuHU(unittest.TestCase):
    """Tests for the hu_HU internet provider."""

    def setUp(self):
        self.factory = Faker('hu_HU')

    def test_internet(self):
        # Smoke-test the basic generators: each must return a string.
        domain_name = self.factory.domain_name()
        self.assertIsInstance(domain_name, six.string_types)
        tld = self.factory.tld()
        self.assertIsInstance(tld, six.string_types)
        email = self.factory.email()
        self.assertIsInstance(email, six.string_types)


class TestPlPL(unittest.TestCase):
    """Tests for the pl_PL internet provider."""

    def setUp(self):
        self.factory = Faker('pl_PL')
        # Keep the provider instance to check generated values against its
        # source data.
        self.provider = self.factory.provider('faker.providers.internet')

    def test_free_email_domain(self):
        domain = self.factory.free_email_domain()
        assert domain in self.provider.free_email_domains

    def test_tld(self):
        tld = self.factory.tld()
        assert tld in self.provider.tlds
class TestNlNl(unittest.TestCase):
    """ASCII transliteration of Dutch user names in email generators."""

    def setUp(self):
        self.factory = Faker('nl_NL')
        self.provider = self.factory.provider('faker.providers.internet')

    # Each test patches user_name to a fixed accented value so the expected
    # ASCII-folded local part is deterministic.
    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'


class TestArAa(unittest.TestCase):
    """ASCII transliteration of Arabic user names in email generators."""

    def setUp(self):
        self.factory = Faker('ar_AA')
        self.provider = self.factory.provider('faker.providers.internet')

    # The Arabic name must be romanized into an ASCII local part.
    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'


class TestPtBR(unittest.TestCase):
    """ASCII transliteration of Brazilian Portuguese names in emails."""

    def setUp(self):
        self.factory = Faker('pt_BR')
        self.provider = self.factory.provider('faker.providers.internet')

    # Accented, mixed-case names must fold to lowercase ASCII local parts.
    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'VitóriaMagalhães',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'vitoriamagalhaes'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'JoãoSimões',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'joaosimoes'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'AndréCauã',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'andrecaua'
| 31.6609 | 80 | 0.628962 |
from __future__ import unicode_literals
from itertools import cycle
import unittest
import mock
import pytest
import six
from email_validator import validate_email
from faker import Faker
from faker.providers.person.ja_JP import Provider as JaProvider
from faker.utils import text
class TestInternetProvider(unittest.TestCase):
    """Tests for the default-locale internet provider."""

    def setUp(self):
        self.factory = Faker()

    def test_email(self):
        email = self.factory.email(domain='example.com')
        assert email.split('@')[1] == 'example.com'

    @mock.patch(
        'faker.providers.internet.Provider.image_placeholder_services',
        {'https://dummyimage.com/{width}x{height}'},
    )
    def test_image_url(self):
        # A single patched service makes the explicit-size URL deterministic.
        my_width = 500
        my_height = 1024
        url = self.factory.image_url(my_width, my_height)
        assert 'https://dummyimage.com/{}x{}'.format(my_width, my_height) == url
        url = self.factory.image_url()
        assert 'https://dummyimage.com/' in url

    def test_hostname(self):
        # levels=1 -> "<host>.<domain-word>.<tld>", three parts.
        hostname_1_level = self.factory.hostname(levels=1)
        hostname_parts = hostname_1_level.split(".")
        assert hostname_1_level
        self.assertIsInstance(hostname_1_level, six.string_types)
        assert len(hostname_parts) == 3
        hostname_0_level = self.factory.hostname(levels=0)
        assert hostname_0_level
        self.assertIsInstance(hostname_0_level, six.string_types)


class TestInternetProviderUrl(unittest.TestCase):
    """Tests for URL generation and its ``schemes`` parameter."""

    def setUp(self):
        self.factory = Faker()

    @staticmethod
    def is_correct_scheme(url, schemes):
        return any(url.startswith('{}://'.format(scheme)) for scheme in schemes)

    def test_url_default_schemes(self):
        for _ in range(100):
            url = self.factory.url()
            assert self.is_correct_scheme(url, ['http', 'https'])

    def test_url_custom_schemes(self):
        schemes_sets = [
            ['usb'],
            ['ftp', 'file'],
            ['usb', 'telnet', 'http'],
        ]
        for _, schemes in zip(range(100), cycle(schemes_sets)):
            url = self.factory.url(schemes=schemes)
            assert self.is_correct_scheme(url, schemes)

    def test_url_empty_schemes_list_generate_schemeless_urls(self):
        # An empty list means "no scheme", not "default schemes".
        for _ in range(100):
            url = self.factory.url(schemes=[])
            assert not url.startswith('http')
            assert url.startswith('://')
class TestJaJP(unittest.TestCase):
    """Tests for the ja_JP internet provider."""

    def setUp(self):
        self.factory = Faker('ja')

    def test_internet(self):
        # A domain word must be the slug of a romanized last name.
        names = JaProvider.last_romanized_names
        domain_word = self.factory.domain_word()
        self.assertIsInstance(domain_word, six.string_types)
        assert any(domain_word == text.slugify(name) for name in names)
        domain_name = self.factory.domain_name()
        deep_domain_name = self.factory.domain_name(3)
        self.assertIsInstance(domain_name, six.string_types)
        self.assertIsInstance(deep_domain_name, six.string_types)
        assert deep_domain_name.count('.') == 3
        with pytest.raises(ValueError):
            self.factory.domain_name(-1)
        user_name = self.factory.user_name()
        self.assertIsInstance(user_name, six.string_types)
        tld = self.factory.tld()
        self.assertIsInstance(tld, six.string_types)


class TestZhCN(unittest.TestCase):
    """Tests for the zh_CN internet provider."""

    def setUp(self):
        self.factory = Faker(locale='zh_CN')

    def test_email(self):
        email = self.factory.email()
        validate_email(email, check_deliverability=False)

    def test_domain_word(self):
        domain_word = self.factory.domain_word()
        assert len(domain_word) > 1

    @mock.patch(
        'faker.providers.internet.Provider.tld',
        lambda x: 'cn',
    )
    def test_domain_name(self):
        # TLD pinned to "cn": levels=1 -> "<word>.cn".
        domain_name_1_level = self.factory.domain_name(levels=1)
        domain_parts = domain_name_1_level.split(".")
        assert len(domain_parts) == 2
        assert domain_parts[-1] == 'cn'
        # levels=2 inserts a second-level zone (generic or province code).
        domain_name_2_level = self.factory.domain_name(levels=2)
        domain_parts = domain_name_2_level.split(".")
        assert len(domain_parts) == 3
        assert domain_parts[-1] == 'cn'
        assert domain_parts[1] in ['ac', 'com', 'edu', 'gov', 'mil',
                                   'net', 'org', 'ah', 'bj', 'cq',
                                   'fj', 'gd', 'gs', 'gz', 'gx', 'ha',
                                   'hb', 'he', 'hi', 'hk', 'hl', 'hn',
                                   'jl', 'js', 'jx', 'ln', 'mo', 'nm',
                                   'nx', 'qh', 'sc', 'sd', 'sh', 'sn',
                                   'sx', 'tj', 'xj', 'xz', 'yn', 'zj']
class TestZhTW(unittest.TestCase):
    """Tests for the zh_TW internet provider."""

    def setUp(self):
        self.factory = Faker(locale='zh_TW')

    def test_email(self):
        email = self.factory.email()
        validate_email(email, check_deliverability=False)


class TestHuHU(unittest.TestCase):
    """Tests for the hu_HU internet provider."""

    def setUp(self):
        self.factory = Faker('hu_HU')

    def test_internet(self):
        # Smoke-test the basic generators: each must return a string.
        domain_name = self.factory.domain_name()
        self.assertIsInstance(domain_name, six.string_types)
        tld = self.factory.tld()
        self.assertIsInstance(tld, six.string_types)
        email = self.factory.email()
        self.assertIsInstance(email, six.string_types)


class TestPlPL(unittest.TestCase):
    """Tests for the pl_PL internet provider."""

    def setUp(self):
        self.factory = Faker('pl_PL')
        self.provider = self.factory.provider('faker.providers.internet')

    def test_free_email_domain(self):
        domain = self.factory.free_email_domain()
        assert domain in self.provider.free_email_domains

    def test_tld(self):
        tld = self.factory.tld()
        assert tld in self.provider.tlds
class TestNlNl(unittest.TestCase):
    """ASCII transliteration of Dutch user names in email generators."""

    def setUp(self):
        self.factory = Faker('nl_NL')
        self.provider = self.factory.provider('faker.providers.internet')

    # user_name is patched to a fixed accented value so the expected
    # ASCII-folded local part is deterministic.
    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'fabiënné',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'fabienne'


class TestArAa(unittest.TestCase):
    """ASCII transliteration of Arabic user names in email generators."""

    def setUp(self):
        self.factory = Faker('ar_AA')
        self.provider = self.factory.provider('faker.providers.internet')

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'اصيل',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'asyl'


class TestPtBR(unittest.TestCase):
    """ASCII transliteration of Brazilian Portuguese names in emails."""

    def setUp(self):
        self.factory = Faker('pt_BR')
        self.provider = self.factory.provider('faker.providers.internet')

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'VitóriaMagalhães',
    )
    def test_ascii_safe_email(self):
        email = self.factory.ascii_safe_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'vitoriamagalhaes'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'JoãoSimões',
    )
    def test_ascii_free_email(self):
        email = self.factory.ascii_free_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'joaosimoes'

    @mock.patch(
        'faker.providers.internet.Provider.user_name',
        lambda x: 'AndréCauã',
    )
    def test_ascii_company_email(self):
        email = self.factory.ascii_company_email()
        validate_email(email, check_deliverability=False)
        assert email.split('@')[0] == 'andrecaua'
| true | true |
f7318f9b6f7017b7889c28a18c24f5841345d651 | 2,324 | py | Python | project-chat/chatApp/migrations/0001_initial.py | Torkvamedo/smx | a5aef4f430f56ac67100c505902f55e18fba5978 | [
"Unlicense"
] | null | null | null | project-chat/chatApp/migrations/0001_initial.py | Torkvamedo/smx | a5aef4f430f56ac67100c505902f55e18fba5978 | [
"Unlicense"
] | null | null | null | project-chat/chatApp/migrations/0001_initial.py | Torkvamedo/smx | a5aef4f430f56ac67100c505902f55e18fba5978 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-17 19:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the chat app: roles, users, messages and bans.

    Auto-generated by Django; do not hand-edit the field definitions.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ban',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='ChatUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('login', models.CharField(max_length=30, unique=True)),
                # NOTE(review): a 30-char CharField suggests plaintext
                # passwords -- presumably hashed elsewhere; confirm.
                ('password', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=1024)),
                ('date', models.DateTimeField(auto_now=True)),
                # Two FKs to ChatUser need distinct related_names.
                ('receiver_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to='chatApp.ChatUser')),
                ('sender_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to='chatApp.ChatUser')),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role_name', models.CharField(max_length=20)),
            ],
        ),
        # FKs added after both models exist.
        migrations.AddField(
            model_name='chatuser',
            name='role_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatApp.Role'),
        ),
        migrations.AddField(
            model_name='ban',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatApp.ChatUser'),
        ),
    ]
| 38.098361 | 144 | 0.577883 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the chat app: roles, users, messages and bans."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ban',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='ChatUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('login', models.CharField(max_length=30, unique=True)),
                # NOTE(review): looks like a plaintext password column --
                # presumably hashed upstream; confirm.
                ('password', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=1024)),
                ('date', models.DateTimeField(auto_now=True)),
                ('receiver_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to='chatApp.ChatUser')),
                ('sender_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to='chatApp.ChatUser')),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role_name', models.CharField(max_length=20)),
            ],
        ),
        migrations.AddField(
            model_name='chatuser',
            name='role_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatApp.Role'),
        ),
        migrations.AddField(
            model_name='ban',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatApp.ChatUser'),
        ),
    ]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.