#
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
# @lc code=start
class Solution:
    def exist(self, board, word):
        """Return True if `word` can be traced through adjacent, unvisited cells."""
        h = len(board)
        w = len(board[0])
        walked = [[0] * w for _ in range(h)]
        for i in range(h):
            for j in range(w):
                if board[i][j] == word[0]:
                    # Mark the starting cell, try to match the rest of the word, then backtrack.
                    walked[i][j] = 1
                    if self.helper(word[1:], board, walked, (i, j)):
                        return True
                    walked[i][j] = 0
        return False

    def helper(self, rest, board, walked, current_pos):
        """Depth-first search for `rest` from the cell at `current_pos`."""
        if len(rest) == 0:
            return True
        i, j = current_pos
        # Try the four neighbours (up, down, left, right), backtracking after each attempt.
        for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if (0 <= ni < len(board) and 0 <= nj < len(board[0])
                    and walked[ni][nj] == 0 and board[ni][nj] == rest[0]):
                walked[ni][nj] = 1
                if self.helper(rest[1:], board, walked, (ni, nj)):
                    return True
                walked[ni][nj] = 0
        return False
# @lc code=end
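# A minimal sanity check for the solution above, using the sample board and
# words from the problem statement (hypothetical local test, not part of the
# LeetCode harness):
if __name__ == '__main__':
    board = [["A", "B", "C", "E"],
             ["S", "F", "C", "S"],
             ["A", "D", "E", "E"]]
    print(Solution().exist(board, "ABCCED"))  # expected: True
    print(Solution().exist(board, "ABCB"))    # expected: False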
"""
-----------------------------------------------------------------------------
AUTHOR: Soumitra Samanta (soumitramath39@gmail.com)
-----------------------------------------------------------------------------
"""
import subprocess
import os
import numpy as np
from datetime import datetime
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
__all__ = [
'FragGenie'
]
class FragGenie():
def __init__(self, dir_fraggenie=''):
self.dir_fraggenie = dir_fraggenie
def to_numpy(self, array_str, sep=','):
return np.fromstring(array_str[1:-1], sep=sep)
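    # Example (hypothetical input): to_numpy('[50.02,77.04]') returns
    # np.array([50.02, 77.04]); the FragGenie CSV columns are assumed to store
    # mass lists in this bracketed, comma-separated form.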
def create_folder(self, folder_name):
if len(folder_name):
if not os.path.isdir(folder_name):
os.makedirs(folder_name)
return folder_name
def mol_prop_mass(self, smiles):
"""
Molecular mass
"""
return [Descriptors.ExactMolWt(Chem.MolFromSmiles(sm)) for sm in smiles]
def smiles2fraggenie_csv(
self,
input_path='',
input_filename='test_input.csv',
smiles_col='smiles',
output_path='',
output_filename='',
num_bonds_to_break=3,
min_fragment_mass=50,
max_smiles_len=250,
max_num_smiles=1000000000,
flag_display='true',
masses_option='METFRAG_MZ'
):
"""Calculate FragGenie from csv file"""
if(len(output_path)==0):
output_path = input_path
if(len(output_filename)==0):
output_filename = ''.join([
'fraggenie_', datetime.today().strftime('%d%m%Y%H%M%S'),
'_', str(np.random.random(1)[0])[2:],
'_nbonds_', str(num_bonds_to_break),
'_frgms_', str(min_fragment_mass),
'_smlen_', str(max_smiles_len),
'_', input_filename
])
bash_cmd = ''.join([
'bash ', self.dir_fraggenie,
'fragment.sh ',
input_path,
input_filename,
' ', output_path,
output_filename,
' ', smiles_col,
' ', str(num_bonds_to_break),
' ', str(min_fragment_mass),
' ', str(max_smiles_len),
' ', str(max_num_smiles),
' ', flag_display,
' ', masses_option
])
subprocess.call(bash_cmd, shell=True)
return output_path, output_filename, bash_cmd
def smiles2fraggenie(
self,
smiles,
num_bonds_to_break=3,
min_fragment_mass=50,
max_smiles_len=250,
max_num_smiles=1000000000,
flag_display='true',
masses_option='METFRAG_MZ',
input_path='dump/',
input_filename='',
massspec_sep=',',
fill_non_break_mol=1,
flag_del_temp_file=1,
verbose=0
):
"""Calculate FragGenie from smiles"""
input_path = self.create_folder(input_path)
if len(input_filename)==0:
input_filename = ''.join(['smiles_', datetime.today().strftime('%d%m%Y%H%M%S'),
'_', str(np.random.random(1)[0])[2:],
'.csv'
])
pd.DataFrame.from_dict({'smiles':smiles}).to_csv(''.join([input_path, input_filename]), index=False)
output_path, output_filename, bash_cmd = self.smiles2fraggenie_csv(
input_path=input_path,
input_filename=input_filename,
num_bonds_to_break=num_bonds_to_break,
min_fragment_mass=min_fragment_mass,
max_smiles_len=max_smiles_len,
max_num_smiles=max_num_smiles,
flag_display=flag_display,
masses_option=masses_option
)
df_smiles = pd.read_csv(output_path+output_filename)
        # Handle very small molecules that FragGenie cannot break into fragments (fall back to the molecular mass) and other unbreakable molecules.
if fill_non_break_mol:
fraggenie = [None]*len(smiles)
fraggenie_smiles = df_smiles['smiles'].tolist()
count1 = 0
count2 = 0
for i, sm in enumerate(smiles):
try:
fraggenie[i] = self.to_numpy(df_smiles[masses_option][fraggenie_smiles.index(sm)], sep=massspec_sep)
if len(fraggenie[i])==0:
if verbose:
print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
count1 += 1
except:
if verbose:
print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
count2 += 1
            print('Total number of non-fragmented molecules: {} (empty fragment list: {}, missing from FragGenie output: {})'.format(count1+count2, count1, count2))
else:
fraggenie = df_smiles[masses_option].apply(self.to_numpy, sep=massspec_sep).tolist()
if flag_del_temp_file:
filename = ''.join([input_path, input_filename])
if os.path.isfile(filename):
if verbose:
print('Removing "{}"' .format(filename))
os.remove(filename)
filename = ''.join([output_path, output_filename])
if os.path.isfile(filename):
if verbose:
print('Removing "{}"' .format(filename))
os.remove(filename)
return fraggenie
if __name__ == '__main__':
fraggenie = FragGenie()
output_path, output_filename, bash_cmd = fraggenie.smiles2fraggenie_csv(output_filename='fraggenie_test_input.csv')
smiles = ['Cn1cnc2n(C)c(=O)n(C)c(=O)c12',
'BrC1CCCCc1CC',
'C#1C#CC1',
'C#1C#CCcCCCc1',
'C#1CCCCCCC=1',
'C#1CCcNccccccccc1',
'Cn1cnc2n(C)c(=O)n(C)c(=O)c12']
fragment = fraggenie.smiles2fraggenie(smiles, fill_non_break_mol=1)
for i in range(len(smiles)):
print('smiles: {}\nfragment: {}' .format(smiles[i], fragment[i]))
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the expand composite optimization pass."""
import cirq
def assert_equal_mod_empty(expected, actual):
drop_empty = cirq.DropEmptyMoments()
drop_empty.optimize_circuit(actual)
if expected != actual:
# coverage: ignore
print('EXPECTED')
print(expected)
print('ACTUAL')
print(actual)
assert expected == actual
def test_empty_circuit():
circuit = cirq.Circuit()
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(cirq.Circuit(), circuit)
def test_empty_moment():
circuit = cirq.Circuit([])
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(cirq.Circuit([]), circuit)
def test_ignore_non_composite():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit()
circuit.append([cirq.X(q0), cirq.Y(q1), cirq.CZ(q0, q1), cirq.Z(q0)])
expected = circuit.copy()
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(expected, circuit)
def test_composite_default():
q0, q1 = cirq.LineQubit.range(2)
cnot = cirq.CNOT(q0, q1)
circuit = cirq.Circuit()
circuit.append(cnot)
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit()
expected.append([cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5])
assert_equal_mod_empty(expected, circuit)
def test_multiple_composite_default():
q0, q1 = cirq.LineQubit.range(2)
cnot = cirq.CNOT(q0, q1)
circuit = cirq.Circuit()
circuit.append([cnot, cnot])
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit()
decomp = [cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5]
expected.append([decomp, decomp])
assert_equal_mod_empty(expected, circuit)
def test_mix_composite_non_composite():
q0, q1 = cirq.LineQubit.range(2)
actual = cirq.Circuit.from_ops(cirq.X(q0), cirq.CNOT(q0, q1), cirq.X(q1))
opt = cirq.ExpandComposite()
opt.optimize_circuit(actual)
expected = cirq.Circuit.from_ops(cirq.X(q0),
cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5,
cirq.X(q1),
strategy=cirq.InsertStrategy.NEW)
assert_equal_mod_empty(expected, actual)
def test_recursive_composite():
q0, q1 = cirq.LineQubit.range(2)
swap = cirq.SWAP(q0, q1)
circuit = cirq.Circuit()
circuit.append(swap)
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5,
cirq.Y(q0) ** -0.5,
cirq.CZ(q1, q0),
cirq.Y(q0) ** 0.5,
cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5)
assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_not_flat_op_tree():
class DummyGate(cirq.SingleQubitGate):
def _decompose_(self, qubits):
q0, = qubits
# Yield a tuple of gates instead of yielding a gate
yield cirq.X(q0),
q0 = cirq.NamedQubit('q0')
circuit = cirq.Circuit.from_ops(DummyGate()(q0))
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(cirq.X(q0))
assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_deep_op_tree():
class DummyGate(cirq.TwoQubitGate):
def _decompose_(self, qubits):
q0, q1 = qubits
# Yield a tuple
yield ((cirq.X(q0), cirq.Y(q0)), cirq.Z(q0))
# Yield nested lists
yield [cirq.X(q0), [cirq.Y(q0), cirq.Z(q0)]]
def generator(depth):
if depth <= 0:
yield cirq.CZ(q0, q1), cirq.Y(q0)
else:
yield cirq.X(q0), generator(depth - 1)
yield cirq.Z(q0)
# Yield nested generators
yield generator(2)
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit.from_ops(DummyGate()(q0, q1))
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(
cirq.X(q0), cirq.Y(q0), cirq.Z(q0), # From tuple
cirq.X(q0), cirq.Y(q0), cirq.Z(q0), # From nested lists
# From nested generators
cirq.X(q0), cirq.X(q0),
cirq.CZ(q0, q1), cirq.Y(q0),
cirq.Z(q0), cirq.Z(q0))
assert_equal_mod_empty(expected, circuit)
def test_nonrecursive_expansion():
qubits = [cirq.NamedQubit(s) for s in 'xy']
no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
op.gate == cirq.ISWAP)
expander = cirq.ExpandComposite(no_decomp=no_decomp)
unexpanded_circuit = cirq.Circuit.from_ops(cirq.ISWAP(*qubits))
circuit = unexpanded_circuit.__copy__()
expander.optimize_circuit(circuit)
assert circuit == unexpanded_circuit
no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
isinstance(op.gate, (cirq.CNotPowGate,
cirq.HPowGate)))
expander = cirq.ExpandComposite(no_decomp=no_decomp)
circuit = unexpanded_circuit.__copy__()
expander.optimize_circuit(circuit)
actual_text_diagram = circuit.to_text_diagram().strip()
expected_text_diagram = """
x: ───@───H───X───S───X───S^-1───H───@───
│ │ │ │
y: ───X───────@───────@──────────────X───
""".strip()
assert actual_text_diagram == expected_text_diagram
import os, glob
def get_last_timestamped_dir_path(data_dir_path):
glob_path = os.path.join(os.path.expanduser(data_dir_path), '2*')
date_paths = glob.glob(glob_path)
date_paths.sort()
return date_paths[-1] if date_paths else None
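# Example (hypothetical layout): with directories ~/fake_scraper_data/20240101/
# and ~/fake_scraper_data/20240315/, the function returns the 20240315 path,
# since '2*'-prefixed timestamp names sort lexicographically in date order.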
if __name__ == '__main__':
print(get_last_timestamped_dir_path('~/fake_scraper_data'))
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
paymastercoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
MAX_MONEY = 21000000 * COIN
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
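# Illustration of the CompactSize round trip implemented above (example values):
#   ser_compact_size(252)   == b'\xfc'                  (single byte)
#   ser_compact_size(253)   == b'\xfd\xfd\x00'          (0xfd marker + uint16)
#   ser_compact_size(70000) == b'\xfe\x70\x11\x01\x00'  (0xfe marker + uint32)
# and deser_compact_size(BytesIO(ser_compact_size(n))) == n for each case.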
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
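# Example: ser_uint256(1) is 32 bytes, b'\x01' followed by 31 zero bytes
# (little-endian), and deser_uint256(BytesIO(ser_uint256(n))) == n for any
# value n that fits in 256 bits.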
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
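# Example: the mainnet genesis nBits value 0x1d00ffff expands to the target
# 0x00000000ffff0000000000000000000000000000000000000000000000000000
# (nbytes = 0x1d, mantissa 0x00ffff shifted left by 8 * (0x1d - 3) bits).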
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return obj.serialize().hex()
# Objects that map to paymastercoind objects, which can be serialized/deserialized
class CAddress:
__slots__ = ("ip", "nServices", "pchReserved", "port", "time")
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
MSG_TX: "TX",
MSG_BLOCK: "Block",
MSG_TX | MSG_WITNESS_FLAG: "WitnessTx",
MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock",
MSG_FILTERED_BLOCK: "filtered Block",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), self.scriptSig.hex(),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
self.scriptPubKey.hex())
class CScriptWitness:
__slots__ = ("stack",)
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([x.hex() for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness:
__slots__ = ("scriptWitness",)
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = ("vtxinwit",)
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
"wit")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in paymastercoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader:
__slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
"nTime", "nVersion", "sha256")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
__slots__ = ("vtx",)
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
"shortids", "shortids_length")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
if prefill_list is None:
prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
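    # Example: absolute indexes [1, 4, 6] encode differentially as [1, 2, 1]
    # (each entry is the gap minus one from the previous absolute index), and
    # to_absolute() reverses the encoding.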
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
__slots__ = ("nTransactions", "vBits", "vHash")
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
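    # Example: vBits == [True, False, True] packs into the single flag byte 0x05
    # (bit i of the flag bytes is vBits[i], least-significant bit first), and
    # deserialize() expands every bit of the flag bytes back into vBits.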
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
__slots__ = ("header", "txn")
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
__slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
"nStartingHeight", "nTime", "nVersion", "strSubVer")
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
__slots__ = ()
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr:
__slots__ = ("addrs",)
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv:
__slots__ = ("inv",)
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
__slots__ = ("inv",)
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
__slots__ = ("locator", "hashstop")
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx:
__slots__ = ("tx",)
command = b"tx"
    def __init__(self, tx=None):
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_with_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_no_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
return self.tx.serialize_without_witness()
class msg_block:
__slots__ = ("block",)
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
__slots__ = ("command", "data")
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_no_witness_block(msg_block):
__slots__ = ()
def serialize(self):
return self.block.serialize(with_witness=False)
class msg_getaddr:
__slots__ = ()
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping:
__slots__ = ("nonce",)
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
__slots__ = ("nonce",)
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
__slots__ = ()
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_notfound:
__slots__ = ("vec", )
command = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
__slots__ = ()
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
__slots__ = ("hashstop", "locator",)
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
__slots__ = ("headers",)
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in paymastercoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_merkleblock:
command = b"merkleblock"
def deserialize(self, f):
pass # Placeholder for now
class msg_filterload:
__slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
command = b"filterload"
def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
self.data = data
self.nHashFuncs = nHashFuncs
self.nTweak = nTweak
self.nFlags = nFlags
def deserialize(self, f):
self.data = deser_string(f)
self.nHashFuncs = struct.unpack("<I", f.read(4))[0]
self.nTweak = struct.unpack("<I", f.read(4))[0]
self.nFlags = struct.unpack("<B", f.read(1))[0]
def serialize(self):
r = b""
r += ser_string(self.data)
r += struct.pack("<I", self.nHashFuncs)
r += struct.pack("<I", self.nTweak)
r += struct.pack("<B", self.nFlags)
return r
def __repr__(self):
return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
self.data, self.nHashFuncs, self.nTweak, self.nFlags)
class msg_filteradd:
__slots__ = ("data")
command = b"filteradd"
def __init__(self, data):
self.data = data
def deserialize(self, f):
self.data = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.data)
return r
def __repr__(self):
return "msg_filteradd(data={})".format(self.data)
class msg_filterclear:
__slots__ = ()
command = b"filterclear"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_filterclear()"
class msg_feefilter:
__slots__ = ("feerate",)
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
__slots__ = ("announce", "version")
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
__slots__ = ("header_and_shortids",)
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
__slots__ = ("block_txn_request",)
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
__slots__ = ("block_transactions",)
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_no_witness_blocktxn(msg_blocktxn):
__slots__ = ()
def serialize(self):
return self.block_transactions.serialize(with_witness=False)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dataset loading, creation and processing"""
import logging
import math
import os
import time
import timeit
import pickle
import numpy as np
import pandas as pd
from mindspore.dataset import GeneratorDataset, Sampler
import src.constants as rconst
import src.movielens as movielens
import src.stat_utils as stat_utils
DATASET_TO_NUM_USERS_AND_ITEMS = {
"ml-1m": (6040, 3706),
"ml-20m": (138493, 26744)
}
_EXPECTED_CACHE_KEYS = (
rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,
rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)
def load_data(data_dir, dataset):
"""
Load data in .csv format and output structured data.
This function reads in the raw CSV of positive items, and performs three
preprocessing transformations:
1) Filter out all users who have not rated at least a certain number
of items. (Typically 20 items)
2) Zero index the users and items such that the largest user_id is
`num_users - 1` and the largest item_id is `num_items - 1`
3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
This allows the dataframe to be sliced by user in-place, and for the last
item to be selected simply by calling the `-1` index of a user's slice.
While all of these transformations are performed by Pandas (and are therefore
single-threaded), they only take ~2 minutes, and the overhead to apply a
    MapReduce pattern to process the dataset in parallel adds significant complexity
for no computational gain. For a larger dataset parallelizing this
preprocessing could yield speedups. (Also, this preprocessing step is only
    performed once for an entire run.)
"""
logging.info("Beginning loading data...")
raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
valid_cache = os.path.exists(cache_path)
if valid_cache:
with open(cache_path, 'rb') as f:
cached_data = pickle.load(f)
for key in _EXPECTED_CACHE_KEYS:
if key not in cached_data:
valid_cache = False
if not valid_cache:
logging.info("Removing stale raw data cache file.")
os.remove(cache_path)
if valid_cache:
data = cached_data
else:
# process data and save to .csv
with open(raw_rating_path) as f:
df = pd.read_csv(f)
        # Keep only users who have rated at least MIN_NUM_RATINGS (20) items.
grouped = df.groupby(movielens.USER_COLUMN)
df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)
original_users = df[movielens.USER_COLUMN].unique()
original_items = df[movielens.ITEM_COLUMN].unique()
# Map the ids of user and item to 0 based index for following processing
logging.info("Generating user_map and item_map...")
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
lambda user: user_map[user])
df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
lambda item: item_map[item])
num_users = len(original_users)
num_items = len(original_items)
assert num_users <= np.iinfo(rconst.USER_DTYPE).max
assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
assert df[movielens.USER_COLUMN].max() == num_users - 1
assert df[movielens.ITEM_COLUMN].max() == num_items - 1
# This sort is used to shard the dataframe by user, and later to select
# the last item for a user to be used in validation.
logging.info("Sorting by user, timestamp...")
# This sort is equivalent to
# df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
# inplace=True)
# except that the order of items with the same user and timestamp are
# sometimes different. For some reason, this sort results in a better
# hit-rate during evaluation, matching the performance of the MLPerf
# reference implementation.
df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
inplace=True, kind="mergesort")
        # The sort and filter steps do not rebuild the dataframe index, so reset it here.
df = df.reset_index()
grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
data = {
rconst.TRAIN_USER_KEY:
train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
rconst.TRAIN_ITEM_KEY:
train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
rconst.EVAL_USER_KEY:
eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
rconst.EVAL_ITEM_KEY:
eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
rconst.USER_MAP: user_map,
rconst.ITEM_MAP: item_map,
"create_time": time.time(),
}
logging.info("Writing raw data cache.")
with open(cache_path, "wb") as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
if num_users != len(data[rconst.USER_MAP]):
raise ValueError("Expected to find {} users, but found {}".format(
num_users, len(data[rconst.USER_MAP])))
if num_items != len(data[rconst.ITEM_MAP]):
raise ValueError("Expected to find {} items, but found {}".format(
num_items, len(data[rconst.ITEM_MAP])))
return data, num_users, num_items
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
"""Lookup variables"""
index_bounds = None
sorted_train_pos_items = None
def index_segment(user):
lower, upper = index_bounds[user:user + 2]
items = sorted_train_pos_items[lower:upper]
negatives_since_last_positive = np.concatenate(
[items[0][np.newaxis], items[1:] - items[:-1] - 1])
return np.cumsum(negatives_since_last_positive)
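    # Example: if a user's sorted positive items are [0, 2, 5], then
    # negatives_since_last_positive is [0, 1, 2] and the cumulative sum [0, 1, 3]
    # counts the negative item ids lying below each positive item.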
start_time = timeit.default_timer()
inner_bounds = np.argwhere(train_pos_users[1:] -
train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = train_pos_users.shape
index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
# Later logic will assume that the users are in sequential ascending order.
assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))
sorted_train_pos_items = train_pos_items.copy()
for i in range(num_users):
lower, upper = index_bounds[i:i + 2]
sorted_train_pos_items[lower:upper].sort()
total_negatives = np.concatenate([
index_segment(i) for i in range(num_users)])
logging.info("Negative total vector built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
return total_negatives, index_bounds, sorted_train_pos_items
class NCFDataset:
"""
A dataset for NCF network.
"""
def __init__(self,
pos_users,
pos_items,
num_users,
num_items,
batch_size,
total_negatives,
index_bounds,
sorted_train_pos_items,
num_neg,
is_training=True):
self._pos_users = pos_users
self._pos_items = pos_items
self._num_users = num_users
self._num_items = num_items
self._batch_size = batch_size
self._total_negatives = total_negatives
self._index_bounds = index_bounds
self._sorted_train_pos_items = sorted_train_pos_items
self._is_training = is_training
if self._is_training:
self._train_pos_count = self._pos_users.shape[0]
else:
self._eval_users_per_batch = int(
batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
_pos_count = pos_users.shape[0]
_num_samples = (1 + num_neg) * _pos_count
self.dataset_len = math.ceil(_num_samples / batch_size)
def lookup_negative_items(self, negative_users):
"""Lookup negative items"""
output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1
left_index = self._index_bounds[negative_users]
right_index = self._index_bounds[negative_users + 1] - 1
num_positives = right_index - left_index + 1
num_negatives = self._num_items - num_positives
neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)
# Shortcuts:
# For points where the negative is greater than or equal to the tally before
# the last positive point there is no need to bisect. Instead the item id
# corresponding to the negative item choice is simply:
# last_postive_index + 1 + (neg_choice - last_negative_tally)
# Similarly, if the selection is less than the tally at the first positive
# then the item_id is simply the selection.
#
# Because MovieLens organizes popular movies into low integers (which is
# preserved through the preprocessing), the first shortcut is very
# efficient, allowing ~60% of samples to bypass the bisection. For the same
# reason, the second shortcut is rarely triggered (<0.02%) and is therefore
# not worth implementing.
use_shortcut = neg_item_choice >= self._total_negatives[right_index]
output[use_shortcut] = (
self._sorted_train_pos_items[right_index] + 1 +
(neg_item_choice - self._total_negatives[right_index])
)[use_shortcut]
if np.all(use_shortcut):
# The bisection code is ill-posed when there are no elements.
return output
not_use_shortcut = np.logical_not(use_shortcut)
left_index = left_index[not_use_shortcut]
right_index = right_index[not_use_shortcut]
neg_item_choice = neg_item_choice[not_use_shortcut]
num_loops = np.max(
np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))
for _ in range(num_loops):
mid_index = (left_index + right_index) // 2
right_criteria = self._total_negatives[mid_index] > neg_item_choice
left_criteria = np.logical_not(right_criteria)
right_index[right_criteria] = mid_index[right_criteria]
left_index[left_criteria] = mid_index[left_criteria]
# Expected state after bisection pass:
# The right index is the smallest index whose tally is greater than the
# negative item choice index.
assert np.all((right_index - left_index) <= 1)
output[not_use_shortcut] = (
self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)
)
assert np.all(output >= 0)
return output
def _get_train_item(self, index):
"""Get train item"""
(mask_start_index,) = index.shape
index_mod = np.mod(index, self._train_pos_count)
# get batch of users
users = self._pos_users[index_mod]
# get batch of items
negative_indices = np.greater_equal(index, self._train_pos_count)
negative_users = users[negative_indices]
negative_items = self.lookup_negative_items(negative_users=negative_users)
items = self._pos_items[index_mod]
items[negative_indices] = negative_items
# get batch of labels
labels = np.logical_not(negative_indices)
# pad last partial batch
pad_length = self._batch_size - index.shape[0]
if pad_length:
user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users
item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items
label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)
users = np.concatenate([users, user_pad])
items = np.concatenate([items, item_pad])
labels = np.concatenate([labels, label_pad])
users = np.reshape(users, (self._batch_size, 1)) # (_batch_size, 1), int32
items = np.reshape(items, (self._batch_size, 1)) # (_batch_size, 1), int32
        mask_start_index = np.array(mask_start_index, dtype=np.int32)  # scalar, int32
valid_pt_mask = np.expand_dims(
            np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32)  # (_batch_size, 1), float32
        labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32)  # (_batch_size, 1), int32
return users, items, labels, valid_pt_mask
@staticmethod
def _assemble_eval_batch(users, positive_items, negative_items,
users_per_batch):
"""Construct duplicate_mask and structure data accordingly.
The positive items should be last so that they lose ties. However, they
should not be masked out if the true eval positive happens to be
selected as a negative. So instead, the positive is placed in the first
position, and then switched with the last element after the duplicate
mask has been computed.
Args:
users: An array of users in a batch. (should be identical along axis 1)
positive_items: An array (batch_size x 1) of positive item indices.
negative_items: An array of negative item indices.
users_per_batch: How many users should be in the batch. This is passed
as an argument so that ncf_test.py can use this method.
Returns:
User, item, and duplicate_mask arrays.
"""
items = np.concatenate([positive_items, negative_items], axis=1)
# We pad the users and items here so that the duplicate mask calculation
# will include padding. The metric function relies on all padded elements
# except the positive being marked as duplicate to mask out padded points.
if users.shape[0] < users_per_batch:
pad_rows = users_per_batch - users.shape[0]
padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)
users = np.concatenate([users, padding.astype(users.dtype)], axis=0)
items = np.concatenate([items, padding.astype(items.dtype)], axis=0)
duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)
items[:, (0, -1)] = items[:, (-1, 0)]
duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]
assert users.shape == items.shape == duplicate_mask.shape
return users, items, duplicate_mask
def _get_eval_item(self, index):
"""Get eval item"""
low_index, high_index = index
users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],
1 + rconst.NUM_EVAL_NEGATIVES, axis=1)
positive_items = self._pos_items[low_index:high_index, np.newaxis]
negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])
.reshape(-1, rconst.NUM_EVAL_NEGATIVES))
users, items, duplicate_mask = self._assemble_eval_batch(
users, positive_items, negative_items, self._eval_users_per_batch)
users = np.reshape(users.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32
items = np.reshape(items.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32
        duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1))  # (self._batch_size, 1), float32
return users, items, duplicate_mask
def __getitem__(self, index):
"""
Get a batch of samples.
"""
if self._is_training:
return self._get_train_item(index)
return self._get_eval_item(index)
def __len__(self):
"""
Return length of the dataset, i.e., the number of batches for an epoch
"""
return self.dataset_len
class RandomSampler(Sampler):
"""
A random sampler for dataset.
"""
def __init__(self, pos_count, num_train_negatives, batch_size):
self.pos_count = pos_count
self._num_samples = (1 + num_train_negatives) * self.pos_count
self._batch_size = batch_size
self._num_batches = math.ceil(self._num_samples / self._batch_size)
super().__init__(self._num_batches)
def __iter__(self):
"""
Return indices of all batches within an epoch.
"""
indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]
# padding last batch indices if necessary
if len(batch_indices) > 2 and len(batch_indices[-2]) != len(batch_indices[-1]):
pad_nums = len(batch_indices[-2]) - len(batch_indices[-1])
pad_indices = np.random.randint(0, self._num_samples, pad_nums)
batch_indices[-1] = np.hstack((batch_indices[-1], pad_indices))
return iter(batch_indices)
class DistributedSamplerOfTrain:
"""
A distributed sampler for dataset.
"""
def __init__(self, pos_count, num_train_negatives, batch_size, rank_id, rank_size):
"""
Distributed sampler of training dataset.
"""
self._num_samples = (1 + num_train_negatives) * pos_count
self._rank_id = rank_id
self._rank_size = rank_size
self._batch_size = batch_size
self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))
self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))
self._total_num_samples = self._samples_per_rank * self._rank_size
def __iter__(self):
"""
Returns the data after each sampling.
"""
indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
indices = indices.tolist()
indices.extend(indices[:self._total_num_samples - len(indices)])
indices = indices[self._rank_id:self._total_num_samples:self._rank_size]
batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]
return iter(np.array(batch_indices))
def __len__(self):
"""
Returns the length after each sampling.
"""
return self._batchs_per_rank
class SequenceSampler(Sampler):
"""
A sequence sampler for dataset.
"""
def __init__(self, eval_batch_size, num_users):
self._eval_users_per_batch = int(
eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
self._eval_batches_per_epoch = self.count_batches(
self._eval_elements_in_epoch, eval_batch_size)
super().__init__(self._eval_batches_per_epoch)
def __iter__(self):
indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)
for x in range(self._eval_batches_per_epoch)]
# padding last batch indices if necessary
if len(indices) > 2 and len(indices[-2]) != len(indices[-1]):
pad_nums = len(indices[-2]) - len(indices[-1])
pad_indices = np.random.randint(0, self._eval_elements_in_epoch, pad_nums)
indices[-1] = np.hstack((indices[-1], pad_indices))
return iter(indices)
@staticmethod
def count_batches(example_count, batch_size, batches_per_step=1):
"""Determine the number of batches, rounding up to fill all devices."""
x = (example_count + batch_size - 1) // batch_size
return (x + batches_per_step - 1) // batches_per_step * batches_per_step
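        # Worked example (illustrative): count_batches(1005, 100) -> 11, while
        # count_batches(1005, 100, batches_per_step=4) rounds up to 12 so that every
        # step (e.g. each of 4 devices) receives a whole batch.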
class DistributedSamplerOfEval:
"""
A distributed sampler for eval dataset.
"""
def __init__(self, eval_batch_size, num_users, rank_id, rank_size):
self._eval_users_per_batch = int(
eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
self._eval_batches_per_epoch = self.count_batches(
self._eval_elements_in_epoch, eval_batch_size)
self._rank_id = rank_id
self._rank_size = rank_size
self._eval_batch_size = eval_batch_size
self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))
def __iter__(self):
indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)
for x in range(self._batchs_per_rank)]
return iter(np.array(indices))
@staticmethod
def count_batches(example_count, batch_size, batches_per_step=1):
"""Determine the number of batches, rounding up to fill all devices."""
x = (example_count + batch_size - 1) // batch_size
return (x + batches_per_step - 1) // batches_per_step * batches_per_step
def __len__(self):
return self._batchs_per_rank
def parse_eval_batch_size(eval_batch_size):
"""
Parse eval batch size.
"""
if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):
raise ValueError("Eval batch size {} is not divisible by {}".format(
eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))
return eval_batch_size
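# Illustrative check (hypothetical constant value): if rconst.NUM_EVAL_NEGATIVES were 999,
# the default eval_batch_size of 160000 would be valid (160000 % 1000 == 0) and would
# correspond to 160 evaluation users per batch.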
def create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,
eval_batch_size=160000, num_neg=4, rank_id=None, rank_size=None):
"""
Create NCF dataset.
"""
data, num_users, num_items = load_data(data_dir, dataset)
train_pos_users = data[rconst.TRAIN_USER_KEY]
train_pos_items = data[rconst.TRAIN_ITEM_KEY]
eval_pos_users = data[rconst.EVAL_USER_KEY]
eval_pos_items = data[rconst.EVAL_ITEM_KEY]
total_negatives, index_bounds, sorted_train_pos_items = \
construct_lookup_variables(train_pos_users, train_pos_items, num_users)
if test_train:
print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,
sorted_train_pos_items)
dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,
index_bounds, sorted_train_pos_items, num_neg)
sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)
if rank_id is not None and rank_size is not None:
sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)
ds = GeneratorDataset(dataset,
column_names=[movielens.USER_COLUMN,
movielens.ITEM_COLUMN,
"labels",
rconst.VALID_POINT_MASK],
sampler=sampler)
else:
eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)
dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,
eval_batch_size, total_negatives, index_bounds,
sorted_train_pos_items, num_neg, is_training=False)
sampler = SequenceSampler(eval_batch_size, num_users)
ds = GeneratorDataset(dataset,
column_names=[movielens.USER_COLUMN,
movielens.ITEM_COLUMN,
rconst.DUPLICATE_MASK],
sampler=sampler)
repeat_count = train_epochs if test_train else train_epochs + 1
ds = ds.repeat(repeat_count)
return ds, num_users, num_items
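# Minimal usage sketch (assumptions: the MovieLens 'ml-1m' data has already been prepared
# under ./dataset/ and a MindSpore runtime is available; names and paths are illustrative):
#
#   ds, num_users, num_items = create_dataset(test_train=True, data_dir='./dataset/',
#                                              dataset='ml-1m', batch_size=256, num_neg=4)
#   for batch in ds.create_dict_iterator():
#       ...  # per-batch user, item, label and valid-point-mask arrays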
|
"""
---
title: CIFAR10 Experiment to try Group Normalization
summary: >
 This trains a simple convolutional neural network that uses group normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Group Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.group_norm import GroupNorm
class Model(Module):
"""
### VGG model for CIFAR-10 classification
"""
def __init__(self, groups: int = 32):
super().__init__()
layers = []
# RGB channels
in_channels = 3
# Number of channels in each layer in each block
for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
# Convolution, Normalization and Activation layers
for channels in block:
layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),
GroupNorm(groups, channels),
nn.ReLU(inplace=True)]
in_channels = channels
# Max pooling at end of each block
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
# Create a sequential model with the layers
self.layers = nn.Sequential(*layers)
# Final logits layer
self.fc = nn.Linear(512, 10)
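        # Note: the five 2x2 max-pool stages reduce the 32x32 CIFAR-10 input to 1x1,
        # so the flattened feature vector entering this linear layer has 512 elements.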
def forward(self, x):
# The VGG layers
x = self.layers(x)
# Reshape for classification layer
x = x.view(x.shape[0], -1)
# Final linear layer
return self.fc(x)
class Configs(CIFAR10Configs):
# Number of groups
groups: int = 16
@option(Configs.model)
def model(c: Configs):
"""
### Create model
"""
return Model(c.groups).to(c.device)
def main():
# Create experiment
experiment.create(name='cifar10', comment='group norm')
# Create configurations
conf = Configs()
# Load configurations
experiment.configs(conf, {
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 2.5e-4,
})
# Start the experiment and run the training loop
with experiment.start():
conf.run()
#
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1QOOhihZdUwdAJcUgkeokbx7YaDSkSGWtlXHvKXhHW3E'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
from nca47.db import api as db_api
from nca47.db.sqlalchemy.models import Proximity as ProximityModel
from nca47.objects import base
from nca47.objects import fields as object_fields
class ProximityInfo(base.Nca47Object):
VERSION = '1.0'
fields = {
'tenant_id': object_fields.StringField(),
'sp_policy_id': object_fields.StringField(),
'src_type': object_fields.StringField(),
'src_logic': object_fields.StringField(),
'src_data1': object_fields.StringField(),
'src_data2': object_fields.StringField(),
'src_data3': object_fields.StringField(),
'src_data4': object_fields.StringField(),
'dst_type': object_fields.StringField(),
'dst_logic': object_fields.StringField(),
'dst_data1': object_fields.StringField(),
'dst_data2': object_fields.StringField(),
}
def __init__(self, context=None, **kwarg):
self.db_api = db_api.get_instance()
super(ProximityInfo, self).__init__(context=None, **kwarg)
@staticmethod
def _from_db_object(dns_proximity, db_dns_proximity):
"""Converts a database entity to a formal :class:`Proximity` object.
:param dns_proximity: An object of :class:`Proximity`.
:param db_dns_proximity: A DB model of a Proximity.
:return: a :class:`Proximity` object.
"""
for field in dns_proximity.fields:
dns_proximity[field] = db_dns_proximity[field]
dns_proximity.obj_reset_changes()
return dns_proximity
def create(self, context, values):
region = self.db_api.create(ProximityModel, values)
return region
def update(self, context, id, values):
region = self.db_api.update_object(ProximityModel, id, values)
return region
def get_object(self, context, **values):
region = self.db_api.get_object(ProximityModel, **values)
return region
def delete(self, context, id):
region = self.db_api.delete_object(ProximityModel, id)
return region
def get_objects(self, context, **values):
region = self.db_api.get_objects(ProximityModel, **values)
return region
def get_all_object(self, context, values):
region = self.db_api.get_all_object(ProximityModel, values)
return region
|
#!/usr/bin/env python
# kktmat.py -- KKT matrix from Laplacian matrix
#
# Copyright (C) <2016> <Kevin Deweese>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import scipy
def kktmat(L):
mat=scipy.sparse.coo_matrix(scipy.sparse.tril(L,-1))
row=mat.row
m=len(row)
n=L.shape[0]
col=mat.col
val=mat.data
#R=scipy.sparse.diags(-1/val,0)
R=scipy.array(-1/val)
i=scipy.concatenate([scipy.arange(0,m),scipy.arange(0,m)])
j=scipy.concatenate([row,col])
data=scipy.concatenate([scipy.ones(m),-scipy.ones(m)])
B=scipy.sparse.coo_matrix((data,(i,j)))
return {'R':R,'B':B}
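# Minimal usage sketch (illustrative; a 3-node path-graph Laplacian):
#   import scipy.sparse
#   L = scipy.sparse.csr_matrix([[ 1., -1.,  0.],
#                                [-1.,  2., -1.],
#                                [ 0., -1.,  1.]])
#   kkt = kktmat(L)
#   # kkt['B'] is the (edges x nodes) signed incidence matrix and
#   # kkt['R'] holds the edge resistances (-1 / off-diagonal weights).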
|
from SimpleCV import *
import time
"""
This is an example of HOW-TO use FaceRecognizer to recognize gender
of the person.
"""
def identifyGender():
f = FaceRecognizer()
cam = Camera()
img = cam.getImage()
cascade = LAUNCH_PATH + "/" + "Features/HaarCascades/face.xml"
feat = img.findHaarFeatures(cascade)
if feat:
crop_image = feat.sortArea()[-1].crop()
feat.sortArea()[-1].draw()
f.load(LAUNCH_PATH + "/" + "Features/FaceRecognizerData/GenderData.xml")
w, h = f.imageSize
crop_image = crop_image.resize(w, h)
label, confidence = f.predict(crop_image)
print label
if label == 0:
img.drawText("Female", fontsize=48)
else:
img.drawText("Male", fontsize=48)
img.show()
time.sleep(4)
identifyGender()
|
# pylint: disable=missing-docstring
import getpass
import os
from celery.schedules import crontab
from readthedocs.core.settings import Settings
from readthedocs.projects.constants import CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH
try:
import readthedocsext # noqa
ext = True
except ImportError:
ext = False
_ = gettext = lambda s: s
class CommunityBaseSettings(Settings):
"""Community base settings, don't use this directly."""
# Django settings
SITE_ID = 1
ROOT_URLCONF = 'readthedocs.urls'
SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'
SINGLE_VERSION_URLCONF = 'readthedocs.core.urls.single_version'
LOGIN_REDIRECT_URL = '/dashboard/'
FORCE_WWW = False
SECRET_KEY = 'replace-this-please' # noqa
ATOMIC_REQUESTS = True
# Debug settings
DEBUG = True
# Domains and URLs
PRODUCTION_DOMAIN = 'readthedocs.org'
PUBLIC_DOMAIN = None
PUBLIC_DOMAIN_USES_HTTPS = False
USE_SUBDOMAIN = False
PUBLIC_API_URL = 'https://{}'.format(PRODUCTION_DOMAIN)
RTD_EXTERNAL_VERSION_DOMAIN = 'external-builds.readthedocs.io'
# Doc Builder Backends
MKDOCS_BACKEND = 'readthedocs.doc_builder.backends.mkdocs'
SPHINX_BACKEND = 'readthedocs.doc_builder.backends.sphinx'
# slumber settings
SLUMBER_API_HOST = 'https://readthedocs.org'
SLUMBER_USERNAME = None
SLUMBER_PASSWORD = None
# Email
DEFAULT_FROM_EMAIL = 'no-reply@readthedocs.org'
SERVER_EMAIL = DEFAULT_FROM_EMAIL
SUPPORT_EMAIL = None
# Sessions
SESSION_COOKIE_DOMAIN = 'readthedocs.org'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 30 * 24 * 60 * 60 # 30 days
SESSION_SAVE_EVERY_REQUEST = True
# This cookie is used in cross-origin API requests from *.readthedocs.io to readthedocs.org
SESSION_COOKIE_SAMESITE = None
# CSRF
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_AGE = 30 * 24 * 60 * 60
# Security & X-Frame-Options Middleware
# https://docs.djangoproject.com/en/1.11/ref/middleware/#django.middleware.security.SecurityMiddleware
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
X_FRAME_OPTIONS = 'DENY'
# Content Security Policy
# https://django-csp.readthedocs.io/
CSP_BLOCK_ALL_MIXED_CONTENT = True
CSP_DEFAULT_SRC = None # This could be improved
CSP_FRAME_ANCESTORS = ("'none'",)
CSP_OBJECT_SRC = ("'none'",)
CSP_REPORT_URI = None
CSP_REPORT_ONLY = True # Set to false to enable CSP in blocking mode
CSP_EXCLUDE_URL_PREFIXES = (
"/admin/",
)
# Read the Docs
READ_THE_DOCS_EXTENSIONS = ext
RTD_LATEST = 'latest'
RTD_LATEST_VERBOSE_NAME = 'latest'
RTD_STABLE = 'stable'
RTD_STABLE_VERBOSE_NAME = 'stable'
RTD_CLEAN_AFTER_BUILD = False
RTD_MAX_CONCURRENT_BUILDS = 4
RTD_BUILD_STATUS_API_NAME = 'docs/readthedocs'
# Database and API hitting settings
DONT_HIT_API = False
DONT_HIT_DB = True
SYNC_USER = getpass.getuser()
USER_MATURITY_DAYS = 7
# override classes
CLASS_OVERRIDES = {}
DOC_PATH_PREFIX = '_/'
# Application classes
@property
def INSTALLED_APPS(self): # noqa
apps = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
# third party apps
'dj_pagination',
'taggit',
'django_gravatar',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'textclassifier',
'annoying',
'django_extensions',
'crispy_forms',
'messages_extends',
'django_elasticsearch_dsl',
'django_filters',
'polymorphic',
# our apps
'readthedocs.projects',
'readthedocs.builds',
'readthedocs.core',
'readthedocs.doc_builder',
'readthedocs.oauth',
'readthedocs.redirects',
'readthedocs.rtd_tests',
'readthedocs.api.v2',
'readthedocs.api.v3',
'readthedocs.gold',
'readthedocs.payments',
'readthedocs.notifications',
'readthedocs.integrations',
'readthedocs.analytics',
'readthedocs.sphinx_domains',
'readthedocs.search',
# allauth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.gitlab',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitbucket_oauth2',
]
if ext:
apps.append('django_countries')
apps.append('readthedocsext.donate')
apps.append('readthedocsext.embed')
apps.append('readthedocsext.spamfighting')
return apps
@property
def USE_PROMOS(self): # noqa
return 'readthedocsext.donate' in self.INSTALLED_APPS
MIDDLEWARE = (
'readthedocs.core.middleware.ReadTheDocsSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'dj_pagination.middleware.PaginationMiddleware',
'readthedocs.core.middleware.SubdomainMiddleware',
'readthedocs.core.middleware.SingleVersionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'csp.middleware.CSPMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 9,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'
NOTIFICATION_BACKENDS = [
'readthedocs.notifications.backends.EmailBackend',
'readthedocs.notifications.backends.SiteBackend',
]
# Paths
SITE_ROOT = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')
DOCROOT = os.path.join(SITE_ROOT, 'user_builds')
UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')
CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')
LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')
PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')
PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')
# Assets and media
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
STATICFILES_DIRS = [
os.path.join(SITE_ROOT, 'readthedocs', 'static'),
os.path.join(SITE_ROOT, 'media'),
]
STATICFILES_FINDERS = [
'readthedocs.core.static.SelectiveFileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
PYTHON_MEDIA = False
# Django Storage subclass used to write build artifacts to cloud or local storage
# https://docs.readthedocs.io/page/development/settings.html#rtd-build-media-storage
RTD_BUILD_MEDIA_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_ROOT],
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
# Read the Docs processor
'readthedocs.core.context_processors.readthedocs_processor',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'PREFIX': 'docs',
}
}
CACHE_MIDDLEWARE_SECONDS = 60
# I18n
TIME_ZONE = 'UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('ca', gettext('Catalan')),
('en', gettext('English')),
('es', gettext('Spanish')),
('pt-br', gettext('Brazilian Portuguese')),
('nb', gettext('Norwegian Bokmål')),
('fr', gettext('French')),
('ru', gettext('Russian')),
('de', gettext('German')),
('gl', gettext('Galician')),
('vi', gettext('Vietnamese')),
('zh-cn', gettext('Simplified Chinese')),
('zh-tw', gettext('Traditional Chinese')),
('ja', gettext('Japanese')),
('uk', gettext('Ukrainian')),
('it', gettext('Italian')),
('ko', gettext('Korean')),
)
LOCALE_PATHS = [
os.path.join(SITE_ROOT, 'readthedocs', 'locale'),
]
USE_I18N = True
USE_L10N = True
# Celery
CELERY_APP_NAME = 'readthedocs'
CELERY_ALWAYS_EAGER = True
CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERYD_HIJACK_ROOT_LOGGER = False
# This stops us from pre-fetching a task that then sits around on the builder
CELERY_ACKS_LATE = True
# Don't queue a bunch of tasks in the workers
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_CREATE_MISSING_QUEUES = True
BROKER_TRANSPORT_OPTIONS = {
'queue_order_strategy': 'priority',
'priority_steps': [CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH],
}
CELERY_DEFAULT_QUEUE = 'celery'
CELERYBEAT_SCHEDULE = {
        # Runs every hour at minute 30
'hourly-remove-orphan-symlinks': {
'task': 'readthedocs.projects.tasks.broadcast_remove_orphan_symlinks',
'schedule': crontab(minute=30),
'options': {'queue': 'web'},
},
'quarter-finish-inactive-builds': {
'task': 'readthedocs.projects.tasks.finish_inactive_builds',
'schedule': crontab(minute='*/15'),
'options': {'queue': 'web'},
},
'every-three-hour-clear-persistent-messages': {
'task': 'readthedocs.core.tasks.clear_persistent_messages',
'schedule': crontab(minute=0, hour='*/3'),
'options': {'queue': 'web'},
},
'every-day-delete-old-search-queries': {
'task': 'readthedocs.search.tasks.delete_old_search_queries_from_db',
'schedule': crontab(minute=0, hour=0),
'options': {'queue': 'web'},
}
}
MULTIPLE_APP_SERVERS = [CELERY_DEFAULT_QUEUE]
MULTIPLE_BUILD_SERVERS = [CELERY_DEFAULT_QUEUE]
# Sentry
SENTRY_CELERY_IGNORE_EXPECTED = True
# Docker
DOCKER_ENABLE = False
DOCKER_SOCKET = 'unix:///var/run/docker.sock'
    # This setting has been deprecated in favor of DOCKER_IMAGE_SETTINGS
DOCKER_BUILD_IMAGES = None
# User used to create the container.
    # In production we use the same user as the one defined by the
# ``USER docs`` instruction inside the Dockerfile.
# In development, we can use the "UID:GID" of the current user running the
# instance to avoid file permissions issues.
# https://docs.docker.com/engine/reference/run/#user
RTD_DOCKER_USER = 'docs:docs'
RTD_DOCKER_COMPOSE = False
DOCKER_DEFAULT_IMAGE = 'readthedocs/build'
DOCKER_VERSION = 'auto'
DOCKER_DEFAULT_VERSION = 'latest'
DOCKER_IMAGE = '{}:{}'.format(DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION)
DOCKER_IMAGE_SETTINGS = {
# A large number of users still have this pinned in their config file.
# We must have documented it at some point.
'readthedocs/build:2.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5],
'default_version': {
2: 2.7,
3: 3.5,
},
},
},
'readthedocs/build:4.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:5.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:6.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:7.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
}
# Alias tagged via ``docker tag`` on the build servers
DOCKER_IMAGE_SETTINGS.update({
'readthedocs/build:stable': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:5.0'),
'readthedocs/build:latest': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:6.0'),
'readthedocs/build:testing': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:7.0'),
})
# All auth
ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_ACTIVATION_DAYS = 7
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_PROVIDERS = {
'github': {
'SCOPE': [
'user:email',
'read:org',
'admin:repo_hook',
'repo:status',
],
},
'gitlab': {
'SCOPE': [
'api',
'read_user',
],
},
# Bitbucket scope/permissions are determined by the Oauth consumer setup on bitbucket.org
}
# CORS
CORS_ORIGIN_REGEX_WHITELIST = (
r'^http://(.+)\.readthedocs\.io$',
r'^https://(.+)\.readthedocs\.io$',
)
# So people can post to their accounts
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken'
)
# RTD Settings
REPO_LOCK_SECONDS = 30
ALLOW_PRIVATE_REPOS = False
DEFAULT_PRIVACY_LEVEL = 'public'
DEFAULT_VERSION_PRIVACY_LEVEL = 'public'
GROK_API_HOST = 'https://api.grokthedocs.com'
SERVE_DOCS = ['public']
ALLOW_ADMIN = True
# Elasticsearch settings.
ES_HOSTS = ['search:9200']
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'search:9200'
},
}
# Chunk size for elasticsearch reindex celery tasks
ES_TASK_CHUNK_SIZE = 100
# Info from Honza about this:
# The key to determine shard number is actually usually not the node count,
# but the size of your data.
# There are advantages to just having a single shard in an index since
# you don't have to do the distribute/collect steps when executing a search.
# If your data will allow it (not significantly larger than 40GB)
# I would recommend going to a single shard and one replica meaning
# any of the two nodes will be able to serve any search without talking to the other one.
# Scaling to more searches will then just mean adding a third node
# and a second replica resulting in immediate 50% bump in max search throughput.
ES_INDEXES = {
'project': {
'name': 'project_index',
'settings': {'number_of_shards': 1,
'number_of_replicas': 1
}
},
'page': {
'name': 'page_index',
'settings': {
'number_of_shards': 1,
'number_of_replicas': 1,
}
},
}
# ANALYZER = 'analysis': {
# 'analyzer': {
# 'default_icu': {
# 'type': 'custom',
# 'tokenizer': 'icu_tokenizer',
# 'filter': ['word_delimiter', 'icu_folding', 'icu_normalizer'],
# }
# }
# }
# Disable auto refresh for increasing index performance
ELASTICSEARCH_DSL_AUTO_REFRESH = False
ALLOWED_HOSTS = ['*']
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda o: '/profiles/{}/'.format(o.username)
}
INTERNAL_IPS = ('127.0.0.1',)
# Taggit
# https://django-taggit.readthedocs.io
TAGGIT_TAGS_FROM_STRING = 'readthedocs.projects.tag_utils.rtd_parse_tags'
# Stripe
STRIPE_SECRET = None
STRIPE_PUBLISHABLE = None
# Do Not Track support
DO_NOT_TRACK_ENABLED = False
# Advertising configuration defaults
ADSERVER_API_BASE = None
ADSERVER_API_KEY = None
ADSERVER_API_TIMEOUT = 0.35 # seconds
# Misc application settings
GLOBAL_ANALYTICS_CODE = None
DASHBOARD_ANALYTICS_CODE = None # For the dashboard, not docs
GRAVATAR_DEFAULT_IMAGE = 'https://assets.readthedocs.org/static/images/silhouette.png' # NOQA
OAUTH_AVATAR_USER_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
OAUTH_AVATAR_ORG_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
RESTRICTEDSESSIONS_AUTHED_ONLY = True
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': True,
'sectsubtitle_xform': True,
'initial_header_level': 2,
'report_level': 5,
'syntax_highlight': 'none',
'math_output': 'latex',
'field_name_limit': 50,
}
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', # NOQA
'DEFAULT_THROTTLE_RATES': {
'anon': '5/minute',
'user': '60/minute',
},
'PAGE_SIZE': 10,
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
SILENCED_SYSTEM_CHECKS = ['fields.W342']
# Logging
LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': LOG_FORMAT,
'datefmt': '%d/%b/%Y %H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'debug': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, 'debug.log'),
'formatter': 'default',
},
'null': {
'class': 'logging.NullHandler',
},
},
'loggers': {
'': { # root logger
'handlers': ['debug', 'console'],
# Always send from the root, handlers can filter levels
'level': 'INFO',
},
'readthedocs': {
'handlers': ['debug', 'console'],
'level': 'DEBUG',
# Don't double log at the root logger for these.
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
},
}
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import django_filters
from django.db.models import Q
from django_filters import FilterSet
from pdc.apps.common.filters import MultiValueFilter, MultiValueRegexFilter, value_is_not_empty
from . import models
from .models import (Person,
Maillist,
GlobalComponentContact,
ReleaseComponentContact)
class PersonFilterSet(django_filters.FilterSet):
username = MultiValueFilter()
email = MultiValueFilter()
class Meta:
model = models.Person
fields = ('username', 'email')
class MaillistFilterSet(django_filters.FilterSet):
mail_name = MultiValueFilter()
email = MultiValueFilter()
class Meta:
model = models.Maillist
fields = ('mail_name', 'email')
class ContactRoleFilterSet(django_filters.FilterSet):
name = MultiValueFilter()
class Meta:
model = models.ContactRole
fields = ('name',)
def _filter_contacts(people_filter, maillist_filter, qs, values):
"""Helper for filtering based on subclassed contacts.
    Runs the filter separately on each subclass (the field to filter on is given
    by the corresponding argument; the same values are used for both), then filters
    the queryset to keep only items with a matching contact.
"""
people = Person.objects.filter(**{people_filter + '__in': values})
mailing_lists = Maillist.objects.filter(**{maillist_filter + '__in': values})
return qs.filter(Q(contact__in=people) | Q(contact__in=mailing_lists))
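# Hypothetical usage (names are illustrative): keep contacts whose person username or
# mailing-list name is "releng":
#   qs = _filter_contacts('username', 'mail_name', GlobalComponentContact.objects.all(), ['releng'])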
class _BaseComponentContactFilter(FilterSet):
contact = MultiValueFilter(method='filter_by_contact')
email = MultiValueFilter(method='filter_by_email')
role = MultiValueFilter(name='role__name')
component = MultiValueRegexFilter(name='component__name')
@value_is_not_empty
def filter_by_contact(self, qs, name, value):
return _filter_contacts('username', 'mail_name', qs, value)
@value_is_not_empty
def filter_by_email(self, qs, name, value):
return _filter_contacts('email', 'email', qs, value)
class GlobalComponentContactFilter(_BaseComponentContactFilter):
class Meta:
model = GlobalComponentContact
fields = ('role', 'email', 'contact', 'component')
class ReleaseComponentContactFilter(_BaseComponentContactFilter):
dist_git_branch = MultiValueFilter(name='component__dist_git_branch')
release = MultiValueFilter(name='component__release__release_id')
global_component = MultiValueFilter(name='component__global_component__name')
class Meta:
model = ReleaseComponentContact
fields = ('role', 'email', 'contact', 'component', 'dist_git_branch', 'release',
'global_component')
|
import sys
n, r = map(int, sys.stdin.readline().split())
def main():
res = r + 100 * max(10 - n, 0)
return res
if __name__ == '__main__':
ans = main()
print(ans)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'ServiceAccessPolicyEntryResponse',
'ServiceAuthenticationConfigurationInfoResponse',
'ServiceCorsConfigurationInfoResponse',
'ServiceCosmosDbConfigurationInfoResponse',
'ServiceExportConfigurationInfoResponse',
'ServicesPropertiesResponse',
'ServicesResourceResponseIdentity',
]
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, *,
id: str,
name: str,
private_link_service_connection_state: 'outputs.PrivateLinkServiceConnectionStateResponse',
provisioning_state: str,
type: str,
private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None):
"""
The Private Endpoint Connection resource.
:param str id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:param str name: The name of the resource
:param 'PrivateLinkServiceConnectionStateResponseArgs' private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param str provisioning_state: The provisioning state of the private endpoint connection resource.
:param str type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
:param 'PrivateEndpointResponseArgs' private_endpoint: The resource of private end point.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "type", type)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
"""
The Private Endpoint resource.
"""
def __init__(__self__, *,
id: str):
"""
The Private Endpoint resource.
:param str id: The ARM identifier for Private Endpoint
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> str:
"""
The ARM identifier for Private Endpoint
"""
return pulumi.get(self, "id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
"""
A collection of information about the state of the connection between service consumer and provider.
"""
def __init__(__self__, *,
actions_required: Optional[str] = None,
description: Optional[str] = None,
status: Optional[str] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param str actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param str description: The reason for approval/rejection of the connection.
:param str status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[str]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAccessPolicyEntryResponse(dict):
"""
An access policy entry.
"""
def __init__(__self__, *,
object_id: str):
"""
An access policy entry.
:param str object_id: An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
"""
pulumi.set(__self__, "object_id", object_id)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> str:
"""
An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
"""
return pulumi.get(self, "object_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAuthenticationConfigurationInfoResponse(dict):
"""
Authentication configuration information
"""
def __init__(__self__, *,
audience: Optional[str] = None,
authority: Optional[str] = None,
smart_proxy_enabled: Optional[bool] = None):
"""
Authentication configuration information
:param str audience: The audience url for the service
:param str authority: The authority url for the service
:param bool smart_proxy_enabled: If the SMART on FHIR proxy is enabled
"""
if audience is not None:
pulumi.set(__self__, "audience", audience)
if authority is not None:
pulumi.set(__self__, "authority", authority)
if smart_proxy_enabled is not None:
pulumi.set(__self__, "smart_proxy_enabled", smart_proxy_enabled)
@property
@pulumi.getter
def audience(self) -> Optional[str]:
"""
The audience url for the service
"""
return pulumi.get(self, "audience")
@property
@pulumi.getter
def authority(self) -> Optional[str]:
"""
The authority url for the service
"""
return pulumi.get(self, "authority")
@property
@pulumi.getter(name="smartProxyEnabled")
def smart_proxy_enabled(self) -> Optional[bool]:
"""
If the SMART on FHIR proxy is enabled
"""
return pulumi.get(self, "smart_proxy_enabled")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCorsConfigurationInfoResponse(dict):
"""
The settings for the CORS configuration of the service instance.
"""
def __init__(__self__, *,
allow_credentials: Optional[bool] = None,
headers: Optional[Sequence[str]] = None,
max_age: Optional[int] = None,
methods: Optional[Sequence[str]] = None,
origins: Optional[Sequence[str]] = None):
"""
The settings for the CORS configuration of the service instance.
:param bool allow_credentials: If credentials are allowed via CORS.
:param Sequence[str] headers: The headers to be allowed via CORS.
:param int max_age: The max age to be allowed via CORS.
:param Sequence[str] methods: The methods to be allowed via CORS.
:param Sequence[str] origins: The origins to be allowed via CORS.
"""
if allow_credentials is not None:
pulumi.set(__self__, "allow_credentials", allow_credentials)
if headers is not None:
pulumi.set(__self__, "headers", headers)
if max_age is not None:
pulumi.set(__self__, "max_age", max_age)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if origins is not None:
pulumi.set(__self__, "origins", origins)
@property
@pulumi.getter(name="allowCredentials")
def allow_credentials(self) -> Optional[bool]:
"""
If credentials are allowed via CORS.
"""
return pulumi.get(self, "allow_credentials")
@property
@pulumi.getter
def headers(self) -> Optional[Sequence[str]]:
"""
The headers to be allowed via CORS.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter(name="maxAge")
def max_age(self) -> Optional[int]:
"""
The max age to be allowed via CORS.
"""
return pulumi.get(self, "max_age")
@property
@pulumi.getter
def methods(self) -> Optional[Sequence[str]]:
"""
The methods to be allowed via CORS.
"""
return pulumi.get(self, "methods")
@property
@pulumi.getter
def origins(self) -> Optional[Sequence[str]]:
"""
The origins to be allowed via CORS.
"""
return pulumi.get(self, "origins")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCosmosDbConfigurationInfoResponse(dict):
"""
The settings for the Cosmos DB database backing the service.
"""
def __init__(__self__, *,
key_vault_key_uri: Optional[str] = None,
offer_throughput: Optional[int] = None):
"""
The settings for the Cosmos DB database backing the service.
:param str key_vault_key_uri: The URI of the customer-managed key for the backing database.
:param int offer_throughput: The provisioned throughput for the backing database.
"""
if key_vault_key_uri is not None:
pulumi.set(__self__, "key_vault_key_uri", key_vault_key_uri)
if offer_throughput is not None:
pulumi.set(__self__, "offer_throughput", offer_throughput)
@property
@pulumi.getter(name="keyVaultKeyUri")
def key_vault_key_uri(self) -> Optional[str]:
"""
The URI of the customer-managed key for the backing database.
"""
return pulumi.get(self, "key_vault_key_uri")
@property
@pulumi.getter(name="offerThroughput")
def offer_throughput(self) -> Optional[int]:
"""
The provisioned throughput for the backing database.
"""
return pulumi.get(self, "offer_throughput")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceExportConfigurationInfoResponse(dict):
"""
Export operation configuration information
"""
def __init__(__self__, *,
storage_account_name: Optional[str] = None):
"""
Export operation configuration information
:param str storage_account_name: The name of the default export storage account.
"""
if storage_account_name is not None:
pulumi.set(__self__, "storage_account_name", storage_account_name)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> Optional[str]:
"""
The name of the default export storage account.
"""
return pulumi.get(self, "storage_account_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesPropertiesResponse(dict):
"""
The properties of a service instance.
"""
def __init__(__self__, *,
provisioning_state: str,
access_policies: Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']] = None,
authentication_configuration: Optional['outputs.ServiceAuthenticationConfigurationInfoResponse'] = None,
cors_configuration: Optional['outputs.ServiceCorsConfigurationInfoResponse'] = None,
cosmos_db_configuration: Optional['outputs.ServiceCosmosDbConfigurationInfoResponse'] = None,
export_configuration: Optional['outputs.ServiceExportConfigurationInfoResponse'] = None,
private_endpoint_connections: Optional[Sequence['outputs.PrivateEndpointConnectionResponse']] = None,
public_network_access: Optional[str] = None):
"""
The properties of a service instance.
:param str provisioning_state: The provisioning state.
:param Sequence['ServiceAccessPolicyEntryResponseArgs'] access_policies: The access policies of the service instance.
:param 'ServiceAuthenticationConfigurationInfoResponseArgs' authentication_configuration: The authentication configuration for the service instance.
:param 'ServiceCorsConfigurationInfoResponseArgs' cors_configuration: The settings for the CORS configuration of the service instance.
:param 'ServiceCosmosDbConfigurationInfoResponseArgs' cosmos_db_configuration: The settings for the Cosmos DB database backing the service.
:param 'ServiceExportConfigurationInfoResponseArgs' export_configuration: The settings for the export operation of the service instance.
:param Sequence['PrivateEndpointConnectionResponseArgs'] private_endpoint_connections: The list of private endpoint connections that are set up for this resource.
:param str public_network_access: Control permission for data plane traffic coming from public networks while private endpoint is enabled.
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
if access_policies is not None:
pulumi.set(__self__, "access_policies", access_policies)
if authentication_configuration is not None:
pulumi.set(__self__, "authentication_configuration", authentication_configuration)
if cors_configuration is not None:
pulumi.set(__self__, "cors_configuration", cors_configuration)
if cosmos_db_configuration is not None:
pulumi.set(__self__, "cosmos_db_configuration", cosmos_db_configuration)
if export_configuration is not None:
pulumi.set(__self__, "export_configuration", export_configuration)
if private_endpoint_connections is not None:
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="accessPolicies")
def access_policies(self) -> Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']]:
"""
The access policies of the service instance.
"""
return pulumi.get(self, "access_policies")
@property
@pulumi.getter(name="authenticationConfiguration")
def authentication_configuration(self) -> Optional['outputs.ServiceAuthenticationConfigurationInfoResponse']:
"""
The authentication configuration for the service instance.
"""
return pulumi.get(self, "authentication_configuration")
@property
@pulumi.getter(name="corsConfiguration")
def cors_configuration(self) -> Optional['outputs.ServiceCorsConfigurationInfoResponse']:
"""
The settings for the CORS configuration of the service instance.
"""
return pulumi.get(self, "cors_configuration")
@property
@pulumi.getter(name="cosmosDbConfiguration")
def cosmos_db_configuration(self) -> Optional['outputs.ServiceCosmosDbConfigurationInfoResponse']:
"""
The settings for the Cosmos DB database backing the service.
"""
return pulumi.get(self, "cosmos_db_configuration")
@property
@pulumi.getter(name="exportConfiguration")
def export_configuration(self) -> Optional['outputs.ServiceExportConfigurationInfoResponse']:
"""
The settings for the export operation of the service instance.
"""
return pulumi.get(self, "export_configuration")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Optional[Sequence['outputs.PrivateEndpointConnectionResponse']]:
"""
The list of private endpoint connections that are set up for this resource.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Control permission for data plane traffic coming from public networks while private endpoint is enabled.
"""
return pulumi.get(self, "public_network_access")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesResourceResponseIdentity(dict):
"""
Setting indicating whether the service has a managed identity associated with it.
"""
def __init__(__self__, *,
principal_id: str,
tenant_id: str,
type: Optional[str] = None):
"""
Setting indicating whether the service has a managed identity associated with it.
:param str principal_id: The principal ID of the resource identity.
:param str tenant_id: The tenant ID of the resource.
:param str type: Type of identity being specified, currently SystemAssigned and None are allowed.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID of the resource identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant ID of the resource.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Type of identity being specified, currently SystemAssigned and None are allowed.
"""
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
from django.conf.urls import include, url
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register('courses', views.CourseViewSet)
urlpatterns = [
url(r'^subjects/$', views.SubjectListView.as_view(), name='subject_list'),
url(r'^subjects/(?P<pk>\d+)/$', views.SubjectDetailView.as_view(), name='subject_detail'),
url(r'^', include(router.urls))
]
|
import importlib
import logging
from zygoat.constants import Phases, Projects
from zygoat.components import Component
from zygoat.config import yaml
from . import resources
log = logging.getLogger()
file_name = 'docker-compose.yml'
class DockerCompose(Component):
def _dump_config(self, data):
with open(file_name, 'w') as root_config:
yaml.dump(data, root_config)
def _load_config(self):
with open(file_name) as root_config:
return yaml.load(root_config.read())
def create(self):
log.info(f'Reading {file_name} from the repo')
config = self._load_config()
config['services'].update(yaml.load(importlib.resources.read_text(resources, file_name)))
log.info('Dumping updated docker-compose config')
self._dump_config(config)
def update(self):
self.call_phase(Phases.CREATE, force_create=True)
def delete(self):
config = self._load_config()
log.info('Removing backend and DB services from config')
del config['services'][Projects.BACKEND]
del config['services']['db']
log.info('Dumping updated docker-compose config')
self._dump_config(config)
@property
def installed(self):
services = self._load_config()['services']
return Projects.BACKEND in services and 'db' in services
docker_compose = DockerCompose()
|
#!/usr/bin/python3
import argparse
from os.path import isfile
from pathlib import Path
from re import compile, findall, split as re_split, sub, search, match
from utils import error
def parse_buffer(encode_detail_buffer, shellcode, numberbefore=0, numberafter=0):
"""
    Parse the buffer and return a tuple of the form:
    ("encode type", start, end, option parameter)
:param encode_detail_buffer:
:param shellcode:
:param numberbefore:
:param numberafter:
:return:
"""
print(encode_detail_buffer)
print(shellcode)
print(numberafter)
print(numberbefore)
to_ret = None
try:
if encode_detail_buffer == ")":
to_ret = None
pass
elif len(encode_detail_buffer) == 1:
            ## Single-character parameter only (X|x|L|l|R|r|\+|-)
if numberbefore != 0:
begin = numberbefore
else:
begin = 0
if numberafter != 0:
end = numberafter
else:
end = len(shellcode) - 1
to_ret = (encode_detail_buffer, begin, end, 1)
#print("({},{},{},{})".format(encode_detail_buffer, begin, end, 1))
elif ":" in encode_detail_buffer:
            ## Handle ranges (e.g. 9:13X)
tmp = encode_detail_buffer[:-1].split(":")
if not encode_detail_buffer[-1].isdigit():
                ## Handle ranges that do not end with a digit (e.g. 9:13X)
to_ret = (encode_detail_buffer[-1], tmp[0], tmp[1], 1)
#print("({},{},{},{})".format(encode_detail_buffer[-1], tmp[0], tmp[1], 1))
elif encode_detail_buffer[-1].isdigit():
                ## Handle ranges that end with a digit (e.g. 9:13X4)
letter = findall("(" + regex_shellcode_encodage_detail + ")" + regex_entier,
encode_detail_buffer)
to_ret = (letter[0][0], tmp[0], tmp[1].split(letter[0][0])[0],
letter[0][1])
#print("({},{},{},{})".format(letter[0][0], tmp[0], tmp[1].split(letter[0][0])[0], letter[0][1]))
elif encode_detail_buffer[0].isdigit() and not encode_detail_buffer[-1].isdigit():
            ## Starts with a digit and does not end with a digit (e.g. 12l)
to_ret = (encode_detail_buffer[-1], encode_detail_buffer[:-1],
encode_detail_buffer[:-1], 1)
#print("({},{},{},{})".format(encode_detail_buffer[-1], encode_detail_buffer[:-1], encode_detail_buffer[:-1], 1))
elif not encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
            ## Does not start with a digit but ends with a digit (e.g. r32)
if numberbefore != 0:
begin = numberbefore
else:
begin = 0
if numberafter != 0:
end = numberafter
else:
end = len(shellcode) - 1
to_ret = (encode_detail_buffer[0], begin, end, encode_detail_buffer[1:])
#print("({},{},{},{})".format(encode_detail_buffer[0], begin, end, encode_detail_buffer[1:]))
elif encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
## Starts and ends with a digit (e.g. 422X5)
before = ""
after = ""
letter = ""
passed_letter = False
for i in encode_detail_buffer:
if i.isdigit() and passed_letter == False:
before += i
elif not i.isdigit():
letter = i
passed_letter = True
elif i.isdigit() and passed_letter == True:
after += i
to_ret = (letter, before, before, after)
# print(to_ret)
print("({},{},{},{})".format(letter, before, before, after))
except IndexError as er:
print(er)
to_ret = None
pass
finally:
return to_ret
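# Illustrative traces of parse_buffer (derived directly from the branches
# above; only len(shellcode) matters, assumed to be 32 here):
#   parse_buffer("X", shellcode)     -> ("X", 0, 31, 1)       # single encode char, whole range
#   parse_buffer("9:13X", shellcode) -> ("X", "9", "13", 1)   # explicit range
#   parse_buffer("12l", shellcode)   -> ("l", "12", "12", 1)  # single offset
#   parse_buffer("r32", shellcode)   -> ("r", 0, 31, "32")    # whole range with option value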
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', '--infile', dest='infile',
help='file to encode; expects raw shellcode bytes, e.g. produced with msfvenom ... -f raw or echo -ne "........"',
required=True)
parser.add_argument('-e', '--encode', '--encode-list', dest='encodelist',
help='list of encodings to apply in sequence (X=xor, +=+1, -=-1); each added encoding wraps the previous payload, e.g. X,-,2,X,+3,+')
parser.add_argument('-o', '--out', '--outfile', dest='outfile', help='write assembly to file (default: STDOUT)')
# parser.add_argument('-d', dest='decode', default=False, action='store_true', help='Decode what is passed via -f or -s')
args = parser.parse_args()
if not isfile(args.infile):
error("No such file: {}".format(args.infile))
try:
shellcode = Path(args.infile).read_bytes()
except:
error("While reading input shellcode file")
if args.encodelist:
regex_entier = r"([0-9]+)"
regex_shellcode_section = r"(" + regex_entier + r"?:)?" + regex_entier + r"?"
regex_shellcode_encodage_simple = r"\*|<|>"
regex_shellcode_encodage_detail = r"X|x|L|l|R|r|\+|-"
regex_shellcode_encodage_list = r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r")"
for sub_encode_list in args.encodelist.split('|'):
if search(
r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
sub_encode_list):
print(search(
r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
sub_encode_list))
error(
"invalid encode list: add ';' between encodings that need details (" + regex_shellcode_encodage_detail + ") and before ':' if needed")
if search(r"\([^\)]*:[^\)]*\)", sub_encode_list):
error("invalid encode list, you cant put positionnal detail inside brackets")
if search(r"(\([^\)]*\()|(\)[^\(]*\))", sub_encode_list):
error("invalid choice, you can't get a encode list with imbrick parenthesis")
# sub_encode_list = sub( r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\))(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\()", r"\1;\2", sub_encode_list)
sub_encode_list = sub(
regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
r"\1,\2", sub_encode_list)
sub_encode_list = sub(
regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
r"\1,\2", sub_encode_list)
sub_encode_list = sub(r"\);", r")", sub_encode_list)
encode_detail_buffer = ""
tab_tupl = []
sub_encode_list += ",a"
for encode_detail in sub_encode_list:
# print('schema all : {}'.format(repr(encode_detail)))
if encode_detail == "," and "(" not in encode_detail_buffer or encode_detail == ")":
encode_detail = encode_detail.replace(',', '')
print(encode_detail_buffer)
# tab_tupl.append((param1, param2, param3 , param4))
if encode_detail_buffer == ")":
pass
elif "(" in encode_detail_buffer or ")" in encode_detail_buffer:
## Handle parentheses
start_number = findall(r"(.*)\((.*)", encode_detail_buffer)
start = start_number[0][0]
if ":" in start:
## Range inside parentheses
tmp = start.split(":")
print(len(start.split(":")))
start = tmp[0]
end = tmp[1]
else:
## No range inside parentheses
end = 0
param = start_number[0][1]
for spl in param.split(','):
ret = parse_buffer(spl, shellcode, start, end)
if ret != None:
tab_tupl.append(ret)
else:
ret = parse_buffer(encode_detail_buffer, shellcode)
if ret != None:
tab_tupl.append(ret)
encode_detail_buffer = ""
encode_detail_buffer += encode_detail
print(tab_tupl)
# print(encode_detail_buffer)
# regex_encode_type_ba = r"((([0-9]*):)?([0-9]*))?\((((X|x|L|l|R|r|\+|-)([0-9]*))
# regex_encode_type_base = r"((([0-9]*):([0-9]*))?((\*|<|>)|((X|x|L|l|R|r|\+|-)([0-9]*)));)"
# regex_split = compile(r"\(|\)")
# regex_sub_encode_type = compile(regex_encode_type_base)
# for sub_encode_list in args.encodelist.split('|'):
# regex_encode_type=compile( regex_encode_type_base + r"\(" + regex_encode_type_base + r"\)?" + regex_encode_type_base + r"?" )
# for sub_encode_list in args.encodelist.split('|'):
# sub_encode_list_parsed = []
# for encode in findall(regex_encode_type, sub_encode_list) :
# offset = 1 if encode[4]=='' else int(encode[4])
# encode_type=encode[1]+encode[3]
# sub_encode_list_parsed.append((offset, encode_type))
# for encode in sub_encode_list_parsed:
# print(encode)
# for encode in findall(regex_encode_type, args.encodelist) :
# encode_type=encode[1]+encode[3]
# offset = 1 if encode[4]=='' else int(encode[4])
# if encode_type == "X" or encode_type == "x":
# print("Running XOR encoder")
# shellcode = rolling_xor(shellcode)
# shellcode = nasm( template_XOR.format(ecx_len(len(shellcode) - 1))) + shellcode
# # ','.join(hex(x) for x in shellcode)
# elif encode_type == "L" or encode_type == "l" or encode_type == "R" or encode_type == "r":
# print("Running right or left bit shifting encoder")
# shellcode = right_left_rotation_bit(shellcode, encode_type == "R" or encode_type == "r", offset)
# shellcode=nasm( template_rotation.format( ecx_len(len(shellcode)), 'rol' if encode_type == "R" or encode_type == "r" else 'ror', offset)) + shellcode
# elif encode_type == "+" or encode_type == "-":
# print("Running + or - encoder")
# shellcode = add_sub(shellcode, add_or_sub=(encode_type=='+'), to_num=offset)
# shellcode = nasm( template_sub_add.format(ecx_len(len(shellcode)), 'sub' if encode_type=='+' else 'add', offset)) + shellcode
# else:
# error("The input encoding action {} is not valid".format(encode_type))
if 0 in shellcode:
print("\033[31mIt looks like your shellcode will not be valid, there is a 00 byte\033[0m")
# print_shellcode(shellcode)
|
# -*- coding: utf-8 -*-
import iso8601
def to_days(date):
timedelta = iso8601.parse_date(date) - iso8601.parse_date("1970-1-1")
return timedelta.days
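# Quick sanity check (illustrative): to_days("1970-01-02") == 1, i.e. the
# function returns whole days elapsed since the Unix epoch.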
class Series(object):
__slots__ = ('series_id', 'title', 'release_date', 'series_info')
def __init__(self, series_id, title, release_date, series_info):
self.series_id = series_id
self.title = title
self.release_date = to_days(release_date)
self.series_info = series_info
class Season(object):
__slots__ = ('series_id', 'season_id', 'title', 'first_aired', 'last_aired')
def __init__(self, series_id, season_id, title, first_aired, last_aired):
self.series_id = series_id
self.season_id = season_id
self.title = title
self.first_aired = to_days(first_aired)
self.last_aired = to_days(last_aired)
class Episode(object):
__slots__ = ('series_id', 'season_id', 'episode_id', 'title', 'air_date')
def __init__(self, series_id, season_id, episode_id, title, air_date):
self.series_id = series_id
self.season_id = season_id
self.episode_id = episode_id
self.title = title
self.air_date = to_days(air_date)
def get_series_data():
return [
Series(1, "IT Crowd", "2006-02-03",
"The IT Crowd is a British sitcom produced by Channel 4, written by Graham Linehan, produced by "
"Ash Atalla and starring Chris O'Dowd, Richard Ayoade, Katherine Parkinson, and Matt Berry."),
Series(2, "Silicon Valley", "2014-04-06",
"Silicon Valley is an American comedy television series created by Mike Judge, John Altschuler and "
"Dave Krinsky. The series focuses on five young men who founded a startup company in Silicon Valley.")
]
def get_seasons_data():
return [
Season(1, 1, "Season 1", "2006-02-03", "2006-03-03"),
Season(1, 2, "Season 2", "2007-08-24", "2007-09-28"),
Season(1, 3, "Season 3", "2008-11-21", "2008-12-26"),
Season(1, 4, "Season 4", "2010-06-25", "2010-07-30"),
Season(2, 1, "Season 1", "2014-04-06", "2014-06-01"),
Season(2, 2, "Season 2", "2015-04-12", "2015-06-14"),
Season(2, 3, "Season 3", "2016-04-24", "2016-06-26"),
Season(2, 4, "Season 4", "2017-04-23", "2017-06-25"),
Season(2, 5, "Season 5", "2018-03-25", "2018-05-13")
]
def get_episodes_data():
return [
Episode(1, 1, 1, "Yesterday's Jam", "2006-02-03"),
Episode(1, 1, 2, "Calamity Jen", "2006-02-03"),
Episode(1, 1, 3, "Fifty-Fifty", "2006-02-10"),
Episode(1, 1, 4, "The Red Door", "2006-02-17"),
Episode(1, 1, 5, "The Haunting of Bill Crouse", "2006-02-24"),
Episode(1, 1, 6, "Aunt Irma Visits", "2006-03-03"),
Episode(1, 2, 1, "The Work Outing", "2006-08-24"),
Episode(1, 2, 2, "Return of the Golden Child", "2007-08-31"),
Episode(1, 2, 3, "Moss and the German", "2007-09-07"),
Episode(1, 2, 4, "The Dinner Party", "2007-09-14"),
Episode(1, 2, 5, "Smoke and Mirrors", "2007-09-21"),
Episode(1, 2, 6, "Men Without Women", "2007-09-28"),
Episode(1, 3, 1, "From Hell", "2008-11-21"),
Episode(1, 3, 2, "Are We Not Men?", "2008-11-28"),
Episode(1, 3, 3, "Tramps Like Us", "2008-12-05"),
Episode(1, 3, 4, "The Speech", "2008-12-12"),
Episode(1, 3, 5, "Friendface", "2008-12-19"),
Episode(1, 3, 6, "Calendar Geeks", "2008-12-26"),
Episode(1, 4, 1, "Jen The Fredo", "2010-06-25"),
Episode(1, 4, 2, "The Final Countdown", "2010-07-02"),
Episode(1, 4, 3, "Something Happened", "2010-07-09"),
Episode(1, 4, 4, "Italian For Beginners", "2010-07-16"),
Episode(1, 4, 5, "Bad Boys", "2010-07-23"),
Episode(1, 4, 6, "Reynholm vs Reynholm", "2010-07-30"),
]
def get_episodes_data_for_bulk_upsert():
return [
Episode(2, 1, 1, "Minimum Viable Product", "2014-04-06"),
Episode(2, 1, 2, "The Cap Table", "2014-04-13"),
Episode(2, 1, 3, "Articles of Incorporation", "2014-04-20"),
Episode(2, 1, 4, "Fiduciary Duties", "2014-04-27"),
Episode(2, 1, 5, "Signaling Risk", "2014-05-04"),
Episode(2, 1, 6, "Third Party Insourcing", "2014-05-11"),
Episode(2, 1, 7, "Proof of Concept", "2014-05-18"),
Episode(2, 1, 8, "Optimal Tip-to-Tip Efficiency", "2014-06-01"),
Episode(2, 2, 1, "Sand Hill Shuffle", "2015-04-12"),
Episode(2, 2, 2, "Runaway Devaluation", "2015-04-19"),
Episode(2, 2, 3, "Bad Money", "2015-04-26"),
Episode(2, 2, 4, "The Lady", "2015-05-03"),
Episode(2, 2, 5, "Server Space", "2015-05-10"),
Episode(2, 2, 6, "Homicide", "2015-05-17"),
Episode(2, 2, 7, "Adult Content", "2015-05-24"),
Episode(2, 2, 8, "White Hat/Black Hat", "2015-05-31"),
Episode(2, 2, 9, "Binding Arbitration", "2015-06-07"),
Episode(2, 2, 10, "Two Days of the Condor", "2015-06-14"),
Episode(2, 3, 1, "Founder Friendly", "2016-04-24"),
Episode(2, 3, 2, "Two in the Box", "2016-05-01"),
Episode(2, 3, 3, "Meinertzhagen's Haversack", "2016-05-08"),
Episode(2, 3, 4, "Maleant Data Systems Solutions", "2016-05-15"),
Episode(2, 3, 5, "The Empty Chair", "2016-05-22"),
Episode(2, 3, 6, "Bachmanity Insanity", "2016-05-29"),
Episode(2, 3, 7, "To Build a Better Beta", "2016-06-05"),
Episode(2, 3, 8, "Bachman's Earnings Over-Ride", "2016-06-12"),
Episode(2, 3, 9, "Daily Active Users", "2016-06-19"),
Episode(2, 3, 10, "The Uptick", "2016-06-26"),
Episode(2, 4, 1, "Success Failure", "2017-04-23"),
Episode(2, 4, 2, "Terms of Service", "2017-04-30"),
Episode(2, 4, 3, "Intellectual Property", "2017-05-07"),
Episode(2, 4, 4, "Teambuilding Exercise", "2017-05-14"),
Episode(2, 4, 5, "The Blood Boy", "2017-05-21"),
Episode(2, 4, 6, "Customer Service", "2017-05-28"),
Episode(2, 4, 7, "The Patent Troll", "2017-06-04"),
Episode(2, 4, 8, "The Keenan Vortex", "2017-06-11"),
Episode(2, 4, 9, "Hooli-Con", "2017-06-18"),
Episode(2, 4, 10, "Server Error", "2017-06-25"),
Episode(2, 5, 1, "Grow Fast or Die Slow", "2018-03-25"),
Episode(2, 5, 2, "Reorientation", "2018-04-01"),
Episode(2, 5, 3, "Chief Operating Officer", "2018-04-08"),
Episode(2, 5, 4, "Tech Evangelist", "2018-04-15"),
Episode(2, 5, 5, "Facial Recognition", "2018-04-22"),
Episode(2, 5, 6, "Artificial Emotional Intelligence", "2018-04-29"),
Episode(2, 5, 7, "Initial Coin Offering", "2018-05-06"),
Episode(2, 5, 8, "Fifty-One Percent", "2018-05-13"),
]
|
"""
I want to know whether the imitation process leads to equal return rates in both sectors.
Parameters that this could depend on are
1) the rate of exploration (random changes in opinion and rewiring),
2) the rate of rewiring.
This should only work in the equilibrium condition where the environment stays constant.
"""
import getpass
import itertools as it
import os
import pickle as cp
import sys
import time
import networkx as nx
import numpy as np
import pandas as pd
from random import uniform
from pysave.visualization.data_visualization \
import plot_trajectories, plot_tau_smean,plot_tau_ymean
from pysave.model.model import SavingsCore_thebest as Model
from pymofa.experiment_handling \
import experiment_handling, even_time_series_spacing
def RUN_FUNC(tau, phi, eps, test, filename):
"""
Set up the model for various parameters and determine
which parts of the output are saved where.
Output is saved in pickled dictionaries including the
initial values, parameters and convergence state and time
for each run.
Parameters:
-----------
tau : float > 0
the rate of the social updates
phi : float \in [0, 1]
the rewiring probability
eps: float > 0
the rate of random events in the social update process
test: int \in [0,1]
whether this is a test run, e.g.
can be executed with lower runtime
filename: string
filename for the results of the run
"""
# Make different types of decision makers. Cues are
# Parameters:
input_params = {'phi': phi, 'tau': tau,
'eps': eps, 'test': test,
'd': 0.5}
# building initial conditions
# network:
n = 100
k = 5
if test:
n = 30
k = 3
while True:
net = nx.barabasi_albert_graph(n, k)
#net = nx.complete_graph(n)
if len(list(net)) > 1:
break
adjacency_matrix = nx.adj_matrix(net).toarray()
# opinions and investment
savings_rate = [uniform(0, 1) for i in range(n)]
init_conditions = (adjacency_matrix, savings_rate)
t_1 = 50000
# initializing the model
m = Model(*init_conditions, **input_params)
# storing initial conditions and parameters
res = {
"parameters": pd.Series({"tau": m.tau,
"phi": m.phi,
"n": m.n,
"P": m.P,
"capital depreciation rate": m.d,
"pi": m.pi,
"kappa": m.kappa,
"epsilon": m.eps})}
# start timer
t_start = time.perf_counter()
# run model with abundant resource
t_max = t_1 if not test else 1
m.R_depletion = False
exit_status = m.run(t_max=t_max)
res["runtime"] = time.clock() - t_start
# store data in case of successful run
if exit_status in [0, 1] or test:
# even and safe macro trajectory
res["trajectory"] = \
even_time_series_spacing(m.get_e_trajectory(), 401, 0., t_max)
# save micro data
res["adjacency"] = m.neighbors
res["final state"] = pd.DataFrame(data=np.array([m.savings_rate,
m.capital,
m.income]).transpose(),
columns=['s', 'k', 'i'])
# find connected components and their size
g = nx.from_numpy_matrix(m.neighbors)
cc = sorted(nx.connected_components(g), key=len)
cluster_sizes = []
for l in cc:
cs = 0
for n in l:
cs += 1
cluster_sizes.append(cs)
res["cluster sizes"] = pd.DataFrame(data=cluster_sizes, columns=['cluster sizes'])
# compute welfare and save
def gini(x):
# (Warning: This is a concise implementation, but it is O(n**2)
# in time and memory, where n = len(x). *Don't* pass in huge
# samples!)
# Mean absolute difference
mad = np.abs(np.subtract.outer(x, x)).mean()
# Relative mean absolute difference
rmad = mad / np.mean(x)
# Gini coefficient
g = 0.5 * rmad
return g
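# Worked example (illustrative): for x = [0., 1.] the mean absolute
# difference is 0.5 and the mean is 0.5, so rmad = 1 and gini(x) = 0.5;
# a perfectly equal sample such as [1., 1., 1.] yields 0.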
#res["welfare"] = np.mean(m.income) * (1. - gini(m.income))
# compute national savings rate and save
res["savings_rate"] = sum(m.income * m.savings_rate) / sum(m.income)
# save data
with open(filename, 'wb') as dumpfile:
cp.dump(res, dumpfile)
try:
np.load(filename)["savings_rate"]
except IOError:
print("writing results failed for " + filename)
return exit_status
def run_experiment(argv):
"""
Take arv input variables and run sub_experiment accordingly.
This happens in five steps:
1) parse input arguments to set switches
for [test, mode, ffh/av, equi/trans],
2) set output folders according to switches,
3) generate parameter combinations,
4) define names and dictionaries of callables to apply to sub_experiment
data for post processing,
5) run computation and/or post processing and/or plotting
depending on execution on cluster or locally or depending on
experimentation mode.
Parameters
----------
argv: list[N]
List of parameters from terminal input
Returns
-------
rt: int
some return value to show whether sub_experiment succeeded
Returns 1 if successful.
"""
"""
Get switches from input line in order of
[test, mode, ffh on/of, equi/transition]
"""
# switch testing mode
if len(argv) > 1:
test = bool(int(argv[1]))
else:
test = False
# switch sub_experiment mode
if len(argv) > 2:
mode = int(argv[2])
else:
mode = 0
"""
set input/output paths
"""
respath = os.path.dirname(os.path.realpath(__file__)) + "/output_data"
if getpass.getuser() == "yuki":
tmppath = respath
elif getpass.getuser() == "asano":
tmppath = "/p/tmp/asano/Savings_Experiments"
else:
tmppath = "./"
folder = 'X2log_delta50'
# make sure, testing output goes to its own folder:
test_folder = ['', 'test_output/'][int(test)]
# check if cluster or local and set paths accordingly
save_path_raw = \
"{}/{}{}/" \
.format(tmppath, test_folder, folder)
save_path_res = \
"{}/{}{}/" \
.format(respath, test_folder, folder)
"""
create parameter combinations and index
"""
taus = [round(x, 5) for x in list(np.logspace(0, 3, 100))]
phis = [0]
epss = [0]
tau, phi, eps = [1., 10., 100.], [0], [0]
if test:
param_combs = list(it.product(tau, phi, eps, [test]))
else:
param_combs = list(it.product(taus, phis, epss, [test]))
index = {0: "tau", 1: "phi", 2: "eps"}
"""
create names and dicts of callables for post processing
"""
name = 'parameter_scan'
name1 = name + '_trajectory'
eva1 = {"mean_trajectory":
lambda fnames: pd.concat([np.load(f)["trajectory"]
for f in fnames]).groupby(
level=0).mean(),
"sem_trajectory":
lambda fnames: pd.concat([np.load(f)["trajectory"]
for f in fnames]).groupby(
level=0).std()
}
name2 = name + '_convergence'
eva2 = {'welfare_mean':
lambda fnames: np.nanmean([np.load(f)["welfare"]
for f in fnames]),
'savings_rate_mean':
lambda fnames: np.nanmean([np.load(f)["savings_rate"]
for f in fnames]),
'welfare_std':
lambda fnames: np.std([np.load(f)["welfare"]
for f in fnames]),
'savings_rate_std':
lambda fnames: np.std([np.load(f)["savings_rate"]
for f in fnames])
}
name3 = name + '_cluster_sizes'
cf3 = {'cluster sizes':
lambda fnames: pd.concat([np.load(f)["cluster sizes"]
for f in fnames]).sort_index(level=0).reset_index()
}
name4 = name + '_all_si'
eva4 = {"all_si":
lambda fnames: [np.load(f)["final state"]
for f in fnames]
}
"""
run computation and/or post processing and/or plotting
"""
# cluster mode: computation and post processing
if mode == 0:
sample_size = 100 if not test else 2
handle = experiment_handling(sample_size, param_combs, index,
save_path_raw, save_path_res)
handle.compute(RUN_FUNC)
handle.resave(eva1, name1)
#handle.resave(eva2, name2)
handle.resave(eva4, name4)
#handle.resave(cf3, name3)
return 1
# local mode: plotting only
if mode == 1:
sample_size = 100 if not test else 2
handle = experiment_handling(sample_size, param_combs, index,
save_path_raw, save_path_res)
#handle.resave(eva1, name1)
#handle.resave(eva2, name2)
handle.resave(eva4, name4)
#handle.resave(cf3, name3)
#plot_trajectories(save_path_res, name1, None, None)
#print save_path_res, name1
#plot_tau_smean(save_path_res, name1, None, None)
#plot_tau_ymean(save_path_res, name1, None, None)
return 1
if __name__ == "__main__":
cmdline_arguments = sys.argv
run_experiment(cmdline_arguments)
|
# version code 80e56511a793+
# Please fill out this stencil and submit using the provided submission script.
# Some of the GF2 problems require use of the value GF2.one so the stencil imports it.
from GF2 import one
## 1: (Problem 2.14.1) Vector Addition Practice 1
#Please express each answer as a list of numbers
p1_v = [-1, 3]
p1_u = [0, 4]
p1_v_plus_u = [...]
p1_v_minus_u = [...]
p1_three_v_minus_two_u = [...]
## 2: (Problem 2.14.2) Vector Addition Practice 2
p2_u = [-1, 1, 1]
p2_v = [ 2, -1, 5]
p2_v_plus_u = [...]
p2_v_minus_u = [...]
p2_two_v_minus_u = [...]
p2_v_plus_two_u = [...]
## 3: (Problem 2.14.3) Vector Addition Practice 3
# Write your answer using GF2's one instead of the number 1
p3_vector_sum_1 = [...]
p3_vector_sum_2 = [...]
## 4: (Problem 2.14.4) GF2 Vector Addition A
# Please express your solution as a subset of the letters {'a','b','c','d','e','f'}.
# For example, {'a','b','c'} is the subset consisting of:
# a (1100000), b (0110000), and c (0011000).
# The answer should be an empty set, written set(), if the given vector u cannot
# be written as the sum of any subset of the vectors a, b, c, d, e, and f.
u_0010010 = ...
u_0100010 = ...
## 5: (Problem 2.14.5) GF2 Vector Addition B
# Use the same format as the previous problem
v_0010010 = ...
v_0100010 = ...
## 6: (Problem 2.14.6) Solving Linear Equations over GF(2)
#You should be able to solve this without using a computer.
x_gf2 = [...]
## 7: (Problem 2.14.7) Formulating Equations using Dot-Product
#Please provide each answer as a list of numbers
v1 = [...]
v2 = [...]
v3 = [...]
## 8: (Problem 2.14.9) Practice with Dot-Product
uv_a = ...
uv_b = ...
uv_c = ...
uv_d = ...
|
import argparse
import sys
from collections import Counter
from tqdm import tqdm
from transformers import AutoTokenizer
def read_and_preprocess(file:str):
subword_len_counter = 0
with open(file, "rt") as f_p:
for line in f_p:
line = line.rstrip()
if not line:
yield line
subword_len_counter = 0
continue
token = line.split()[0]
current_subwords_len = len(tokenizer.tokenize(token))
# Token contains strange control characters like \x96 or \x95
# Just filter out the complete line
if current_subwords_len == 0:
continue
if (subword_len_counter + current_subwords_len) > max_len:
yield ""
yield line
subword_len_counter = current_subwords_len
continue
subword_len_counter += current_subwords_len
yield line
def build_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
# parser.add_argument(
# "--data_dir",
# default=None,
# type=str,
# required=True,
# help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
# )
args = parser.parse_args()
return args
def get_label(s:str):
x = s.split(' ')
if len(x)==2:
label = x[1]
else:
label = None
return label
if __name__ == '__main__':
args = build_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
max_len = args.max_seq_length
max_len -= tokenizer.num_special_tokens_to_add()
label_counter = Counter()
def count_and_return(l:str):
label = get_label(l)
if label is not None:
label_counter.update({label:1})
return l
for split_name in ['train','dev','test']:
dataset = "%s.txt.tmp"%split_name
with open("%s.txt"%split_name,'w') as f:
f.writelines("%s\n"%count_and_return(l) for l in tqdm(read_and_preprocess(dataset)))
with open('labels.txt','w') as f:
f.writelines("%s\n"%l for l in label_counter.keys())
|
import argparse
import csv
import torch
import transformers
def parse_arguments():
parser = argparse.ArgumentParser(description="MiniConf Portal Command Line")
parser.add_argument("papers", default=False, help="papers file to parse")
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
tokenizer = transformers.AutoTokenizer.from_pretrained("deepset/sentence_bert")
model = transformers.AutoModel.from_pretrained("deepset/sentence_bert")
model.eval()
with open(args.papers, "r",encoding='utf-8') as f:
abstracts = list(csv.DictReader(f))
all_abstracts = torch.zeros(len(abstracts), 768)
with torch.no_grad():
for i, row in enumerate(abstracts):
input_ids = torch.tensor([tokenizer.encode(row["abstract"])[:512]])
all_hidden_states, _ = model(input_ids)[-2:]
all_abstracts[i] = all_hidden_states.mean(0).mean(0)
print(i)
print(row['author'])
torch.save(all_abstracts, "embeddings.torch")
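# Expected input/output (inferred from the code above, not from a documented
# schema): the papers CSV needs at least an "abstract" and an "author" column;
# one 768-dimensional mean-pooled sentence-BERT embedding per row is written
# to embeddings.torch.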
|
from distutils.core import setup, Extension
import sys
major_version = '4'
minor_version = '0'
cpplmodule = Extension('cppl_cpp_python_bridge',
define_macros = [('MAJOR_VERSION', major_version),
('MINOR_VERSION', minor_version)],
include_dirs = [],
libraries = ['cppl'],
library_dirs = ['../policy-decision-point'],
sources = ['cpplmodule.cc'],
#extra_compile_args = ['-std=c++11', '-Wall', '-Werror',],
extra_compile_args = ['-std=c++11', '-Wall',],
# extra_objects are included _before_ library_dirs and libraries
extra_objects = [],
# extra_link_args are included _after_ library_dirs and libraries
extra_link_args = [])
setup (name = 'cppl',
version = major_version + '.' + minor_version,
description = 'A C++ - Python bridge for CPPL',
author = 'Jens Hiller',
author_email = 'jens.hiller@comsys.rwth-aachen.de',
url = '',
long_description = '''This package provides a C++-Python bridge for cppl (compact privacy policy language) functionality''',
py_modules = ['cppl'],
ext_modules = [cpplmodule])
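# Typical build step (assumes libcppl has already been built in
# ../policy-decision-point so the linker can find it):
#   python setup.py build_ext --inplace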
|
import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOADED_PHOTOS_DEST = 'app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
class TestConfig(Config):
pass
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:moringa@localhost/db'
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
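# Environment variables read above (values are placeholders; export them in the
# shell or a .env file before starting the app):
#   export SECRET_KEY='change-me'
#   export MAIL_USERNAME='you@example.com'
#   export MAIL_PASSWORD='app-specific-password'
#   export DATABASE_URL='postgresql+psycopg2://user:password@host/dbname'   # production only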
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class SkillEnabledRequest(Request):
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'event_creation_time': 'datetime',
'event_publishing_time': 'datetime'
} # type: Dict
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'event_creation_time': 'eventCreationTime',
'event_publishing_time': 'eventPublishingTime'
} # type: Dict
def __init__(self, request_id=None, timestamp=None, locale=None, event_creation_time=None, event_publishing_time=None):
# type: (Optional[str], Optional[datetime], Optional[str], Optional[datetime], Optional[datetime]) -> None
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
self.__discriminator_value = "AlexaSkillEvent.SkillEnabled" # type: str
self.object_type = self.__discriminator_value
super(SkillEnabledRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.event_creation_time = event_creation_time
self.event_publishing_time = event_publishing_time
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SkillEnabledRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.initializer import initialize_model
class GroupDRO(SingleModelAlgorithm):
"""
Group distributionally robust optimization.
Original paper:
@inproceedings{sagawa2019distributionally,
title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
booktitle={International Conference on Learning Representations},
year={2019}
}
"""
def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):
# check config
assert config.uniform_over_groups
# initialize model
model = initialize_model(config, d_out).to(config.device)
# initialize module
super().__init__(
config=config,
model=model,
grouper=grouper,
loss=loss,
metric=metric,
n_train_steps=n_train_steps,
)
# additional logging
self.logged_fields.append('group_weight')
# step size
self.group_weights_step_size = config.group_dro_step_size
# initialize adversarial weights
self.group_weights = torch.zeros(grouper.n_groups)
self.group_weights[is_group_in_train] = 1
self.group_weights = self.group_weights/self.group_weights.sum()
self.group_weights = self.group_weights.to(self.device)
def process_batch(self, batch):
"""
A helper function for update() and evaluate() that processes the batch
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
Output:
- results (dictionary): information about the batch
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
all Tensors are of size (batch_size,)
"""
results = super().process_batch(batch)
results['group_weight'] = self.group_weights
return results
def objective(self, results):
"""
Takes an output of SingleModelAlgorithm.process_batch() and computes the
optimized objective. For group DRO, the objective is the weighted average
of losses, where groups have weights groupDRO.group_weights.
Args:
- results (dictionary): output of SingleModelAlgorithm.process_batch()
Output:
- objective (Tensor): optimized objective; size (1,).
"""
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
return group_losses @ self.group_weights
def _update(self, results):
"""
Process the batch, update the log, and update the model, group weights, and scheduler.
Args:
- results (dictionary): output of SingleModelAlgorithm.process_batch()
Output:
- results (dictionary): information about the batch, such as:
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
- objective (float)
"""
# compute group losses
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
# update group weights
self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)
self.group_weights = (self.group_weights/(self.group_weights.sum()))
# save updated group weights
results['group_weight'] = self.group_weights
# update model
super()._update(results)
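# Note on the update rule above (exponentiated-gradient ascent on the group
# weights): with step size eta and per-group losses L_g, each weight is scaled
# as w_g <- w_g * exp(eta * L_g) and the vector is renormalised to sum to 1,
# so groups with higher current loss receive more weight in the next
# objective group_losses @ group_weights.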
|
import numpy
from matplotlib import pyplot
import gdal
from skimage import io,exposure
from skimage.segmentation import slic,mark_boundaries
import os
from PIL import Image
import shelve
import sys
sys.path.append('..')
from Config import config
def seg(path,n_segments=500, compactness=20):
i=io.imread(path)[:,:,[3,2,1,7]]
img=i[:,:,:3]
img=(img-img.min())/(img.max()-img.min())
img=img*255
img=img.astype(numpy.uint8)
img=exposure.adjust_gamma(img,0.5)
segment=slic(img,n_segments=n_segments, compactness=compactness,enforce_connectivity=True)
out=mark_boundaries(img,segment,color=[0,0,0.2])
#img=exposure.adjust_gamma(img,0.5)
#out=exposure.adjust_gamma(out,0.5)
wdi=(i[:,:,3]-i[:,:,1])/(i[:,:,3]+i[:,:,1])
wdi=(wdi/wdi.max())*255
return segment,out,img,wdi
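# Illustrative call (the path layout follows getname() below, the root comes
# from Config):
#   segment, out, img, wdi = seg(config.path + r'\ROIs1158_spring\s2_1\ROIs1158_spring_s2_1_p1.tif')
# It returns the SLIC superpixel map, the boundary overlay, the gamma-adjusted
# RGB preview and a normalised-difference (water) index built from two of the
# loaded channels.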
def getname(path,namelist):
if namelist[0]==0:
season='ROIs1158_spring'
elif namelist[0]==1:
season='ROIs1868_summer'
elif namelist[0]==2:
season='ROIs1970_fall'
elif namelist[0]==3:
season='ROIs2017_winter'
path_s2=path+'\\'+season+'\\s2_'+str(namelist[1])+'\\'+season+'_s2_'+str(namelist[1])+'_p'+str(namelist[2])+'.tif'
return path_s2
def transform(name):
if 'spring' in name:
season=0
elif 'summer' in name:
season=1
elif 'fall' in name:
season=2
elif 'winter' in name:
season=3
l=[]
l.append(season)
l.append(int(name.split('_')[3]))
l.append(int(name.split('_')[4].split('.')[0][1:]))
return l
class UI:
def __init__(self,mode='normal',init=0):
'''mode = 'normal': normal labelling
mode = 'review': only display images that have already been labelled
'''
self.mode=mode
self.path_label=config.path_labels
if self.mode=='normal':
with shelve.open(config.path_devision) as f:
self.imglist=f['test']
else:
self.imglist=os.listdir(config.path_labels)
self.n=init
self.ifpress=False
self.ifloadlabel=False
fig=pyplot.figure()
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect('key_press_event',self.on_key_press)
fig.canvas.mpl_connect('button_press_event',self.on_button_press)
fig.canvas.mpl_connect('motion_notify_event',self.on_button_move)
fig.canvas.mpl_connect('button_release_event',self.on_button_release)
self.fig=fig
self.ax1=fig.add_subplot(3,2,1)
self.ax2=fig.add_subplot(3,2,3)
self.ax4=fig.add_subplot(3,2,5)
self.ax3=fig.add_subplot(1,2,2)
pyplot.get_current_fig_manager().window.state('zoomed')
#self.ax2=fig.add_subplot(1,2,2)
self.valuelist=[]
self.label=numpy.zeros((256,256))
self.ifloadlabel=True
self.draw()
pyplot.show()
def on_key_press(self,event):
if event.key=='a' or event.key=='left':
self.n-=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='d' or event.key=='right':
if self.n+1>=len(self.imglist):
return
self.n+=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='e' or event.key=='enter':
self.save_label()
if event.key=='Q':
f=numpy.unique(self.segment).tolist()
for i in f:
if i not in self.valuelist:
self.valuelist.append(i)
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1.0,0)
self.draw()
def on_button_press(self,event):
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.ifpress=True
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.ifpress=True
self.valuelist.remove(value)
def on_button_move(self,event):
if not self.ifpress:
return
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.valuelist.remove(value)
def on_button_release(self,event):
if not self.ifpress:
return
self.ifpress=False
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1,0).astype(int)
self.draw()
def draw(self):
if self.mode=='normal':
segment,out,img,wdi=seg(getname(config.path,self.imglist[self.n]))
else:
segment,out,img,wdi=seg(getname(config.path,transform(self.imglist[self.n])))
self.segment=segment
if self.ifloadlabel:
self.read_label()
self.ifloadlabel=False
#self.ax1.imshow(out)
t=numpy.where(self.label==1,0.5,out[:,:,2])
out[:,:,2]=t
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax4.cla()
self.ax1.imshow(img)
self.ax2.imshow(wdi,cmap='gray')
self.ax3.imshow(out)
self.ax4.imshow(self.label,cmap='gray')
d=os.listdir(config.path_labels)
self.ax3.set_title(str(len(d))+'/'+str(self.n+1))
self.fig.canvas.draw_idle()
def save_label(self):
label=self.label*255
label=label.astype(numpy.uint8)
label=Image.fromarray(label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
label.save(self.path_label+'\\'+name)
def read_label(self):
dirlist=os.listdir(self.path_label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
if name in dirlist:
self.label=numpy.array(Image.open(self.path_label+'\\'+name))/255
self.label=self.label.astype(int)
self.valuelist=list(numpy.unique(numpy.where(self.label==1,self.segment,-2)))
self.valuelist.remove(-2)
def statistic():
d=os.listdir(config.path_labels)
n=numpy.array([0,0,0,0])
for i in d:
if 'spring' in i:
n[0]=n[0]+1
if 'summer' in i:
n[1]=n[1]+1
if 'fall' in i:
n[2]=n[2]+1
if 'winter' in i:
n[3]=n[3]+1
print(n)
n=n/len(d)
print(n)
if __name__=='__main__':
test=UI(mode='normal',init=100)
#statistic()
|
import maya.cmds as mc
class UserInputError(Exception): pass
class Spaces(object):
def __init__(self):
'''
Initializer for Spaces class object
'''
self.allChannels = ['t','tx','ty','tz','r','rx','ry','rz','s','sx','sy','sz']
self.channels = self.allChannels[0:8]
self.transform = ['transform','joint']
self.worldNode = 'spaces_wld01_loc'
self.managerUI = 'spacesUI'
self.uiRCL = 'spacesRCL'
self.uiKeyCBG = 'spacesKeyCBG'
self.uiKeyPreviousCBG = 'spacesKeyPrevCBG'
self.uiMaintainPosCBG = 'spacesMaintainPosCBG'
self.uiAllOMG = 'spacesAllOMG'
def create(self,ctrl,targetList=[],abrTargetList=[],nameTag='',worldParent=''):
'''
Create a new spaces node
@param targetList: list of target transforms for the space node constraint
@type targetList: list
@param abrTargetList: list of abbreviated target names. Used in UI.
@type abrTargetList: list
@param ctrl: Control to be parented to spaces node
@type ctrl: str
@param nameTag: Shortened, descriptive name for control. Used in UI.
@type nameTag: str
'''
if not len(abrTargetList): abrTargetList = targetList
# Verify target list
for target in targetList:
if not mc.objExists(target):
raise UserInputError('Target object '+target+' does not exist!')
# Determine SPACE node
par = ''
try: par = self.getSpacesNode(ctrl)
except:
# Get control transform parent
par = mc.listRelatives(ctrl,p=True)
# If none exist, create one
if par == None: par = mc.group(ctrl,n=ctrl+'_buf')
else: par = par[0]
else:
# Spaces node exists, run Spaces().add()
#print('Spaces node already exists. Running Spaces().add() instead!')
result = self.add(ctrl,targetList,abrTargetList,nameTag)
return result
# Create spaces WORLD transform
if not mc.objExists(self.worldNode):
self.worldNode = mc.createNode('transform',n=self.worldNode)
if len(worldParent):
if mc.objExists(worldParent): mc.parent(self.worldNode,worldParent)
else:
if len(worldParent):
currentWorldParent = mc.listRelatives(self.worldNode,p=1)[0]
print('Spaces WORLD node already exists and is parented to '+currentWorldParent+'!!')
# Adjust TargetList Arrays
targetList.insert(0,self.worldNode)
targetList.insert(0,par)
abrTargetList.insert(0,'SuperMover')
abrTargetList.insert(0,'Default')
# Create SPACES constraint transform
spacesNode = mc.duplicate(par,rr=1,rc=1,n=ctrl+'_spn')[0]
mc.delete(mc.listRelatives(spacesNode,ad=1))
# Unlock Constraint Offset
for ch in self.allChannels: mc.setAttr(spacesNode+'.'+ch,l=False,k=False)
# Reparent hierarchy
mc.parent(spacesNode,par)
mc.parent(ctrl,spacesNode)
# Add targetOffset attributes to link to constraint
mc.addAttr(spacesNode,ln='targetOffsetTranslate',sn='tot',at='double3')
mc.addAttr(spacesNode,ln='targetOffsetTranslateX',sn='totx',at='double',p='targetOffsetTranslate')
mc.addAttr(spacesNode,ln='targetOffsetTranslateY',sn='toty',at='double',p='targetOffsetTranslate')
mc.addAttr(spacesNode,ln='targetOffsetTranslateZ',sn='totz',at='double',p='targetOffsetTranslate')
mc.addAttr(spacesNode,ln='targetOffsetRotate',sn='tor',at='double3')
mc.addAttr(spacesNode,ln='targetOffsetRotateX',sn='torx',at='double',p='targetOffsetRotate')
mc.addAttr(spacesNode,ln='targetOffsetRotateY',sn='tory',at='double',p='targetOffsetRotate')
mc.addAttr(spacesNode,ln='targetOffsetRotateZ',sn='torz',at='double',p='targetOffsetRotate')
# Set targetOffset attributes as keyable
for ch in self.channels: mc.setAttr(spacesNode+'.to'+ch,k=True)
# Add default offset value attributes
mc.addAttr(spacesNode,ln='defaultOffset',at='compound',numberOfChildren=2,m=True)
mc.addAttr(spacesNode,ln='defaultOffsetTranslate',sn='dot',at='double3',p='defaultOffset')
mc.addAttr(spacesNode,ln='defaultOffsetTranslateX',sn='dotx',at='double',p='defaultOffsetTranslate')
mc.addAttr(spacesNode,ln='defaultOffsetTranslateY',sn='doty',at='double',p='defaultOffsetTranslate')
mc.addAttr(spacesNode,ln='defaultOffsetTranslateZ',sn='dotz',at='double',p='defaultOffsetTranslate')
mc.addAttr(spacesNode,ln='defaultOffsetRotate',sn='dor',at='double3',p='defaultOffset')
mc.addAttr(spacesNode,ln='defaultOffsetRotateX',sn='dorx',at='doubleAngle',p='defaultOffsetRotate')
mc.addAttr(spacesNode,ln='defaultOffsetRotateY',sn='dory',at='doubleAngle',p='defaultOffsetRotate')
mc.addAttr(spacesNode,ln='defaultOffsetRotateZ',sn='dorz',at='doubleAngle',p='defaultOffsetRotate')
# Setup .spaces attribute
enumString = ''
for abr in abrTargetList: enumString += abr+':'
if not mc.objExists(spacesNode+'.spaces'):
mc.addAttr(spacesNode,ln='spaces',at='enum',en=enumString)
mc.setAttr(spacesNode+'.spaces',k=1)
else:
mc.addAttr(spacesNode+'.spaces',e=1,en=enumString)
# Name Tag
if not len(nameTag): nameTag = ctrl
if not mc.objExists(spacesNode+'.nameTag'):
mc.addAttr(spacesNode,ln='nameTag',dt='string')
mc.setAttr(spacesNode+'.nameTag',nameTag,type='string')
# Create constraint
spacesNodeConstraint = ''
for i in range(len(targetList)):
# Add target to constraint
if not i:
# First iteration - Create new constraint
spacesNodeConstraint = mc.parentConstraint(targetList[i],spacesNode,n=ctrl+'_pcn',w=0.0)[0]
else:
# Add to existing constraint
mc.parentConstraint(targetList[i],spacesNode,mo=True,w=0.0)
# Unlock target offset attributes
for ch in self.channels:
mc.setAttr(spacesNodeConstraint+'.target['+str(i)+'].to'+ch,l=False,k=True)
translateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(i)+'].targetOffsetTranslate')[0]
rotateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(i)+'].targetOffsetRotate')[0]
mc.setAttr(spacesNode+'.defaultOffset',l=False)
mc.setAttr(spacesNode+'.defaultOffset['+str(i)+'].dot',translateOffset[0],translateOffset[1],translateOffset[2])
mc.setAttr(spacesNode+'.defaultOffset['+str(i)+'].dor',rotateOffset[0],rotateOffset[1],rotateOffset[2])
mc.setAttr(spacesNode+'.defaultOffset',l=True)
# Connect spacesNode to Constraint
weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True)
for i in range(len(targetList)):
# Create targetWeight attribute on spacesNode
mc.addAttr(spacesNode,ln=weightAliasList[i],min=0.0,max=1.0,dv=0.0)
mc.setAttr(spacesNode+'.'+weightAliasList[i],l=False,k=True)
mc.connectAttr(spacesNode+'.'+weightAliasList[i], spacesNodeConstraint+'.'+weightAliasList[i], f=True)
# Connect targetOffset attributes
translateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(i)+'].dot')[0]
rotateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(i)+'].dor')[0]
mc.setAttr(spacesNode+'.tot',translateOffset[0],translateOffset[1],translateOffset[2],l=False)
mc.setAttr(spacesNode+'.tor',rotateOffset[0],rotateOffset[1],rotateOffset[2],l=False)
mc.connectAttr(spacesNode+'.tot', spacesNodeConstraint+'.target['+str(i)+'].tot',f=True)
mc.connectAttr(spacesNode+'.tor', spacesNodeConstraint+'.target['+str(i)+'].tor',f=True)
# Set all spaces to Default
self.switch(ctrl,'Default',0)
# Return result
return [spacesNode,spacesNodeConstraint]
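# Illustrative usage (object names are hypothetical, not from a specific rig):
#   Spaces().create('lf_hand_ctrl',
#                   targetList=['cn_head_jnt', 'cn_root_jnt'],
#                   abrTargetList=['Head', 'Root'],
#                   nameTag='LeftHand')
# This returns [spacesNode, spacesNodeConstraint] and leaves the control in
# the 'Default' space.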
def add(self,ctrl,targetList=[],abrTargetList=[],nameTag=''):
'''
add to an existing spaces node
@param targetList: list of target transforms for the space node constraint
@type targetList: list
@param abrTargetList: list of abbreviated target names. Used in UI.
@type abrTargetList: list
@param ctrl: Control to be parented to spaces node
@type ctrl: str
@param nameTag: Shortened, descriptive name for control. Used in UI.
@type nameTag: str
'''
if not len(abrTargetList): abrTargetList = targetList
# Determine SPACE node
spacesNode = ''
try:
spacesNode = self.getSpacesNode(ctrl)
except:
#print('Spaces node does not exists. Running Spaces().create() instead!')
result = self.create(ctrl,targetList,abrTargetList,nameTag)
return result
# Determine SPACE node constraint
spacesNodeConstraint = self.getSpacesConstraint(ctrl)
# Verify target list
for target in targetList:
if not mc.objExists(target):
raise UserInputError('Target object '+target+' does not exist!')
# Add Constraint Targets
targetListSize = len(mc.parentConstraint(spacesNodeConstraint,q=True,tl=True))
for i in range(len(targetList)):
mc.parentConstraint(targetList[i],spacesNodeConstraint,mo=True,w=0.0)
# Unlock target offset attributes
for ch in self.channels:
mc.setAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].to'+ch,l=False,k=True)
# Store Default Offset Values
translateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].targetOffsetTranslate')[0]
rotateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].targetOffsetRotate')[0]
mc.setAttr(spacesNode+'.defaultOffset',l=False)
mc.setAttr(spacesNode+'.defaultOffset['+str(targetListSize)+'].dot',translateOffset[0],translateOffset[1],translateOffset[2])
mc.setAttr(spacesNode+'.defaultOffset['+str(targetListSize)+'].dor',rotateOffset[0],rotateOffset[1],rotateOffset[2])
mc.setAttr(spacesNode+'.defaultOffset',l=True)
# Connect spacesNode offset to constraintNode offset
mc.connectAttr(spacesNode+'.tot', spacesNodeConstraint+'.target['+str(targetListSize)+'].tot',f=True)
mc.connectAttr(spacesNode+'.tor', spacesNodeConstraint+'.target['+str(targetListSize)+'].tor',f=True)
# Increment targetListSize
targetListSize += 1
# Add and connect new weight attrs
weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True)
for i in range(len(weightAliasList)):
if not mc.objExists(spacesNode+'.'+weightAliasList[i]):
mc.addAttr(spacesNode,ln=weightAliasList[i],k=True,min=0.0,max=1.0,dv=0.0)
mc.connectAttr(spacesNode+'.'+weightAliasList[i],spacesNodeConstraint+'.'+weightAliasList[i],f=True)
# Append .spaces attribute
enumString = mc.addAttr(spacesNode +'.spaces',q=True,en=True) + ':'
for abr in abrTargetList: enumString += abr+':'
mc.addAttr(spacesNode+'.spaces',e=True,en=enumString)
# Return result
return [spacesNode,spacesNodeConstraint]
def switch(self,ctrl,newTarget,key=0,keyPreviousFrame=0,maintainPos=1):
'''
Switch spaces state for specified control.
@param ctrl: Control to switch spaces for
@type ctrl: str
@param newTarget: Spaces target to swicth to
@type newTarget: str
@param key: Set key for spaces state after switch
@type key: bool
@param keyPreviousFrame: Set key on previous frame for spaces state before switch. Only relevant when key is also True.
@type keyPreviousFrame: bool
'''
# Find Space Node and Relevant Constraint
spacesNode = self.getSpacesNode(ctrl)
spacesNodeConstraint = self.getSpacesConstraint(ctrl)
weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True)
targetTransform = mc.parentConstraint(spacesNodeConstraint,q=True,targetList=True)
# Get newTarget constraint index
validSpaces = mc.addAttr(spacesNode+'.spaces',q=True,en=True).split(':')
if not validSpaces.count(newTarget):
raise UserInputError('Object '+newTarget+' is not a spaces target for '+ctrl)
newTargetIndex = validSpaces.index(newTarget)
# Key previous frame
if keyPreviousFrame: self.key(ctrl,[],mc.currentTime(q=True)-1,)
# Calculate constraint offsets to maintain control position
if maintainPos:
# Create temporary constraint transform
temp = mc.duplicate(spacesNode,rr=True,rc=True,n='temp_spaceNode')
mc.delete(mc.listRelatives(temp[0],ad=True,pa=True))
# Unlock channels
for ch in self.channels: mc.setAttr(temp[0]+'.'+ch,l=False)
# Create temporary parent constraint
conn = mc.parentConstraint(targetTransform[newTargetIndex],temp[0],mo=True)
translateOffset = mc.getAttr(conn[0]+'.target[0].targetOffsetTranslate')[0]
rotateOffset = mc.getAttr(conn[0]+'.target[0].targetOffsetRotate')[0]
# Delete temporary nodes
mc.delete(conn)
mc.delete(temp)
# Set Constraint Offsets
mc.setAttr(spacesNode+'.tot',translateOffset[0],translateOffset[1],translateOffset[2])
mc.setAttr(spacesNode+'.tor',rotateOffset[0],rotateOffset[1],rotateOffset[2])
# Set Constraint Target Weights
for i in range(len(validSpaces)): mc.setAttr(spacesNode+'.'+weightAliasList[i],i==newTargetIndex)
# Set ".spaces" attribute
mc.setAttr(spacesNode+'.spaces',newTargetIndex)
# Reset to default offset values
if not maintainPos:
self.reset(ctrl,0,0)
# Key current frame
if key: self.key(ctrl,[],mc.currentTime(q=True))
def switchAllTo(self,target,char='',key=0,keyPrevious=0,maintainPos=1):
'''
Switch all spaces nodes to the specified target
@param target: Spaces target to switch all spaces nodes to
@type target: str
@param char: Character namespace to filter for when searching for spaces nodes
@type char: str
@param key: Set key for spaces state after switch
@type key: bool
@param keyPrevious: Set key on previous frame for spaces state before switch. Only relevant when "key" is also True.
@type keyPrevious: bool
'''
# Find all spacesNodes
spaceNodeList = mc.ls('*_spn',r=True,et='transform')
for spacesNode in spaceNodeList:
child = mc.listRelatives(spacesNode,c=1)
try: self.switch(child[0],target,key,keyPrevious,maintainPos)
except: print('Object '+child[0]+' is not able to be placed in the space of '+target+'! Skipping control!!')
def key(self,ctrl,targetList=[],frame=None):
'''
Set keyframe on spaces relevant attribute for the given control and specified target(s).
@param ctrl: Control to set spaces keys on
@type ctrl: str
@param targetList: List of targets to set keys on
@type targetList: list
@param frame: Frame to set keys on
@type frame: float
'''
# Check default frame value
if frame == None: frame = mc.currentTime(q=True)
# Get spaces info
spacesNode = self.getSpacesNode(ctrl)
spacesNodeConstraint = self.getSpacesConstraint(ctrl)
weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True)
# Check target list
if not len(targetList): targetList = self.targetList(ctrl)
# Get target indices
targetIndexList = []
for target in targetList:
targetIndexList.append(self.targetIndex(ctrl,target))
# Set keys
for i in targetIndexList:
mc.setKeyframe(spacesNode+'.'+weightAliasList[i],t=frame,itt='clamped',ott='step')
mc.setKeyframe(spacesNode+'.tot',t=frame,itt='clamped',ott='step')
mc.setKeyframe(spacesNode+'.tor',t=frame,itt='clamped',ott='step')
mc.setKeyframe(spacesNode+'.spaces',t=frame,itt='clamped',ott='step')
def reset(self,ctrl,key=0,keyPreviousFrame=0):
'''
Reset spaces constraint offsets for the specified control
@param ctrl: Control whose spaces target offset values will be reset
@type ctrl: str
@param key: Set keyframe after reset
@type key: bool
@param keyPreviousFrame: Set keyframe before reset. Only relevant when "key" is also True.
@type keyPreviousFrame: bool
'''
# Get spaces info
spacesNode = self.getSpacesNode(ctrl)
spacesNodeConstraint = self.getSpacesConstraint(ctrl)
# Check spaces attribute
if not mc.objExists(spacesNode+'.spaces'):
raise UserInputError('Object '+spacesNode+' does not contain a ".spaces" attribute!')
targetIndex = mc.getAttr(spacesNode+'.spaces')
target = self.targetList(ctrl)[targetIndex]
# Key previous frame
if keyPreviousFrame: self.key(ctrl,[],mc.currentTime(q=True)-1,)
# Reset Offset Values
translateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(targetIndex)+'].dot')[0]
rotateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(targetIndex)+'].dor')[0]
mc.setAttr(spacesNode+'.tot',translateOffset[0],translateOffset[1],translateOffset[2])
mc.setAttr(spacesNode+'.tor',rotateOffset[0],rotateOffset[1],rotateOffset[2])
# Key current frame
if key: self.key(ctrl)
def targetList(self,ctrl):
'''
Return the spaces target list for the specified control or spaces node
@param ctrl: Control whose spaces target list will be returned
@type ctrl: str
'''
# Check spaces attribute
spacesNode = self.getSpacesNode(ctrl)
# Get target list info
spacesList = mc.addAttr(spacesNode+'.spaces',q=True,en=True)
return spacesList.split(':')
def targetIndex(self,ctrl,target):
'''
Return the target index for the specified control and spaces target
@param ctrl: Control whose spaces target index will be returned
@type ctrl: str
@param target: Spaces target whose index will be returned
@type ctrl: str
'''
spacesNode = self.getSpacesNode(ctrl)
validSpaces = mc.addAttr(spacesNode+'.spaces',q=True,en=True).split(':')
if not validSpaces.count(target):
raise UserInputError('Object '+target+' is not a spaces target for '+ctrl)
return validSpaces.index(target)
def getSpacesNode(self,ctrl):
'''
Return the name of the spaces node of the specified control
@param ctrl: Control whose spaces node will be returned
@type ctrl: str
'''
# Check control
if mc.objExists(ctrl+'.spaces') and mc.objExists(ctrl+'.defaultOffset'): return ctrl
# Determine spaces node
parent = mc.listRelatives(ctrl,p=True)
if parent == None:
raise UserInputError('Object '+ctrl+' is not the child of a valid spaces node!')
if not parent[0].endswith('_spn'):
raise UserInputError('Object '+ctrl+' is not the child of a valid spaces node!')
# Check spaces attribute
if not mc.objExists(parent[0]+'.spaces'):
raise UserInputError('Spaces node '+parent[0]+' does not contain a ".spaces" attribute!')
return parent[0]
def getSpacesConstraint(self,ctrl):
'''
Return the name of the spaces constraint node of the specified control
@param ctrl: Control whose spaces constraint node will be returned
@type ctrl: str
'''
spacesNode = self.getSpacesNode(ctrl)
spaceTransConst = mc.listConnections(spacesNode+'.tx',s=True,d=False,type='parentConstraint')
spaceRotConst = mc.listConnections(spacesNode+'.rx',s=True,d=False,type='parentConstraint')
if type(spaceTransConst)!=list or type(spaceRotConst)!=list:
raise UserInputError('No spaces constraint found for '+ctrl)
if spaceTransConst[0] != spaceRotConst[0]:
raise UserInputError('Translate and Rotate Constraint Mis-match on '+ctrl)
return spaceTransConst[0]
def updateOld(self):
'''
Update old style spaces setup to work with new spaces procedures.
'''
# Get list of existing spaces nodes
spacesNodeList = mc.ls('*_spn',r=1,et='transform')
for spacesNode in spacesNodeList:
spacesChild = mc.listRelatives(spacesNode,c=True,type='transform')[0]
# Transfer .spaces attribute
if mc.objExists(spacesChild+'.spaces'):
enumString = mc.addAttr(spacesChild+'.spaces',q=True,en=True)
mc.addAttr(spacesNode,ln='spaces',at='enum',en=enumString)
mc.setAttr(spacesNode+'.spaces',k=1)
mc.deleteAttr(spacesChild+'.spaces')
# Transfer .nameTag attribute
if mc.objExists(spacesChild+'.nameTag'):
nameTagStr = mc.getAttr(spacesChild+'.nameTag')
mc.addAttr(spacesNode,ln='nameTag',dt='string')
mc.setAttr(spacesNode+'.nameTag',nameTagStr,type='string')
mc.deleteAttr(spacesChild+'.nameTag')
#=====================================
# UI METHODS =========================
def ui_embed(self,parentLayout,char=''):
'''
@param parentLayout: Parent UI layout to parent spaces UI to.
@type parentLayout: str
@param char: Character namespace to create UI for
@type char: str
'''
# Get Character Prefix
if char: char += ':'
# List all spaces with given prefix
spaceNodeList = mc.ls(char+'*_spn',r=True,et='transform')
spaceNodeList = [i for i in spaceNodeList if not i.endswith('_con_spn')]
# Generate target list with default elements
targetList = ['SuperMover','Default']
# Build Spaces List from eNum attributes
for node in spaceNodeList:
if mc.objExists(node+'.spaces'):
enumList = mc.addAttr(node+'.spaces',q=True,en=True).split(':')
[targetList.append(i) for i in enumList if not targetList.count(i)]
# Begin UI Build
if not mc.layout(parentLayout,q=True,ex=True):
			raise UserInputError('Parent layout '+parentLayout+' does not exist! Unable to embed Spaces UI!')
mc.setParent(parentLayout)
# Clear Layout
childArray = mc.layout(parentLayout,q=True,ca=True)
if type(childArray) == list: mc.deleteUI(childArray)
# Add Spaces control layout
mc.rowColumnLayout(self.uiRCL,numberOfColumns=5,columnWidth=[(1,160),(2,160),(3,80),(4,80),(5,80)])
# Add KeyFrame CheckBoxs
mc.checkBoxGrp(self.uiKeyPreviousCBG,numberOfCheckBoxes=1,label="Key Before Switch",v1=0)
mc.checkBoxGrp(self.uiKeyCBG,numberOfCheckBoxes=1,label="Key After Switch",v1=0)
for i in range(3): mc.separator(h=20,style='none')
mc.checkBoxGrp(self.uiMaintainPosCBG,numberOfCheckBoxes=1,label="Maintain Position",v1=1)
for i in range(4): mc.separator(h=20,style='none')
for i in range(5): mc.separator(h=20,style='single')
# ALL OptionMenu
mc.text(label='ALL')
mc.optionMenuGrp(self.uiAllOMG,cw=(1,1),cal=(1,'center'),label='',cc='glTools.tools.spaces.Spaces().switchAllFromUI()')
for item in targetList: mc.menuItem(label=item)
mc.button(w=80,l='Reset',c='glTools.tools.spaces.Spaces().resetAllFromUI("'+char+'")')
mc.button(w=80,l='Select',c='mc.select(mc.ls("'+char+'*_spn"))')
mc.button(w=80,l='Key',c='glTools.tools.spaces.Spaces().keyAllFromUI("'+char+'")')
for i in range(5): mc.separator(h=20,style='single')
# Create attrEnumOptionMenu controls to accurately represent attribute values at all times.
# ie - Update on frame change
for spacesNode in spaceNodeList:
tag = mc.getAttr(spacesNode+'.nameTag')
mc.text(label=tag)
mc.attrEnumOptionMenu(tag+'_switchAEO',w=160,label='',attribute=spacesNode+'.spaces',dtg=spacesNode+'.spaces',cc='glTools.tools.spaces.Spaces().switchFromUI("'+spacesNode+'")')
mc.button(w=80,l='Reset',c='glTools.tools.spaces.Spaces().resetFromUI("'+spacesNode+'")')
mc.button(w=80,l='Select',c='mc.select("'+spacesNode+'")')
mc.button(w=80,l='Key',c='glTools.tools.spaces.Spaces().key("'+spacesNode+'")')
#mc.separator(h=20,style='none')
def ui(self,char=''):
'''
Creates the main control interface for manipulating spaces
@param char: Character namespace to filter for when populating UI.
@type char: str
'''
# Generate window
win = self.managerUI
if mc.window(win,q=True,ex=True): mc.deleteUI(win)
win = mc.window(win,t='Spaces UI - '+char.upper())
# Create column layout for controls items
spacesCL = mc.columnLayout('spacesUI_CL',adjustableColumn=True)
# Open window
mc.showWindow(win)
# Embed control layout into window
self.ui_embed(spacesCL,char)
def switchFromUI(self,ctrl):
'''
Switch a spaces nodes to the specified target from the spaces UI
@param ctrl: Control whose spaces target will be switched
@type ctrl: str
'''
# Determine spaces node
spacesNode = self.getSpacesNode(ctrl)
tag = mc.getAttr(spacesNode+'.nameTag')
# Query Target
# !!! optionMenu command no longer allows access to attrEnumOptionMenu !!!
#target = mc.optionMenu(tag+'_switchAEO',q=True,v=True)
targetIndex = mc.getAttr(mc.attrEnumOptionMenu(tag+'_switchAEO',q=True,dtg=True))
target = self.targetList(spacesNode)[targetIndex]
# Check keyframe options
key = mc.checkBoxGrp(self.uiKeyCBG,q=True,v1=True)
keyPrevious = mc.checkBoxGrp(self.uiKeyPreviousCBG,q=True,v1=True)
# Check offset options
maintainPos = mc.checkBoxGrp(self.uiMaintainPosCBG,q=True,v1=True)
# Do switch
self.switch(ctrl,target,key,keyPrevious,maintainPos)
def switchAllFromUI(self,char=''):
'''
Switch all spaces nodes to the specified target from the UI.
@param char: Character whose spaces target will be switched
@type char: str
'''
# Determine target
if not mc.optionMenuGrp(self.uiAllOMG,q=True,ex=True):
raise UserInputError('OptionMenuGrp '+self.uiAllOMG+' does not exist!')
target = mc.optionMenuGrp(self.uiAllOMG,q=True,v=True)
# Check keyframe options
key = mc.checkBoxGrp(self.uiKeyCBG,q=True,v1=True)
keyPrevious = mc.checkBoxGrp(self.uiKeyPreviousCBG,q=True,v1=True)
# Check offset options
maintainPos = mc.checkBoxGrp(self.uiMaintainPosCBG,q=True,v1=True)
# Do switch
self.switchAllTo(target,char,key,keyPrevious,maintainPos)
def resetFromUI(self,ctrl):
'''
Reset spaces constraint offsets for the specified control from the spaces UI
		@param ctrl: Control whose spaces target offset values will be reset
@type ctrl: str
'''
# Get reset options
key = mc.checkBoxGrp(self.uiKeyCBG,q=True,v1=True)
keyPrevious = mc.checkBoxGrp(self.uiKeyPreviousCBG,q=True,v1=True)
# Reset
self.reset(ctrl,key,keyPrevious)
def resetAllFromUI(self,char):
'''
Reset spaces constraint offsets for all controls in the specified character namespace
@param char: Namespace of the character to reset spaces node offsets for
@type char: str
'''
# Get key options
key = mc.checkBoxGrp(self.uiKeyCBG,q=True,v1=True)
keyPrevious = mc.checkBoxGrp(self.uiKeyPreviousCBG,q=True,v1=True)
# Reset Spaces Nodes
for spacesNode in mc.ls(char+'*_spn'):
self.reset(spacesNode,key,keyPrevious)
def keyAllFromUI(self,char):
'''
Key all spaces nodes in the specified character namespace
@param char: Namespace of the character to key spaces node for
@type char: str
'''
# Key Spaces Nodes
for spacesNode in mc.ls(char+'*_spn'): self.key(spacesNode)
|
try:
from django.urls import path
from django.contrib import admin
urlpatterns = [path('admin', admin.site.urls)]
except ImportError:
# django < 2.0
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [url(r'^admin/', include(admin.site.urls))]
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Recomputation IPU Keras layers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ipu.ops import pipelining_ops
class RecomputationCheckpoint(Layer):
"""
Layer for checkpointing values in a computational pipeline stage.
When recomputation is enabled, these values will not be recomputed and they
will be stored in memory instead.
This layer can reduce memory liveness peaks when using recomputation if
there are too many activations which need to be recomputed before the
backpropagation operations can be executed.
This layer should be used with the
`RecomputationMode.RecomputeAndBackpropagateInterleaved` pipelining
recomputation mode.
Note that this layer has no effect when used with the
`RecomputationMode.RecomputeThenBackpropagate` pipelining
recomputation mode.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs, **kwargs):
"""
Checkpoint the input tensors.
Args:
inputs: A tensor or a structure of tensors which should be checkpointed.
Returns:
A tensor or a structure of tensors which matches shape and type of
`inputs`.
"""
return pipelining_ops.recomputation_checkpoint(inputs, name=self.name)
def get_config(self):
return {}
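# Minimal usage sketch (assumption: a Keras functional model built inside a
# pipeline stage; the surrounding IPU and pipelining configuration is omitted
# and hypothetical):
#
#   x = tf.keras.layers.Dense(64, activation="relu")(inputs)
#   x = RecomputationCheckpoint()(x)  # activations up to this point are stored, not recomputed
#   outputs = tf.keras.layers.Dense(10)(x)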
|
#
# PySNMP MIB module ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:43:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
dot1dBasePort, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePort")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, ObjectIdentity, MibIdentifier, NotificationType, iso, Unsigned32, Counter32, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Counter64, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ObjectIdentity", "MibIdentifier", "NotificationType", "iso", "Unsigned32", "Counter32", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Counter64", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyxelBridgeControlProtocolTransparency = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setOrganization('Enterprise Solution ZyXEL')
zyxelBridgeControlProtocolTransparencySetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1))
zyBridgeControlProtocolTransparencyState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyState.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2), )
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortTable.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortEntry.setStatus('current')
zyBridgeControlProtocolTransparencyPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("peer", 0), ("tunnel", 1), ("discard", 2), ("network", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyPortMode.setStatus('current')
mibBuilder.exportSymbols("ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB", zyxelBridgeControlProtocolTransparencySetup=zyxelBridgeControlProtocolTransparencySetup, zyxelBridgeControlProtocolTransparency=zyxelBridgeControlProtocolTransparency, PYSNMP_MODULE_ID=zyxelBridgeControlProtocolTransparency, zyxelBridgeControlProtocolTransparencyPortTable=zyxelBridgeControlProtocolTransparencyPortTable, zyxelBridgeControlProtocolTransparencyPortEntry=zyxelBridgeControlProtocolTransparencyPortEntry, zyBridgeControlProtocolTransparencyPortMode=zyBridgeControlProtocolTransparencyPortMode, zyBridgeControlProtocolTransparencyState=zyBridgeControlProtocolTransparencyState)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Bitcash documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 20 15:41:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from bitcash import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinxcontrib.fulltoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bitcash'
copyright = '2017, Ofek Lev'
author = 'Ofek Lev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html',
'hacks.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html', 'hacks.html']
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'ofek',
'github_repo': 'bitcash',
'github_banner': True,
'show_related': False
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bitcashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Bitcash.tex', 'Bitcash Documentation',
'Ofek Lev', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bitcash', 'Bitcash Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Bitcash', 'Bitcash Documentation',
author, 'Bitcash', 'One line description of project.',
'Miscellaneous'),
]
|
import geopandas as gpd
# Networkx is very slow here
gdf = gpd.read_file(r"C:\Users\bruno\Downloads\snelwegen_provincie.geojson")
gdf
|
import socket
import sys
ESP_IP = '192.168.7.1'
PORT = 10000
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('try to connect')
sock.connect((ESP_IP, PORT))
print('connected...')
data = sock.recv(255)
print('msg: ', data.decode())
sock.close()
|
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import types
from oslo_utils import netutils
class HostAddressPortOpt(cfg.Opt):
"""Option for HostAddressPortType.
Accept hostname or ip address with TCP/IP port number.
"""
def __init__(self, name, **kwargs):
ip_port_type = HostAddressPortType()
super(HostAddressPortOpt, self).__init__(name,
type=ip_port_type,
**kwargs)
class HostAddressPortType(types.HostAddress):
"""HostAddress with additional port."""
def __init__(self, version=None):
type_name = 'ip and port value'
super(HostAddressPortType, self).__init__(version, type_name=type_name)
def __call__(self, value):
addr, port = netutils.parse_host_port(value)
        # NOTE(gmann): parse_host_port() returns the port as None if no port
        # is supplied in value, so convert it to the string 'None' here;
        # otherwise Port() would fail on a NoneType instead of raising a
        # clear validation error.
port = 'None' if port is None else port
addr = self.validate_addr(addr)
port = self._validate_port(port)
if not addr and not port:
            raise ValueError('%s is not a valid ip with optional port' % value)
return '%s:%d' % (addr, port)
@staticmethod
def _validate_port(port):
return types.Port()(port)
def validate_addr(self, addr):
try:
addr = self.ip_address(addr)
except ValueError:
try:
addr = self.hostname(addr)
except ValueError:
raise ValueError("%s is not a valid host address", addr)
return addr
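# Minimal usage sketch (the option name and default below are illustrative,
# not part of this module):
#
#   OPTS = [HostAddressPortOpt('listen_address',
#                              default='127.0.0.1:8070',
#                              help='Host address and port to listen on')]
#   cfg.CONF.register_opts(OPTS)
#   # cfg.CONF.listen_address -> '127.0.0.1:8070' (validated as "<addr>:<port>")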
|
#!/usr/bin/env python3
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import numpy as np
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5, 5))
kernel1= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
aratio = 1.0
def nothing(x):
pass
# *********************************************************************************************************************
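# adjust_gamma builds a 256-entry lookup table mapping each pixel value v to
# 255 * (v / 255) ** (1 / gamma) and applies it with cv2.LUT, i.e. standard
# gamma correction (gamma > 1 brightens, gamma < 1 darkens the image).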
def adjust_gamma(image, gamma=1.0):
if gamma == 0:
gamma = 0.01
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
# *********************************************************************************************************************
img1= np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('GAMMA')
cv2.createTrackbar('g', 'GAMMA', 1, 10, nothing)
def callback(data):
global aratio
br = CvBridge()
frame1 = br.imgmsg_to_cv2(data)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2BGR)
frame = frame1
gamma = (cv2.getTrackbarPos('g', 'GAMMA')) * 0.1
cv2.imshow('GAMMA', img1)
frame = adjust_gamma(frame, gamma=gamma)
cv2.putText(frame, "g={}".format(gamma), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
#cv2.imshow("camera", frame)
hsv = frame
    hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)  # convert the BGR frame to HSV
hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
# define range of yellow color in HSV
lower_yellow = np.array([29, 86, 6])
upper_yellow = np.array([64, 255, 255])
    # Threshold the HSV image to keep only the yellow range
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel1)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.dilate(mask, kernel1, iterations=13)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
# BOUNDING RECTANGLE .............................................................................................
conts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
conts = np.array(conts)
if len(conts) > 0:
for i, contour in enumerate(conts):
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box = np.int0(box)
aratio = (rect[1][0] / rect[1][1])
if (aratio > 0.9) and (aratio < 1.1):
cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
#print("Aspect Ratio", aratio)
# HOUGH CIRCLES........................................................................................................
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 200, param1=255, param2=20, minRadius=0, maxRadius=0)
# # print circles
# ensure at least some circles were found
if circles is not None:
# convert the (x, y) coordinates and radius of the circles to integers
circles = np.round(circles[0, :]).astype("int")
# loop over the (x, y) coordinates and radius of the circles
for (x, y, r) in circles:
# draw the circle in the output image, then draw a rectangle in the image
# corresponding to the center of the circle
if (aratio > 0.9) and (aratio < 1.1):
cv2.circle(res, (x, y), r, (0, 255, 0), 4)
cv2.rectangle(res, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
cv2.putText(frame, "BALL DETECTED", (430, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(255, 0, 0),
3)
# DISPLAY................................................................................................................
cv2.putText(frame1, "ORIGINAL FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.putText(frame, "OUTPUT FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.putText(res, "RESULTANT", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
horizontal1 = np.hstack([frame1,frame])
horizontal2 = np.hstack((mask,res))
vertical = np.vstack((horizontal1,horizontal2))
'''cv2.imshow('GAMMA CORRECTED', frame)
cv2.imshow('MASK', mask)
cv2.imshow('RESULT', res)
cv2.imshow('ORIGINAL FRAME', frame1)'''
cv2.putText(vertical, "MASK", (10, 940), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.imshow('RESULT', vertical)
# .....................................................................................................................
k = cv2.waitKey(5) & 0xFF
if k == 27:
quit()
def listener():
rospy.init_node('listener', anonymous=True,disable_signals=True)
rospy.Subscriber('/d435/camera/color/image_raw', Image, callback)
rospy.spin()
cv2.destroyAllWindows()
if __name__ == '__main__':
listener()
|
'''
Openstack App for Splunk
Copyright (c) 2017, Great Software Laboratory Private Limited.
All rights reserved.
Contributor: Vikas Sanap [vikas.sanap@gslab.com], Basant Kumar [basant.kumar@gslab.com]
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the "Great Software Laboratory Private Limited" nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#!/usr/bin/python
'''
This script fetches routers stats information from OpenStack API
Author: Basant Kumar, GSLab
'''
#Import from standard libraries
import sys
import argparse
import requests
from authentication import *
from pprint import pprint
import json
import os
#Import from own classes
from dict_operations import *
from credentials import *
def main():
#Variable declaration
app_name = 'openstack_app'
user_name = ''
password = ''
session_key = sys.stdin.readline().strip()
splunk_credential = credential(app_name,user_name)
user_name,password = splunk_credential.getPassword(session_key)
base_url = ''
running_routers_count = 0
stopped_routers_count = 0
auth_token,auth_services = login(user_name,password)
for service in auth_services['token']['catalog']:
if service['name'] == 'neutron':
base_url = service['endpoints'][2]['url']
headers = {'content-type': 'application/json','X-Auth-Token':auth_token}
    response = requests.get(base_url + '/v2.0/routers.json',headers=headers).json()
for router in response['routers']:
if router["status"] == 'ACTIVE':
running_routers_count = running_routers_count + 1
else:
stopped_routers_count = stopped_routers_count + 1
#Print console line with routers stats information
print "running_routers_count="+str(running_routers_count)+",stopped_routers_count="+str(stopped_routers_count)
if __name__ == "__main__":
main()
|
#line = r'''execute if score waveGlowTimer glowTimer matches %s run tag @e[type=!player,type=!dolphin,distance=%s,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}] add madeGlowing'''
#type=!player,type=!dolphin,distance=16..20,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}
line = r'''execute if score waveGlowTimer glowTimer matches %s if entity @a[distance=%s] run tag @s add madeGlowing'''
bandDistance = 4
bandDuration = 0
minDistance = 16
maxDistance = 64
timeMod = (3 * bandDistance)
distMod = maxDistance - minDistance
def dotdotspan(start, end):
if start != end:
return "%s..%s" % (start, end)
return str(start)
maxDistance += (minDistance - maxDistance) % timeMod
print("#NOTE: The conditions for waveGlowTimer wrapping in 'dotick' must be made to match the maximum count in this file (%r)" % (timeMod - 1,))
#print(r'''tag @e[type=!player,type=!dolphin,distance=..%s,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}] add madeGlowing''' % (minDistance-1,))
print(r'''execute if entity @a[distance=%s] run tag @s add madeGlowing''' % (minDistance-1,))
for ii, dd in enumerate(range(minDistance, maxDistance)):
startTime = ii % timeMod
endTime = (startTime + bandDuration) % timeMod
startDist = (dd - minDistance) % distMod + minDistance
endDist = (startDist + bandDistance - minDistance) % distMod + minDistance
if endTime != startTime + bandDuration:
if endDist != startDist + bandDistance:
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(0, endTime), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(minDistance, endDist)))
print(line % (dotdotspan(0, endTime), dotdotspan(minDistance, endDist)))
else:
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(dd, dd + bandDistance)))
print(line % (dotdotspan(0, endTime), dotdotspan(dd, dd + bandDistance)))
else:
if endDist != startDist + bandDistance:
print(line % (dotdotspan(startTime, endTime), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(startTime, endTime), dotdotspan(minDistance, endDist)))
else:
print(line % (dotdotspan(startTime, endTime), dotdotspan(dd, dd + bandDistance)))
|
import os
import re
import secrets
import string
import pulumi
from pulumi import ResourceOptions
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
from azure.common.credentials import ServicePrincipalCredentials
def normalize_name(name):
regex = re.compile('[^a-zA-Z0-9]')
replaced = regex.sub('', name)
normalized = replaced[:23] if len(replaced) > 23 else replaced
return normalized
def _get_kvclient():
def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(
client_id = os.getenv('ARM_CLIENT_ID'),
secret = os.getenv('ARM_CLIENT_SECRET'),
tenant = os.getenv('ARM_TENANT_ID'),
resource = "https://vault.azure.net"
)
token = credentials.token
return token['token_type'], token['access_token']
kv_client = KeyVaultClient(KeyVaultAuthentication(auth_callback))
return kv_client
def get_kv_secret(name):
kv_client = _get_kvclient()
secret = kv_client.get_secret("https://placeholder.vault.azure.net/", name, KeyVaultId.version_none).value
return secret
def _get_password():
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for i in range(20))
return password
config = pulumi.Config('aks')
PREFIX = pulumi.get_stack()
PASSWORD = config.get('password') or _get_password()
SSHKEY = config.get('sshkey') or 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxinIAIDDCradZPAgX5GzBLv00u4rigOLUbU00E44FrfMTqu5wXiejJ4ycSb1bI+//ZNgaB2UYRbPL7A9OUKY+K4sX5O84Q6DPMjo/90IANHVTLf3xTaSc7hpvXOtIjJTJeiamxClgnTAcR55RV/j9/Wptxa8GGcRmRCcSmJUkx5AZTFI+s8aF0W3aeHHRw7TxNKBuwrX7FDcHyGKvdkFg4OP863Xe5hp5ql1C3XibmCOp1CMPIU2hCmGOy1LGbOf/Pa+QKAdtUSrPNK/jBWvPWo0k02Ii0JtMAdlpVqnJc3czNIp5gEqZCRCGEdkb/kZnJiMRZhmLBYnC8tiMxvZj core@k8s'
LOCATION = config.get('location') or 'westeurope'
NAMESPACE = config.get('namespace') or 'flux'
args_flux = [
"--ssh-keygen-dir=/var/fluxd/keygen",
"--k8s-secret-name=flux-ssh",
"--memcached-hostname=memcached",
"--memcached-service=",
"--git-url=git@ssh.dev.azure.com:v3/xxxxxx",
"--git-branch=master",
"--git-path=flux/cluster-setup,flux/{}".format(PREFIX),
"--git-user=Weave Flux",
"--git-email=support@weave.works",
"--git-set-author=false",
"--git-poll-interval=5m",
"--git-label={}".format(PREFIX),
"--git-timeout=20s",
"--sync-interval=5m",
"--git-ci-skip=false",
"--registry-exclude-image=*",
"--registry-poll-interval=5m",
"--registry-rps=200",
"--registry-burst=125",
"--registry-trace=false"
]
args_memcached = ["-m 64","-p 11211","-I 1m"]
volumeMounts_flux = [
{
"name": "kubedir",
"mountPath": "/root/.kubectl"
},
{
"name": "git-key",
"mountPath": "/etc/fluxd/ssh",
"readOnly": True
},
{
"name": "git-keygen",
"mountPath": "/var/fluxd/keygen"
}
]
volumes_flux = [
{
"name": "kubedir",
"configmap": {
"name": "flux-configmap"
}
},
{
"name": "git-key",
"secret": {
"secretName": "flux-ssh",
"defaultMode": 0o400 # has to be in octal
}
},
{
"name": "git-keygen",
"emptyDir": {
"medium": "Memory"
}
}
]
def _gen_service(name, ports, custom_provider, dependencies=[], service_type="ClusterIP"):
ports = [{"port": port, "target_port": port,
"name": str(port)} for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
Service(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"ports": ports,
"selector": labels,
"type": service_type,
"sessionAffinity": "ClientIP"
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def _gen_deployment(name, ports, image, custom_provider, serviceAccount, args=[], dependencies=[],
replicas=1, resources={}, env={}, volumes=[], volume_mounts=[]):
keys = ['container_port']
ports = [dict.fromkeys(keys, port) for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
container = {
"name": name,
"image": image,
"imagePullPolicy": "Always",
"resources": resources,
"ports": ports,
"args": args,
"env": [
{
"name": "KUBECONFIG",
"value": "/root/.kubectl/config"
}
],
"volumeMounts": volume_mounts
}
Deployment(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"selector": {
"match_labels": labels
},
"replicas": replicas,
"template": {
"metadata": {
"labels": labels
},
"spec": {
"containers": [
container
],
"serviceAccount": serviceAccount,
"volumes": volumes
}
}
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def gen_application(name, ports, image, customProvider, dependencies=[], serviceAccount="default", volumes=False, volumeMounts=False):
args = globals()["args_{}".format(name)]
if volumes:
volumes = globals()["volumes_{}".format(name)]
else:
volumes = []
if volumeMounts:
volumeMounts = globals()["volumeMounts_{}".format(name)]
else:
volumeMounts = []
_gen_service(name, ports, customProvider)
_gen_deployment(name, ports, image, customProvider, serviceAccount, args=args, dependencies=dependencies, volumes=volumes, volume_mounts=volumeMounts)
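# Minimal usage sketch (hypothetical image tags, port numbers and provider
# object; the flux-ssh secret and flux-configmap referenced by volumes_flux are
# assumed to exist already):
#
#   gen_application("memcached", [11211], "memcached:1.5.20", custom_provider)
#   gen_application("flux", [3030], "docker.io/fluxcd/flux:1.21.1", custom_provider,
#                   serviceAccount="flux", volumes=True, volumeMounts=True)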
|
#!/usr/bin/env python
import argparse
from .database import YamlDatabase as DB
from . import utils
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--scope', default='directory', help="flag scope")
parser.add_argument('-F', '--output-format', default='yaml', dest='format', help="output format")
parser.add_argument('-g', '--get', help="get a value")
parser.add_argument('-s', '--set', help="set a value")
parser.add_argument('-v', '--value', help="set a value")
parser.add_argument('-d', '--dump', action="store_true", help="dump the database")
args = parser.parse_args()
db = DB(scope=args.scope)
if args.get:
utils.print_formatted_message(db.get(query=args.get), format=args.format)
elif args.set:
utils.print_formatted_message(db.set(query=args.set, value=args.value), format=args.format)
elif args.dump:
utils.print_formatted_message(db.dump(), format=args.format)
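# Example invocations (the console-script name "flagdb" is hypothetical):
#   flagdb --scope directory --set build.debug --value true
#   flagdb --get build.debug
#   flagdb --dump --output-format yaml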
|
# coding: utf-8
from __future__ import unicode_literals
from spacy.lang.da import Danish
import pytest
@pytest.fixture(scope="session")
def da_nlp():
return Danish()
@pytest.mark.parametrize(
"string,lemma",
[
("affaldsgruppernes", "affaldsgruppe"),
("detailhandelsstrukturernes", "detailhandelsstruktur"),
("kolesterols", "kolesterol"),
("åsyns", "åsyn"),
],
)
def test_da_lemmatizer_lookup_assigns(da_nlp, string, lemma):
tokens = da_nlp(string)
assert tokens[0].lemma_ == lemma
@pytest.mark.parametrize(
"text,norm", [("akvarium", "akvarie"), ("bedstemoder", "bedstemor")]
)
def test_da_nlp_norm_exceptions(da_nlp, text, norm):
tokens = da_nlp(text)
assert tokens[0].norm_ == norm
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libcircle(AutotoolsPackage):
"""libcircle provides an efficient distributed queue on a cluster,
using self-stabilizing work stealing."""
homepage = "https://github.com/hpc/libcircle"
git = "https://github.com/hpc/libcircle.git"
url = "https://github.com/hpc/libcircle/releases/download/0.2.1-rc.1/libcircle-0.2.1-rc.1.tar.gz"
version('master', branch='master')
version('0.3.0', sha256='5ce38eb5b3c2b394bca1316310758f276c893dd3f4c15d7bc14ea05d3110ce58', url='https://github.com/hpc/libcircle/releases/download/v0.3/libcircle-0.3.0.tar.gz')
version('0.2.1-rc.1', sha256='5747f91cf4417023304dcc92fd07e3617ac712ca1eeb698880979bbca3f54865')
depends_on('mpi')
@when('@master')
def autoreconf(self, spec, prefix):
with working_dir(self.configure_directory):
# Bootstrap with autotools
bash = which('bash')
bash('./autogen.sh')
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = os.environ['DATA_DIR']
DATASETS = {
"coco_2017_train": {
"img_dir": "train2017",
"ann_file": "annotations/instances_train2017.json"
},
"coco_2017_val": {
"img_dir": "val2017",
"ann_file": "annotations/instances_val2017.json"
},
"coco_2014_train": {
"img_dir": "coco_train2014",
"ann_file": "annotations/instances_train2014.json"
},
"coco_2014_val": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_val2014.json"
},
"coco_2014_minival": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_minival2014.json"
},
"coco_2014_valminusminival": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_valminusminival2014.json"
},
"voc_2007_train": {
"data_dir": "voc/VOC2007",
"split": "train"
},
"voc_2007_train_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
},
"voc_2007_val": {
"data_dir": "voc/VOC2007",
"split": "val"
},
"voc_2007_val_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
},
"voc_2007_test": {
"data_dir": "voc/VOC2007",
"split": "test"
},
"voc_2007_test_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
},
"voc_2012_train": {
"data_dir": "voc/VOC2012",
"split": "train"
},
"voc_2012_train_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
},
"voc_2012_val": {
"data_dir": "voc/VOC2012",
"split": "val"
},
"voc_2012_val_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
},
"voc_2012_test": {
"data_dir": "voc/VOC2012",
"split": "test"
            # PASCAL VOC2012 did not make the test annotations available, so there is no json annotation
},
"cityscapes_fine_instanceonly_seg_train_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
},
"cityscapes_fine_instanceonly_seg_val_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
},
"cityscapes_fine_instanceonly_seg_test_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
}
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs["img_dir"]),
ann_file=os.path.join(data_dir, attrs["ann_file"]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "voc" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
data_dir=os.path.join(data_dir, attrs["data_dir"]),
split=attrs["split"],
)
return dict(
factory="PascalVOCDataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
suffix = ModelCatalog.C2_DETECTRON_SUFFIX
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
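# Example (illustrative, derived from the catalog entries above):
#   ModelCatalog.get("Caffe2Detectron/COCO/35857345/e2e_faster_rcnn_R-50-FPN_1x")
#   -> "https://dl.fbaipublicfiles.com/detectron/35857345/12_2017_baselines/"
#      "e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I/"
#      "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"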
|
import tensorflow.compat.v1 as tf
#from tensorflow.contrib import slim
import tf_slim as slim
from avod.core.avod_fc_layers import avod_fc_layer_utils
def build(fc_layers_config,
input_rois, input_weights,
num_final_classes, box_rep,
is_training,
end_points_collection):
"""Builds basic layers
Args:
fc_layers_config: Fully connected layers config object
input_rois: List of input roi feature maps
input_weights: List of weights for each input e.g. [1.0, 1.0]
num_final_classes: Final number of output classes, including
'Background'
box_rep: Box representation (e.g. 'box_3d', 'box_8c', 'box_4c')
is_training: Whether the network is training or evaluating
end_points_collection: End points collection to add entries to
Returns:
cls_logits: Output classification logits
offsets: Output offsets
angle_vectors: Output angle vectors (or None)
end_points: End points dict
"""
# Parse config
fusion_method = fc_layers_config.fusion_method
num_layers = fc_layers_config.num_layers
layer_sizes = fc_layers_config.layer_sizes
l2_weight_decay = fc_layers_config.l2_weight_decay
keep_prob = fc_layers_config.keep_prob
cls_logits, offsets, angle_vectors = \
_basic_fc_layers(num_layers=num_layers,
layer_sizes=layer_sizes,
input_rois=input_rois,
input_weights=input_weights,
fusion_method=fusion_method,
l2_weight_decay=l2_weight_decay,
keep_prob=keep_prob,
num_final_classes=num_final_classes,
box_rep=box_rep,
is_training=is_training)
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
return cls_logits, offsets, angle_vectors, end_points
def build_output_layers(tensor_in,
num_final_classes,
box_rep,
output):
"""Builds flattened output layers
Args:
tensor_in: Input tensor
num_final_classes: Final number of output classes, including
'Background'
box_rep: Box representation (e.g. 'box_3d', 'box_8c', 'box_4c')
Returns:
Output layers
"""
layer_out = None
if output == 'cls':
# Classification
layer_out = slim.fully_connected(tensor_in,
num_final_classes,
activation_fn=None,
scope='cls_out')
elif output == 'off':
# Offsets
off_out_size = avod_fc_layer_utils.OFFSETS_OUTPUT_SIZE[box_rep]
if off_out_size > 0:
layer_out = slim.fully_connected(tensor_in,
off_out_size,
activation_fn=None,
scope='off_out')
else:
layer_out = None
elif output == 'ang':
# Angle Unit Vectors
ang_out_size = avod_fc_layer_utils.ANG_VECS_OUTPUT_SIZE[box_rep]
if ang_out_size > 0:
layer_out = slim.fully_connected(tensor_in,
ang_out_size,
activation_fn=None,
scope='ang_out')
else:
layer_out = None
return layer_out
def _basic_fc_layers(num_layers, layer_sizes,
input_rois, input_weights, fusion_method,
l2_weight_decay, keep_prob,
num_final_classes, box_rep,
is_training):
if not num_layers == len(layer_sizes):
raise ValueError('num_layers does not match length of layer_sizes')
if l2_weight_decay > 0:
weights_regularizer = slim.l2_regularizer(l2_weight_decay)
else:
weights_regularizer = None
# Feature fusion
fused_features = avod_fc_layer_utils.feature_fusion(fusion_method,
input_rois,
input_weights)
output_names = ['cls', 'off', 'ang']
cls_logits = None
offsets = None
angles = None
with slim.arg_scope(
[slim.fully_connected],
weights_regularizer=weights_regularizer):
for output in output_names:
# Flatten
fc_drop = slim.flatten(fused_features,
scope=output + '_flatten')
for layer_idx in range(num_layers):
fc_name_idx = 6 + layer_idx
                # Fully connected layer for this output head
fc_layer = slim.fully_connected(fc_drop, layer_sizes[layer_idx],
scope=output + '_fc{}'.format(fc_name_idx))
fc_drop = slim.dropout(fc_layer,
keep_prob=keep_prob,
is_training=is_training,
scope=output + '_fc{}_drop'.format(fc_name_idx))
fc_name_idx += 1
if output == 'cls':
cls_logits= build_output_layers(fc_drop,
num_final_classes,
box_rep,
output)
elif output == 'off':
offsets = build_output_layers(fc_drop,
num_final_classes,
box_rep,
output)
elif output == 'ang':
angles = build_output_layers(fc_drop,
num_final_classes,
box_rep,
output)
return cls_logits, offsets, angles
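# Note: the three heads ('cls', 'off', 'ang') share the fused ROI features but
# each uses its own stack of fully connected + dropout layers; the offset and
# angle heads return None when the chosen box representation defines a
# zero-size output for them.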
|
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image, letterbox_image
import pandas as pd
import random
import pickle as pkl
import argparse
def get_test_input(input_dim, CUDA):
img = cv2.imread("dog-cycle-car.png")
img = cv2.resize(img, (input_dim, input_dim))
img_ = img[:,:,::-1].transpose((2,0,1))
img_ = img_[np.newaxis,:,:,:]/255.0
img_ = torch.from_numpy(img_).float()
img_ = Variable(img_)
if CUDA:
img_ = img_.cuda()
return img_
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = (letterbox_image(orig_im, (inp_dim, inp_dim)))
img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim
def write(x, img):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
color = random.choice(colors)
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
return img
def arg_parse():
"""
    Parse arguments for the detect module
"""
    parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')
parser.add_argument("--video", dest = 'video', help =
"Video to run detection upon",
default = "video.avi", type = str)
parser.add_argument("--dataset", dest = "dataset", help = "Dataset on which the network has been trained", default = "pascal")
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
parser.add_argument("--cfg", dest = 'cfgfile', help =
"Config file",
default = "cfg/yolov3-spp.cfg", type = str)
parser.add_argument("--weights", dest = 'weightsfile', help =
"weightsfile",
default = "yolov3-spp.weights", type = str)
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "416", type = str)
return parser.parse_args()
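# Example invocation (the script file name is hypothetical):
#   python video_demo.py --video video.avi --cfg cfg/yolov3-spp.cfg \
#       --weights yolov3-spp.weights --reso 416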
if __name__ == '__main__':
args = arg_parse()
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
start = 0
    CUDA = torch.cuda.is_available()
num_classes = 80
bbox_attrs = 5 + num_classes
print("Loading network.....")
model = Darknet(args.cfgfile)
model.load_weights(args.weightsfile)
print("Network successfully loaded")
model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
if CUDA:
model.cuda().half()
model(get_test_input(inp_dim, CUDA), CUDA)
model.eval()
videofile = 'video.avi'
cap = cv2.VideoCapture(videofile)
assert cap.isOpened(), 'Cannot capture source'
frames = 0
start = time.time()
while cap.isOpened():
ret, frame = cap.read()
if ret:
img, orig_im, dim = prep_image(frame, inp_dim)
im_dim = torch.FloatTensor(dim).repeat(1,2)
if CUDA:
img = img.cuda().half()
im_dim = im_dim.half().cuda()
write_results = write_results_half
predict_transform = predict_transform_half
output = model(Variable(img, volatile = True), CUDA)
output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
if type(output) == int:
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
cv2.imshow("frame", orig_im)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
im_dim = im_dim.repeat(output.size(0), 1)
scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)
output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2
output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2
output[:,1:5] /= scaling_factor
for i in range(output.shape[0]):
output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])
classes = load_classes('data/coco.names')
colors = pkl.load(open("pallete", "rb"))
list(map(lambda x: write(x, orig_im), output))
cv2.imshow("frame", orig_im)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
else:
break
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
from urllib.parse import urlparse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def get_code(self, data):
return int(data.splitlines()[0].split()[1])
def get_headers(self,data):
header = data.split("\r\n\r\n")[0].splitlines()
return " ".join(header[0].split()[1:]) + "\r\n" + "\r\n".join(header[1:]) + "\r\n"
def get_body(self, data):
return data.split("\r\n\r\n")[1]
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
body = ""
parsed_url = urlparse(url)
host = parsed_url.hostname
port = parsed_url.port
if not port:
if parsed_url.scheme.lower() == 'http':
port = 80
else:
port = 443
path = parsed_url.path if parsed_url.path else "/"
if parsed_url.query:
path += "?"
path += parsed_url.query
self.connect(host, port)
request = "GET {} HTTP/1.1\r\n".format(path)
request += "Host: {}\r\n".format(host)
request += "Accept: */*\r\n"
request += "Connection: close\r\n\r\n"
#print(request)
self.sendall(request)
# print("Request Sent")
response = self.recvall(self.socket)
# print("Response Recieved")
self.close()
code = self.get_code(response)
body = self.get_body(response)
header = self.get_headers(response)
print("\n#####Response Header#####")
print(header)
print("#######################\n")
print("\n*****Response Body*****")
print(body)
print("***********************\n")
return HTTPResponse(code, body)
def POST(self, url, args=None):
code = 500
body = ""
content = ""
parsed_url = urlparse(url)
host = parsed_url.hostname
port = parsed_url.port
if not port:
if parsed_url.scheme.lower() == 'http':
port = 80
else:
port = 443
path = parsed_url.path if parsed_url.path else "/"
if args:
content = ""
for key, value in args.items():
content += "{}={}&".format(key, value)
content = content[:-1]
content_len = len(content)
self.connect(host, port)
request = "POST {} HTTP/1.1\r\n".format(path)
request += "Host: {}\r\n".format(host)
request += "Content-Type: {}\r\n".format("application/x-www-form-urlencoded")
request += "Content-Length: {}\r\n\r\n".format(content_len)
request += "{}\r\n\r\n".format(content)
self.sendall(request)
response = self.recvall(self.socket)
self.close()
code = self.get_code(response)
body = self.get_body(response)
header = self.get_headers(response)
print("\n#####Response Header#####")
print(header)
print("#######################\n")
print("\n*****Response Body*****")
print(body)
print("***********************\n")
return HTTPResponse(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
|
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""DES library
"""
import laygo
import numpy as np
#from logic_layout_generator import *
from math import log
import yaml
import os
#import logging;logging.basicConfig(level=logging.DEBUG)
def generate_boundary(laygen, objectname_pfix, placement_grid,
devname_bottom, devname_top, devname_left, devname_right,
shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,
transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,
origin=np.array([0, 0])):
#generate a boundary structure to resolve boundary design rules
pg = placement_grid
#parameters
if shape_bottom == None:
shape_bottom = [np.array([1, 1]) for d in devname_bottom]
if shape_top == None:
shape_top = [np.array([1, 1]) for d in devname_top]
if shape_left == None:
shape_left = [np.array([1, 1]) for d in devname_left]
if shape_right == None:
shape_right = [np.array([1, 1]) for d in devname_right]
if transform_bottom == None:
transform_bottom = ['R0' for d in devname_bottom]
if transform_top == None:
transform_top = ['R0' for d in devname_top]
if transform_left == None:
transform_left = ['R0' for d in devname_left]
if transform_right == None:
transform_right = ['R0' for d in devname_right]
#bottom
dev_bottom=[]
dev_bottom.append(laygen.place("I" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,
shape=shape_bottom[0], transform=transform_bottom[0]))
for i, d in enumerate(devname_bottom[1:]):
dev_bottom.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDBTM'+str(i+1), templatename = d, gridname = pg, refinstname = dev_bottom[-1].name,
shape=shape_bottom[i+1], transform=transform_bottom[i+1]))
dev_left=[]
dev_left.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDLFT0', templatename = devname_left[0], gridname = pg, refinstname = dev_bottom[0].name, direction='top',
shape=shape_left[0], transform=transform_left[0]))
for i, d in enumerate(devname_left[1:]):
dev_left.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDLFT'+str(i+1), templatename = d, gridname = pg, refinstname = dev_left[-1].name, direction='top',
shape=shape_left[i+1], transform=transform_left[i+1]))
dev_right=[]
dev_right.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDRHT0', templatename = devname_right[0], gridname = pg, refinstname = dev_bottom[-1].name, direction='top',
shape=shape_right[0], transform=transform_right[0]))
for i, d in enumerate(devname_right[1:]):
dev_right.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDRHT'+str(i+1), templatename = d, gridname = pg, refinstname = dev_right[-1].name, direction='top',
shape=shape_right[i+1], transform=transform_right[i+1]))
dev_top=[]
dev_top.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDTOP0', templatename = devname_top[0], gridname = pg, refinstname = dev_left[-1].name, direction='top',
shape=shape_top[0], transform=transform_top[0]))
for i, d in enumerate(devname_top[1:]):
dev_top.append(laygen.relplace(name = "I" + objectname_pfix + 'BNDTOP'+str(i+1), templatename = d, gridname = pg, refinstname = dev_top[-1].name,
shape=shape_top[i+1], transform=transform_top[i+1]))
return [dev_bottom, dev_top, dev_left, dev_right]
def generate_deserializer(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m2m3,
routing_grid_m4m5, num_des=8, num_flop=1, m_des_dff=1, origin=np.array([0, 0])):
"""generate deserializer """
pg = placement_grid
rg_m2m3 = routing_grid_m2m3
rg_m4m5 = routing_grid_m4m5
tap_name='tap'
#ff_name = 'dff_1x'
#ff_rst_name = 'dff_strsth_1x'
ff_name = 'dff_'+str(m_des_dff)+'x'
ff_rst_name = 'dff_strsth_'+str(m_des_dff)+'x'
#Calculate layout size
x0=num_flop * (2*laygen.templates.get_template(ff_name, templib_logic).xy[1][0] + laygen.templates.get_template(ff_rst_name, templib_logic).xy[1][0]) \
+ 2*laygen.templates.get_template(tap_name, templib_logic).xy[1][0]
num_row=int((num_des/num_flop + 0.99))+1
#boundaries
m_bnd = int(x0 / laygen.templates.get_template('boundary_bottom').xy[1][0])
devname_bnd_left = []
devname_bnd_right = []
transform_bnd_left = []
transform_bnd_right = []
for i in range(num_row):
if i%2==0:
devname_bnd_left += ['nmos4_fast_left', 'pmos4_fast_left']
devname_bnd_right += ['nmos4_fast_right', 'pmos4_fast_right']
transform_bnd_left += ['R0', 'MX']
transform_bnd_right += ['R0', 'MX']
else:
devname_bnd_left += ['pmos4_fast_left', 'nmos4_fast_left']
devname_bnd_right += ['pmos4_fast_right', 'nmos4_fast_right']
transform_bnd_left += ['R0', 'MX']
transform_bnd_right += ['R0', 'MX']
[bnd_bottom, bnd_top, bnd_left, bnd_right] = generate_boundary(laygen, objectname_pfix='BND0',
placement_grid=pg,
devname_bottom=['boundary_bottomleft',
'boundary_bottom',
'boundary_bottomright'],
shape_bottom=[np.array([1, 1]), np.array([m_bnd, 1]),
np.array([1, 1])],
devname_top=['boundary_topleft', 'boundary_top',
'boundary_topright'],
shape_top=[np.array([1, 1]), np.array([m_bnd, 1]),
np.array([1, 1])],
devname_left=devname_bnd_left,
transform_left=transform_bnd_left,
devname_right=devname_bnd_right,
transform_right=transform_bnd_right,
origin=np.array([0, 0]))
#Calculate origins for placement
tap_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \
+ laygen.get_xy(obj = bnd_bottom[0].template, gridname = pg)
array_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \
+ laygen.get_xy(obj = bnd_bottom[0].template, gridname = pg) \
+ np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])
tapr_origin = tap_origin + m_bnd*np.array([laygen.get_xy(obj=laygen.get_template(name = 'boundary_bottom'), gridname = pg)[0], 0]) \
- np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])
FF0_origin = array_origin + np.array([0, laygen.get_xy(obj=laygen.get_template(name = 'inv_1x', libname = templib_logic), gridname = pg)[1]]) + \
np.array([0, laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[1]])
# placement
iffout=[]
iffin=[]
iffdiv=[]
iclkbuf=[]
idivbuf=[]
isp1x=[]
itapl=[]
itapr=[]
tf='R0'
if num_flop == 1: #Layout height reduction factor, no reduction
for i in range(num_row):
if i%2==0: tf='R0'
else: tf='MX'
if i==0: #Row for clock buffers
itapl.append(laygen.place(name = "I" + objectname_pfix + 'TAPL0', templatename = tap_name,
gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
itapr.append(laygen.place(name = "I" + objectname_pfix + 'TAPR0', templatename = tap_name,
gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.place(name = "I" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',
gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
itapr.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if i==1: #Reference FF: FFOUT1
iffout.append(laygen.place(name = "I" + objectname_pfix + 'FFOUT1', templatename = ff_name,
gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
else:
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
refi = iffout[-1].name
iffin.append(laygen.relplace(name = "I" + objectname_pfix + 'FFIN'+str(i), templatename = ff_name,
gridname = pg, refinstname = refi, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
refi2 = iffin[-1].name
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = refi2, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
if num_flop == 2: #Layout height reduced by half
for i in range(num_row):
if i%2==0: tf='R0'
else: tf='MX'
            if i==0: #Row for clock buffers
itapl.append(laygen.place(name = "I" + objectname_pfix + 'TAPL0', templatename = tap_name,
gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
itapr.append(laygen.place(name = "I" + objectname_pfix + 'TAPR0', templatename = tap_name,
gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.place(name = "I" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',
gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
itapr.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if i==1: #Reference FF: FFOUT1 and FFOUT2
iffout.append(laygen.place(name = "I" + objectname_pfix + 'FFOUT1', templatename = ff_name,
gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT2', templatename = ff_name,
gridname = pg, refinstname = iffout[0].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
                elif i==(num_row-1): #The last row, depending on whether num_des is even or odd
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if num_des%2==0: #If not, space should be placed rather than FF
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else: #FFOUTs will be the reference for FFIN and FFDIV
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
for j in range(num_des): #Relplace of FFIN and the left side of FFDIV
if iffout[j].transform=='MX': tf='MX'
else: tf='R0'
iffin.append(laygen.relplace(name = "I" + objectname_pfix + 'FFIN'+str(j+1), templatename = ff_name,
gridname = pg, refinstname = iffout[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
if j%2==0:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(int(j/2+1)), templatename = ff_rst_name,
gridname = pg, refinstname = iffin[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
for i in range(num_row, num_des+1): #Right side of FFDIV
if num_des%2==1:
if i%2==0: tf='R0'
else: tf='MX'
if num_des%2==0:
if i%2==0: tf='MX'
else: tf='R0'
if i==num_row: #Even: relplaced by top FFDIV, odd: relplaced by second FFDIV from top
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'bottom', template_libname=templib_logic))
#Space placement at the first row
space_name = 'space_1x'
space4x_name = 'space_4x'
space_width = laygen.get_xy(obj=laygen.get_template(name = space_name, libname = templib_logic), gridname = pg)[0]
space4_width = laygen.get_xy(obj=laygen.get_template(name = space4x_name, libname = templib_logic), gridname = pg)[0]
inv_width=[]
for i in (1,2,8,32):
inv_width.append(laygen.get_xy(obj=laygen.get_template(name = 'inv_' + str(i) + 'x', libname = templib_logic), gridname = pg)[0])
blank_width = tapr_origin[0] - array_origin[0] - 2 * (inv_width[0]+inv_width[1]+inv_width[2]+inv_width[3])
m_space4 = int(blank_width / space4_width)
m_space1 = int((blank_width-m_space4*space4_width)/space_width)
ispace4=laygen.relplace(name = "I" + objectname_pfix + 'SPACE4', templatename = space4x_name,
gridname = pg, refinstname = iclkbuf[3].name, transform='R0', shape=np.array([m_space4-1,1]),
template_libname=templib_logic)
ispace1=laygen.relplace(name = "I" + objectname_pfix + 'SPACE1', templatename = space_name,
gridname = pg, refinstname = ispace4.name, transform='R0', shape=np.array([m_space1+4,1]),
template_libname=templib_logic)
#Space placement at the last row for odd num_des
m_ff_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0] / space_width)
m_ffrst_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_rst_name, libname = templib_logic), gridname = pg)[0] / space_width)
if (num_des%2)==1:
if num_flop==2:
ispace_out=laygen.relplace(name = "I" + objectname_pfix + 'SPACEOUT', templatename = space_name,
gridname = pg, refinstname = iffout[num_des-1].name, transform=iffout[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_in=laygen.relplace(name = "I" + objectname_pfix + 'SPACEIN', templatename = space_name,
gridname = pg, refinstname = iffin[num_des-1].name, transform=iffin[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_div=laygen.relplace(name = "I" + objectname_pfix + 'SPACEDIV', templatename = space_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)].name, transform=iffdiv[int(num_des/2)].transform, shape=np.array([m_ffrst_space,1]),
template_libname=templib_logic)
#Internal Pins
ffin_in_xy=[]
ffin_in_xy45=[]
ffin_out_xy=[]
ffout_in_xy=[]
ffout_out_xy=[]
ffdiv_in_xy=[]
ffdiv_in_xy45=[]
ffdiv_out_xy=[]
ffdiv_rst_xy=[]
ffdiv_st_xy=[]
for i in range(num_des):
ffin_in_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m3m4))
ffin_out_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'O', rg_m3m4))
ffout_in_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'I', rg_m3m4))
ffout_out_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4))
ffdiv_in_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m3m4))
ffdiv_out_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'O', rg_m3m4))
ffdiv_rst_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'RST', rg_m3m4))
ffdiv_st_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'ST', rg_m3m4))
ffin_in_xy45.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m4m5))
ffdiv_in_xy45.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m4m5))
# Route
for i in range(num_des):
if num_flop==1: #Routing offset selection for rows in R0 and MX
if iffin[i].transform=='MX': offset=1
if iffin[i].transform=='R0': offset=4
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
if num_flop==2: #Offset_div would be different because of different placement
if i in range(int((num_des+1)/2)):
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
else:
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=10
if iffdiv[i].transform=='R0': offset_div=13
if i in range(num_des-1):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-in
ffin_out_xy[i][0], ffin_in_xy[i+1][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div-to-div
ffdiv_out_xy[i][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_out_xy[i][1][1]+7-offset_div, rg_m3m4)
#[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_in_xy[i+1][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_in_xy[i+1][0][1], rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-out
ffin_out_xy[i][0], ffout_in_xy[i][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
if m_des_dff==1:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
#CLK Buffer
for i in range(3):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iclkbuf[i + 1].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(idivbuf[2 - i].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
#DIVCLK Route
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0][1] + 3, rg_m3m4)
for i in range(num_des):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffout[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0][1] + 5, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffin[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
#RST Route
for i in range(num_des):
if i in range(int((num_des+1)/2)): #First half of FFDIVs
if not i==int((num_des+1)/2)-1:
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
#[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)
#[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_rst_xy[i][0], ffdiv_st_xy[i+1][0], ffdiv_rst_xy[i][1][1]+5, rg_m3m4)
else: #Second half of FFDIVs
if not i==num_des-1:
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
#[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)
#[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[0].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'ST', rg_m2m3)[0], rg_m2m3)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'RST', rg_m2m3)[0], rg_m2m3)
#Pin
clkin_xy=laygen.get_inst_pin_xy(iclkbuf[0].name, 'I', rg_m3m4)
rclkin=laygen.route(None, laygen.layers['metal'][3], xy0=clkin_xy[0], xy1=np.array([clkin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rclkin, rg_m3m4, "clk", laygen.layers['pin'][3], size=0, direction='left')
    divin_xy=laygen.get_inst_pin_xy(idivbuf[-1].name, 'I', rg_m3m4) #last (smallest) divider buffer; avoids divbuf_list, which is only defined under __main__
rdivin=laygen.route(None, laygen.layers['metal'][3], xy0=divin_xy[0], xy1=np.array([divin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rdivin, rg_m3m4, "div<0>", laygen.layers['pin'][3], size=0, direction='left')
din_xy34=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m3m4)
din_xy45=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
din_xy34[0], np.array([din_xy34[0][0]-1,0]), din_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=din_xy34[0], xy1=din_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "in", laygen.layers['pin'][3], size=4, direction='bottom')
for i in range(num_des):
datao_xy = laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4)
laygen.pin(name='dout<'+str(i)+'>', layer=laygen.layers['pin'][3], xy=datao_xy, gridname=rg_m3m4)
clkdiv_xy = laygen.get_inst_pin_xy(iffout[-1].name, 'CLK', rg_m3m4)
laygen.pin(name='clk_div', layer=laygen.layers['pin'][3], xy=clkdiv_xy, gridname=rg_m3m4)
rst_xy34=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m3m4)
rst_xy45=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
rst_xy34[0], np.array([rst_xy34[0][0]-2,0]), rst_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=rst_xy34[0], xy1=rst_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "RST", laygen.layers['pin'][3], size=4, direction='bottom')
# power pin
pwr_dim=laygen.get_xy(obj =itapl[-1].template, gridname=rg_m2m3)
rvdd = []
rvss = []
if num_row%2==0: rp1='VSS'
else: rp1='VDD'
print(int(pwr_dim[0]/2))
for i in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i, 0]), xy1=np.array([2*i, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2+1, 0]), xy1=np.array([2*i+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
for i in range(num_row):
for j in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j, 0]), xy1=np.array([2*j, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+1, 0]), xy1=np.array([2*j+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2+1, 0]), xy1=np.array([2*j+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2, 0]), xy1=np.array([2*j+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
if __name__ == '__main__':
laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
import imp
try:
imp.find_module('bag')
laygen.use_phantom = False
except ImportError:
laygen.use_phantom = True
tech=laygen.tech
utemplib = tech+'_microtemplates_dense'
logictemplib = tech+'_logic_templates'
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
#library load or generation
workinglib = 'serdes_generated'
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
if os.path.exists(workinglib+'.yaml'): #generated layout file exists
laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)
laygen.templates.sel_library(utemplib)
#grid
pg = 'placement_basic' #placement grid
rg_m1m2 = 'route_M1_M2_cmos'
rg_m1m2_thick = 'route_M1_M2_thick'
rg_m2m3 = 'route_M2_M3_cmos'
rg_m3m4 = 'route_M3_M4_basic'
rg_m4m5 = 'route_M4_M5_basic'
rg_m5m6 = 'route_M5_M6_basic'
rg_m1m2_pin = 'route_M1_M2_basic'
rg_m2m3_pin = 'route_M2_M3_basic'
#display
#laygen.display()
#laygen.templates.display()
#laygen.save_template(filename=workinglib+'_templates.yaml', libname=workinglib)
mycell_list = []
#load from preset
load_from_file=True
yamlfile_spec="serdes_spec.yaml"
yamlfile_size="serdes_size.yaml"
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
cell_name='des_1to'+str(specdict['num_des'])
num_des=specdict['num_des']
num_flop=specdict['num_flop']
m_des_dff=sizedict['m_des_dff']
clkbuf_list=sizedict['des_clkbuf_list']
divbuf_list=sizedict['des_divbuf_list']
print(cell_name+" generating")
mycell_list.append(cell_name)
laygen.add_cell(cell_name)
laygen.sel_cell(cell_name)
generate_deserializer(laygen, objectname_pfix='DES', templib_logic=logictemplib,
placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m4m5=rg_m4m5, num_des=num_des,
num_flop=num_flop, m_des_dff=m_des_dff, origin=np.array([0, 0]))
laygen.add_template_from_cell()
laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)
#bag export, if bag does not exist, gds export
import imp
try:
imp.find_module('bag')
import bag
prj = bag.BagProject()
for mycell in mycell_list:
laygen.sel_cell(mycell)
laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap") # change layermapfile
|
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from .models import FakeNews
from ..utils.base_test import AuthenticationTestTemplate
class FakeNewsListTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_list_10_obj_paginated_token(self):
fakenews = []
user = self.create_normal_user("author")
for i in range(0, 11):
fakenews.append(
FakeNews(
author=user,
title=f"test create fakenews title{i}",
subtitle=f"test create fakenews subtitle{i}",
body=f"test create fakenews body{i}",
)
)
FakeNews.objects.bulk_create(fakenews)
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data.get("results")), 10)
self.assertEqual(resp.data.get("count"), 11)
self.assertIsNotNone(resp.data.get("next"))
class FakeNewsDetailTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_detail_obj_token(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), fakenews.title)
def test_detail_not_found(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": "slug-not-found"}))
self.assertEqual(resp.status_code, 404)
class FakeNewsCreateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.post
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_create_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(FakeNews.objects.last().slug, resp.data.get("slug"))
def test_create_already_exists(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "fake news with this title already exists.")
def test_create_without_fields_required(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "This field is required.")
class FakeNewsDeleteTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.delete
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_delete_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.delete(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 204)
class FakeNewsPatchTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.patch
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_patch_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.patch(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
class FakeNewsUpdateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.put
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_update_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.put(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
# deleting the imported base class keeps the test runner from collecting it directly (an easy way to make it "abstract")
del AuthenticationTestTemplate
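# Illustrative sketch (assumption: AuthenticationTestTemplate lives in
# ..utils.base_test and is not shown here). The test classes above only
# override three hooks; the base class presumably combines them roughly like:
#
#   def _make_request(self):
#       url = reverse_lazy(self._get_basename_url(), kwargs=self._get_kwargs_url())
#       return self._get_callable_client_method_http()(url)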
|
# Ronschool.py
class Student:
    def __init__(self,name): # self is a special name for the instance itself / it must be included in every method of the class
self.name = name
# student1.name
# self = student1
self.exp = 0
self.lesson = 0
def Hello(self):
        print('Hello there! My name is {}'.format(self.name))
def Coding(self):
        print('{}: writing code..'.format(self.name))
self.exp += 5
self.lesson += 1
def ShowEXP(self):
        print('- {} has {} EXP'.format(self.name,self.exp))
        print('- has taken {} lessons so far'.format(self.lesson))
def AddEXP(self,score):
self.exp += score # self.exp = self.exp + score
self.lesson += 1
class SpecialStudent(Student):
def __init__(self,name,father):
super().__init__(name)
self.father = father
mafia = ['Bill Gates','Thomas Edison']
if father in mafia:
self.exp += 100
def AddEXP(self,score):
self.exp += (score * 3)
self.lesson += 1
def AskEXP(self,score=10):
        print('Teacher!!! Please give me some extra credit, say {} EXP'.format(score))
self.AddEXP(score)
if __name__ == '__main__':
print('========1 Jan 2021===============')
student0 = SpecialStudent('Mark Zuckerberg','Bill Gates')
student0.AskEXP()
student0.ShowEXP()
student1 = Student('Albert')
print(student1.name)
student1.Hello()
print('--------------')
student2 = Student('Steve')
print(student2.name)
student2.Hello()
print('========2 Jan 2021===============')
    print('---------Who wants to learn coding?----(10 exp)------------')
student1.AddEXP(10)
print('========3 Jan 2021===============')
    student1.name = 'Albert Einstein' # the name can be changed directly, and every method will use the new value
    print('How much EXP does each student have now?')
print(student1.name,student1.exp)
print(student2.name,student2.exp)
print('========4 Jan 2021===============')
for i in range(5):
student2.Coding()
student1.ShowEXP()
student2.ShowEXP()
|
"""Console script for mspsmc."""
import argparse
import sys
def main():
"""Console script for mspsmc."""
parser = argparse.ArgumentParser()
parser.add_argument("_", nargs="*")
args = parser.parse_args()
print("Arguments: " + str(args._))
print("Replace this message by putting your code into " "mspsmc.cli.main")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
from scipy import signal
from scipy import misc
from scipy import stats as st
import numpy as np
W = 128
L = 128
Body_Width = 3
Border = Body_Width+1
Points = 10
Noise_Max = 10
Body_Separation = 15
Body_Scale = 30
OvScale = 3
def gkern(kernlen=21, nsig=3):
''' 2D Gaussian Kernel. '''
x = np.linspace(-nsig, nsig, kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d/kern2d.sum()
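# Quick sanity sketch (illustrative, not part of the original file): because
# of the final division, the kernel always sums to 1 regardless of size.
#
#   >>> k = gkern(kernlen=5, nsig=2)
#   >>> round(float(k.sum()), 6)
#   1.0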
def genBackground():
return np.random.rand(W,L)*(Noise_Max)
def genStarCoords():
while True:
star_cords = np.random.rand(Points,3) # N x [x,y,m]
star_cords = star_cords * np.array([[ W-2*Border , L-2*Border , Body_Scale ]])
star_cords = star_cords + np.ones((Points,3)) * np.array([[ Border, Border, Body_Separation ]])
bad = False
for ii in range(0, Points-1):
x0, y0, m0 = star_cords[ii,:]
for jj in range(ii+1, Points):
x1, y1, m1 = star_cords[jj,:]
if np.abs(x0 - x1) < 4*Border and np.abs(y0 - y1) < 4*Border:
'''
x = np.random.random() * (W-2*Border) + Border
y = np.random.random() * (W-2*Border) + Border
star_cords[jj,0] = x
star_cords[jj,1] = y
'''
bad = True
break
if np.abs(m0 - m1) < 5:
star_cords[jj,2] = m1 + 5
if not bad:
break
return star_cords
def starGauss(OvScale):
gausKern = gkern(Body_Width*OvScale, Body_Width/(OvScale/3))
gausKern = gausKern * (Body_Scale/np.max(np.max(gausKern)))
return gausKern
def genImage(star_cords):
# Overscale it
spots_O = np.zeros((W*OvScale, L*OvScale))
for (x,y,m) in star_cords:
x = OvScale * (x+0.5)
y = OvScale * (y+0.5)
x_0, y_0 = map(int, np.floor([x,y]))
x_1, y_1 = map(int, np.ceil([x,y]))
spots_O[x_0:x_1, y_0:y_1] = m
gausKern = starGauss(OvScale)
spots_B = signal.convolve2d(spots_O, gausKern, boundary='symm', mode='same')
spots = np.zeros((W,L))
for (x,y,m) in star_cords:
x = int(x)
y = int(y)
x0 = max(0, x-Body_Width-1)
x1 = min(W, x+Body_Width+1)
y0 = max(0, y-Body_Width-1)
y1 = min(L, y+Body_Width+1)
for ii in range(x0,x1+1):
for jj in range(y0, y1+1):
spots[ii,jj] = np.mean(spots_B[ii*OvScale:(ii+1)*OvScale, jj*OvScale:(jj+1)*OvScale])
final = np.trunc( np.clip(genBackground() + spots, 0, 255) )
return final
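if __name__ == '__main__':
    # Minimal demo sketch (not part of the original file): generate one
    # synthetic star-field frame from the helpers above and report a few
    # basic statistics. Uses only the module-level constants already defined.
    coords = genStarCoords()
    frame = genImage(coords)
    print('frame shape:', frame.shape)
    print('pixel range: %.1f .. %.1f' % (frame.min(), frame.max()))
    print('brightest catalogue value:', float(coords[:, 2].max()))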
|
#
# Copyright (c) 2020 Cord Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import logging
import os
from abc import ABC, abstractmethod
from typing import Dict, Optional
import cryptography
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_ssh_private_key,
)
import encord.exceptions
ENCORD_DOMAIN = "https://api.cord.tech"
ENCORD_PUBLIC_PATH = "/public"
ENCORD_PUBLIC_USER_PATH = "/public/user"
ENCORD_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_PATH
ENCORD_USER_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_USER_PATH
WEBSOCKET_PATH = "/websocket"
WEBSOCKET_DOMAIN = "wss://message-api.cord.tech"
WEBSOCKET_ENDPOINT = WEBSOCKET_DOMAIN + WEBSOCKET_PATH
_CORD_PROJECT_ID = "CORD_PROJECT_ID"
_ENCORD_PROJECT_ID = "ENCORD_PROJECT_ID"
_CORD_DATASET_ID = "CORD_DATASET_ID"
_ENCORD_DATASET_ID = "ENCORD_DATASET_ID"
_CORD_API_KEY = "CORD_API_KEY"
_ENCORD_API_KEY = "ENCORD_API_KEY"
READ_TIMEOUT = 180 # In seconds
WRITE_TIMEOUT = 180 # In seconds
CONNECT_TIMEOUT = 180 # In seconds
logger = logging.getLogger(__name__)
class BaseConfig(ABC):
def __init__(self, endpoint: str):
self.read_timeout: int = READ_TIMEOUT
self.write_timeout: int = WRITE_TIMEOUT
self.connect_timeout: int = CONNECT_TIMEOUT
self.endpoint: str = endpoint
@abstractmethod
def define_headers(self, data: str) -> Dict:
pass
class Config(BaseConfig):
"""
Config defining endpoint, project id, API key, and timeouts.
"""
def define_headers(self, data) -> Dict:
return self._headers
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
web_file_path: Optional[str] = None,
domain: Optional[str] = None,
websocket_endpoint: str = WEBSOCKET_ENDPOINT,
):
if resource_id is None:
resource_id = get_env_resource_id()
if api_key is None:
api_key = get_env_api_key()
self.resource_id = resource_id
self.api_key = api_key
self.websocket_endpoint = websocket_endpoint
self._headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"ResourceID": resource_id,
"Authorization": self.api_key,
}
if web_file_path is None:
raise RuntimeError("`web_file_path` must be specified")
if domain is None:
raise RuntimeError("`domain` must be specified")
self.domain = domain
endpoint = domain + web_file_path
super().__init__(endpoint)
logger.info("Initialising Encord client with endpoint: %s and resource_id: %s", endpoint, resource_id)
def get_env_resource_id() -> str:
project_id = os.environ.get(_ENCORD_PROJECT_ID) or os.environ.get(_CORD_PROJECT_ID)
dataset_id = os.environ.get(_ENCORD_DATASET_ID) or os.environ.get(_CORD_DATASET_ID)
if (project_id is not None) and (dataset_id is not None):
raise encord.exceptions.InitialisationError(
message=(
"Found both Project EntityId and Dataset EntityId in os.environ. "
"Please initialise EncordClient by passing resource_id."
)
)
elif project_id is not None:
resource_id = project_id
elif dataset_id is not None:
resource_id = dataset_id
else:
raise encord.exceptions.AuthenticationError(message="Project EntityId or dataset EntityId not provided")
return resource_id
def get_env_api_key() -> str:
api_key = os.environ.get(_ENCORD_API_KEY) or os.environ.get(_CORD_API_KEY)
if api_key is None:
raise encord.exceptions.AuthenticationError(message="API key not provided")
return api_key
class EncordConfig(Config):
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
domain: Optional[str] = None,
):
web_file_path = ENCORD_PUBLIC_PATH
super().__init__(resource_id, api_key, web_file_path=web_file_path, domain=domain)
CordConfig = EncordConfig
class UserConfig(BaseConfig):
def __init__(self, private_key: Ed25519PrivateKey, domain: str = ENCORD_DOMAIN):
self.private_key: Ed25519PrivateKey = private_key
self.public_key: Ed25519PublicKey = private_key.public_key()
self._public_key_hex: str = self.public_key.public_bytes(Encoding.Raw, PublicFormat.Raw).hex()
self.domain = domain
endpoint = domain + ENCORD_PUBLIC_USER_PATH
super().__init__(endpoint)
def define_headers(self, data: str) -> Dict:
hash_builder = hashlib.sha256()
hash_builder.update(data.encode())
contents_hash = hash_builder.digest()
signature = self.private_key.sign(contents_hash)
return {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"{self._public_key_hex}:{signature.hex()}",
}
@staticmethod
def from_ssh_private_key(ssh_private_key: str, password: Optional[str], **kwargs):
key_bytes = ssh_private_key.encode()
password_bytes = password and password.encode()
        private_key = load_ssh_private_key(key_bytes, password_bytes)
if isinstance(private_key, Ed25519PrivateKey):
return UserConfig(private_key, **kwargs)
else:
raise ValueError(f"Provided key [{ssh_private_key}] is not an Ed25519 private key")
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for modules.yaml configuration file.
.. literalinclude:: _spack_root/lib/spack/spack/schema/modules.py
:lines: 13-
"""
import spack.schema.environment
import spack.schema.projections
#: Matches a spec or a multi-valued variant but not another
#: valid keyword.
#:
#: THIS NEEDS TO BE UPDATED FOR EVERY NEW KEYWORD THAT
#: IS ADDED IMMEDIATELY BELOW THE MODULE TYPE ATTRIBUTE
spec_regex = r'(?!hierarchy|core_specs|verbose|hash_length|whitelist|' \
r'blacklist|projections|naming_scheme|core_compilers|all|' \
r'defaults)(^\w[\w-]*)'
#: Matches a valid name for a module set
# Banned names are valid entries at that level in the previous schema
set_regex = r'(?!enable|lmod|tcl|dotkit|prefix_inspections)^\w[\w-]*'
#: Matches an anonymous spec, i.e. a spec without a root name
anonymous_spec_regex = r'^[\^@%+~]'
#: Definitions for parts of module schema
array_of_strings = {
'type': 'array', 'default': [], 'items': {'type': 'string'}
}
dictionary_of_strings = {
'type': 'object', 'patternProperties': {r'\w[\w-]*': {'type': 'string'}}
}
dependency_selection = {'type': 'string', 'enum': ['none', 'direct', 'all']}
module_file_configuration = {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'filter': {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'environment_blacklist': {
'type': 'array',
'default': [],
'items': {
'type': 'string'
}
}
}
},
'template': {
'type': 'string'
},
'autoload': dependency_selection,
'prerequisites': dependency_selection,
'load_only_generated': {
'type': 'boolean',
'default': False
},
'conflict': array_of_strings,
'load': array_of_strings,
'suffixes': {
'type': 'object',
'validate_spec': True,
'patternProperties': {
r'\w[\w-]*': { # key
'type': 'string'
}
}
},
'environment': spack.schema.environment.definition
}
}
projections_scheme = spack.schema.projections.properties['projections']
module_type_configuration = {
'type': 'object',
'default': {},
'allOf': [
{'properties': {
'verbose': {
'type': 'boolean',
'default': False
},
'hash_length': {
'type': 'integer',
'minimum': 0,
'default': 7
},
'whitelist': array_of_strings,
'blacklist': array_of_strings,
'blacklist_implicits': {
'type': 'boolean',
'default': False
},
'defaults': array_of_strings,
'naming_scheme': {
'type': 'string' # Can we be more specific here?
},
'projections': projections_scheme,
'all': module_file_configuration,
}
},
{'validate_spec': True,
'patternProperties': {
spec_regex: module_file_configuration,
anonymous_spec_regex: module_file_configuration,
}
}
]
}
#: The "real" module properties -- the actual configuration parameters.
#: They are separate from ``properties`` because they can appear both
#: at the top level of a Spack ``modules:`` config (old, deprecated format),
#: and within a named module set (new format with multiple module sets).
module_config_properties = {
'use_view': {'anyOf': [
{'type': 'string'},
{'type': 'boolean'}
]},
'arch_folder': {'type': 'boolean'},
'prefix_inspections': {
'type': 'object',
'additionalProperties': False,
'patternProperties': {
# prefix-relative path to be inspected for existence
r'^[\w-]*': array_of_strings
}
},
'roots': {
'type': 'object',
'properties': {
'tcl': {'type': 'string'},
'lmod': {'type': 'string'},
},
},
'enable': {
'type': 'array',
'default': [],
'items': {
'type': 'string',
'enum': ['tcl', 'dotkit', 'lmod']
},
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'cannot enable "dotkit" in modules.yaml '
'[support for "dotkit" has been dropped '
'in v0.13.0]',
'error': False
},
},
'lmod': {
'allOf': [
# Base configuration
module_type_configuration,
{
'type': 'object',
'properties': {
'core_compilers': array_of_strings,
'hierarchy': array_of_strings,
'core_specs': array_of_strings,
},
} # Specific lmod extensions
]
},
'tcl': {
'allOf': [
# Base configuration
module_type_configuration,
{} # Specific tcl extensions
]
},
'dotkit': {
'allOf': [
# Base configuration
module_type_configuration,
{} # Specific dotkit extensions
]
},
}
# Properties for inclusion into other schemas (requires definitions)
properties = {
'modules': {
'type': 'object',
'patternProperties': {
set_regex: {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
},
},
# Available here for backwards compatibility
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
}
}
#: Full schema with metadata
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack module file configuration file schema',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
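# Illustrative sketch (not part of Spack): a modules.yaml fragment that this
# schema accepts, using the named-module-set layout noted above. All concrete
# names and values below are examples only.
#
#   modules:
#     default:
#       enable: ['tcl', 'lmod']
#       tcl:
#         hash_length: 7
#         whitelist: ['gcc']
#         all:
#           autoload: 'direct'
#       lmod:
#         core_compilers: ['gcc@9.3.0']
#         hierarchy: ['mpi']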
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure the system is running at least these versions of
the appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
FileClosedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
"""
inotify(7)-based event emitter.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = None
def on_thread_start(self):
path = unicode_paths.encode(self.watch.path)
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
def on_thread_stop(self):
if self._inotify:
self._inotify.close()
def queue_events(self, timeout, full_events=False):
# If "full_events" is true, then the method will report unmatched move events as separate events
# This behavior is by default only called by a InotifyFullEmitter
with self._lock:
event = self._inotify.read_event()
if event is None:
return
if isinstance(event, tuple):
move_from, move_to = event
src_path = self._decode_path(move_from.src_path)
dest_path = self._decode_path(move_to.src_path)
cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
self.queue_event(cls(src_path, dest_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
if move_from.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_event)
return
src_path = self._decode_path(event.src_path)
if event.is_moved_to:
if full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(None, src_path))
else:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
if event.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_created_events(src_path):
self.queue_event(sub_event)
elif event.is_attrib:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_modify:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_delete or (event.is_moved_from and not full_events):
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_moved_from and full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(src_path, None))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_create:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_write and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_nowrite and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
def _decode_path(self, path):
""" Decode path only if unicode string was passed to this emitter. """
if isinstance(self.watch.path, bytes):
return path
return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
"""
    inotify(7)-based event emitter. By default, this class produces move events even if they are not matched.
    Such move events will have a ``None`` value for the unmatched part.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
        if generate_full_events:
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
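# Minimal usage sketch (not part of the original module), assuming the standard
# watchdog observer API: schedule a handler on a directory and let the inotify
# based observer defined above emit events. The path "/tmp/watched" and the
# handler are illustrative assumptions.
if __name__ == "__main__":
    import time
    from watchdog.events import FileSystemEventHandler

    class _PrintHandler(FileSystemEventHandler):
        def on_any_event(self, event):
            # print every event produced by the InotifyEmitter
            print(event)

    observer = InotifyObserver()
    observer.schedule(_PrintHandler(), path="/tmp/watched", recursive=True)
    observer.start()
    try:
        time.sleep(5)  # watch for a few seconds
    finally:
        observer.stop()
        observer.join()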
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="z", parent_name="isosurface.lightposition", **kwargs
):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 100000),
min=kwargs.pop("min", -100000),
**kwargs,
)
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
)
check = get_check(
{'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
)
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
aggregator.reset()
check.set_dynamic_tags('baz:foo')
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
def test_custom_transformer(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def __init__(self, name, init_config, instances):
super().__init__(name, init_config, instances)
self.check_initializations.append(self.configure_additional_transformers)
def configure_transformer_watchdog_mega_miss(self):
method = self.gauge
def transform(metric, sample_data, runtime_data):
for sample, tags, hostname in sample_data:
method('server.watchdog_mega_miss', sample.value, tags=tags, hostname=hostname)
return transform
def configure_additional_transformers(self):
metric = r"^envoy_server_(.+)_watchdog_mega_miss$"
self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer(
metric, self.configure_transformer_watchdog_mega_miss(), pattern=True
)
mock_http_response(
"""
# TYPE envoy_server_worker_0_watchdog_mega_miss counter
envoy_server_worker_0_watchdog_mega_miss{} 1
# TYPE envoy_server_worker_1_watchdog_mega_miss counter
envoy_server_worker_1_watchdog_mega_miss{} 0
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric('test.server.watchdog_mega_miss', metric_type=aggregator.GAUGE, count=2)
|
from datetime import datetime
from pandas import DataFrame
from models.PyCryptoBot import PyCryptoBot
from models.AppState import AppState
from models.helper.LogHelper import Logger
import sys
class Strategy:
def __init__(
self,
app: PyCryptoBot = None,
state: AppState = AppState,
df: DataFrame = DataFrame,
iterations: int = 0,
) -> None:
if not isinstance(df, DataFrame):
raise TypeError("'df' not a Pandas dataframe")
if len(df) == 0:
raise ValueError("'df' is empty")
self._action = "WAIT"
self.app = app
self.state = state
self._df = df
self._df_last = app.getInterval(df, iterations)
def isBuySignal(
self, price, now: datetime = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
) -> bool:
# required technical indicators or candle sticks for buy signal strategy
required_indicators = [
"ema12gtema26co",
"macdgtsignal",
"goldencross",
"obv_pc",
"eri_buy",
]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
        # buy signal exclusion (when buying near the high is disabled, do not buy within 3% of the dataframe close high)
if (
self.state.last_action == "SELL"
and self.app.disableBuyNearHigh() is True
and (price > (self._df["close"].max() * 0.97))
):
log_text = (
str(now)
+ " | "
+ self.app.getMarket()
+ " | "
+ self.app.printGranularity()
+ " | Ignoring Buy Signal (price "
+ str(price)
+ " within 3% of high "
+ str(self._df["close"].max())
+ ")"
)
Logger.warning(log_text)
return False
# if EMA, MACD are disabled, do not buy
if self.app.disableBuyEMA() and self.app.disableBuyMACD():
log_text = (
str(now)
+ " | "
+ self.app.getMarket()
+ " | "
+ self.app.printGranularity()
+ " | EMA, MACD indicators are disabled "
)
Logger.warning(log_text)
return False
# criteria for a buy signal 1
if (
(
bool(self._df_last["ema12gtema26co"].values[0]) is True
or self.app.disableBuyEMA()
)
and (
bool(self._df_last["macdgtsignal"].values[0]) is True
or self.app.disableBuyMACD()
)
and (
bool(self._df_last["goldencross"].values[0]) is True
or self.app.disableBullOnly()
)
and (
float(self._df_last["obv_pc"].values[0]) > -5
or self.app.disableBuyOBV()
)
and (
bool(self._df_last["eri_buy"].values[0]) is True
or self.app.disableBuyElderRay()
)
and self.state.last_action != "BUY"
): # required for all strategies
Logger.debug("*** Buy Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
        # criteria for buy signal 2 (optionally add additional buy signals)
elif (
(
bool(self._df_last["ema12gtema26co"].values[0]) is True
or self.app.disableBuyEMA()
)
and bool(self._df_last["macdgtsignalco"].values[0]) is True
and (
bool(self._df_last["goldencross"].values[0]) is True
or self.app.disableBullOnly()
)
and (
float(self._df_last["obv_pc"].values[0]) > -5
or self.app.disableBuyOBV()
)
and (
bool(self._df_last["eri_buy"].values[0]) is True
or self.app.disableBuyElderRay()
)
and self.state.last_action != "BUY"
): # required for all strategies
Logger.debug("*** Buy Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
return False
def isSellSignal(self) -> bool:
        # required technical indicators or candle sticks for sell signal strategy
required_indicators = ["ema12ltema26co", "macdltsignal"]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
# criteria for a sell signal 1
if (
bool(self._df_last["ema12ltema26co"].values[0]) is True
and (
bool(self._df_last["macdltsignal"].values[0]) is True
or self.app.disableBuyMACD()
)
and self.state.last_action not in ["", "SELL"]
):
Logger.debug("*** Sell Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
return False
def isSellTrigger(
self,
price: float = 0.0,
price_exit: float = 0.0,
margin: float = 0.0,
change_pcnt_high: float = 0.0,
obv_pc: float = 0.0,
macdltsignal: bool = False,
) -> bool:
# set to true for verbose debugging
debug = False
if debug:
Logger.warning("\n*** isSellTrigger ***\n")
Logger.warning("-- loss failsafe sell at fibonacci band --")
Logger.warning(f"self.app.disableFailsafeFibonacciLow() is False (actual: {self.app.disableFailsafeFibonacciLow()})")
Logger.warning(f"self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"self.app.sellLowerPcnt() is None (actual: {self.app.sellLowerPcnt()})")
Logger.warning(f"self.state.fib_low {self.state.fib_low} > 0")
Logger.warning(f"self.state.fib_low {self.state.fib_low} >= {float(price)}")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
# loss failsafe sell at fibonacci band
if (
self.app.disableFailsafeFibonacciLow() is False
and self.app.allowSellAtLoss()
and self.app.sellLowerPcnt() is None
and self.state.fib_low > 0
and self.state.fib_low >= float(price)
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Loss Failsafe Triggered (Fibonacci Band: "
+ str(self.state.fib_low)
+ ")"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- loss failsafe sell at trailing_stop_loss --")
Logger.warning(f"self.app.trailingStopLoss() != None (actual: {self.app.trailingStopLoss()})")
Logger.warning(f"change_pcnt_high ({change_pcnt_high}) < self.app.trailingStopLoss() ({self.app.trailingStopLoss()})")
Logger.warning(f"margin ({margin}) > self.app.trailingStopLossTrigger() ({self.app.trailingStopLossTrigger()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
# loss failsafe sell at trailing_stop_loss
if (
self.app.trailingStopLoss() != None
and change_pcnt_high < self.app.trailingStopLoss()
and margin > self.app.trailingStopLossTrigger()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Trailing Stop Loss Triggered (< "
+ str(self.app.trailingStopLoss())
+ "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- loss failsafe sell at sell_lower_pcnt --")
Logger.warning(f"self.app.disableFailsafeLowerPcnt() is False (actual: {self.app.disableFailsafeLowerPcnt()})")
Logger.warning(f"and self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"and self.app.sellLowerPcnt() != None (actual: {self.app.sellLowerPcnt()})")
Logger.warning(f"and margin ({margin}) < self.app.sellLowerPcnt() ({self.app.sellLowerPcnt()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
# loss failsafe sell at sell_lower_pcnt
elif (
self.app.disableFailsafeLowerPcnt() is False
and self.app.allowSellAtLoss()
and self.app.sellLowerPcnt() != None
and margin < self.app.sellLowerPcnt()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Loss Failsafe Triggered (< " + str(self.app.sellLowerPcnt()) + "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- profit bank at sell_upper_pcnt --")
Logger.warning(f"self.app.disableProfitbankUpperPcnt() is False (actual: {self.app.disableProfitbankUpperPcnt()})")
Logger.warning(f"and self.app.sellUpperPcnt() != None (actual: {self.app.sellUpperPcnt()})")
Logger.warning(f"and margin ({margin}) > self.app.sellUpperPcnt() ({self.app.sellUpperPcnt()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
# profit bank at sell_upper_pcnt
if (
self.app.disableProfitbankUpperPcnt() is False
and self.app.sellUpperPcnt() != None
and margin > self.app.sellUpperPcnt()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Profit Bank Triggered (> " + str(self.app.sellUpperPcnt()) + "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- profit bank when strong reversal detected --")
Logger.warning(f"self.app.sellAtResistance() is True (actual {self.app.sellAtResistance()})")
Logger.warning(f"and price ({price}) > 0")
Logger.warning(f"and price ({price}) >= price_exit ({price_exit})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
# profit bank when strong reversal detected
if (
self.app.sellAtResistance() is True
and margin >= 2
and price > 0
and price >= price_exit
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = "! Profit Bank Triggered (Selling At Resistance)"
Logger.warning(log_text)
if not (not self.app.allowSellAtLoss() and margin <= 0):
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
return False
def isWaitTrigger(self, margin: float = 0.0, goldencross: bool = False):
# set to true for verbose debugging
debug = False
if debug and self.state.action != 'WAIT':
Logger.warning("\n*** isWaitTrigger ***\n")
if debug and self.state.action == 'BUY':
Logger.warning("-- if bear market and bull only return true to abort buy --")
Logger.warning(f"self.state.action == 'BUY' (actual: {self.state.action})")
Logger.warning(f"and self.app.disableBullOnly() is True (actual: {self.app.disableBullOnly()})")
Logger.warning(f"and goldencross is False (actual: {goldencross})")
Logger.warning("\n")
# if bear market and bull only return true to abort buy
if (
self.state.action == "BUY"
and not self.app.disableBullOnly()
and not goldencross
):
log_text = "! Ignore Buy Signal (Bear Buy In Bull Only)"
Logger.warning(log_text)
return True
if debug and self.state.action == 'SELL':
Logger.warning("-- configuration specifies to not sell at a loss --")
Logger.warning(f"self.state.action == 'SELL' (actual: {self.state.action})")
Logger.warning(f"and self.app.allowSellAtLoss() is False (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"and margin ({margin}) <= 0")
Logger.warning("\n")
# configuration specifies to not sell at a loss
if (
self.state.action == "SELL"
and not self.app.allowSellAtLoss()
and margin <= 0
):
log_text = "! Ignore Sell Signal (No Sell At Loss)"
Logger.warning(log_text)
return True
if debug and self.state.action == 'SELL':
Logger.warning("-- configuration specifies not to sell within min and max margin percent bounds --")
Logger.warning(f"self.state.action == 'SELL' (actual: {self.state.action})")
Logger.warning(f"(self.app.nosellminpcnt is not None (actual: {self.app.nosellminpcnt})) and (margin ({margin}) >= self.app.nosellminpcnt ({self.app.nosellminpcnt}))")
Logger.warning(f"(self.app.nosellmaxpcnt is not None (actual: {self.app.nosellmaxpcnt})) and (margin ({margin}) <= self.app.nosellmaxpcnt ({self.app.nosellmaxpcnt}))")
Logger.warning("\n")
# configuration specifies not to sell within min and max margin percent bounds
if self.state.action == "SELL" and (
(self.app.nosellminpcnt is not None) and (margin >= self.app.nosellminpcnt)
) and (
(self.app.nosellmaxpcnt is not None) and (margin <= self.app.nosellmaxpcnt)
):
log_text = "! Ignore Sell Signal (Within No-Sell Bounds)"
Logger.warning(log_text)
return True
return False
def getAction(self, price):
if self.isBuySignal(price):
return "BUY"
elif self.isSellSignal():
return "SELL"
else:
return "WAIT"
|
"""
byceps.services.shop.order.actions.ticket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Any, Sequence
from uuid import UUID
from .....typing import UserID
from ....ticketing.dbmodels.ticket import Ticket
from ....ticketing import (
category_service as ticket_category_service,
ticket_creation_service,
ticket_revocation_service,
ticket_service,
)
from ....ticketing.transfer.models import TicketCategoryID, TicketID
from .. import log_service, service as order_service
from ..transfer.order import LineItem, Order, OrderID
from ._ticketing import create_tickets_sold_event, send_tickets_sold_event
def create_tickets(
order: Order,
line_item: LineItem,
ticket_category_id: TicketCategoryID,
initiator_id: UserID,
) -> None:
"""Create tickets."""
owned_by_id = order.placed_by_id
order_number = order.order_number
ticket_quantity = line_item.quantity
ticket_category = ticket_category_service.get_category(ticket_category_id)
tickets = ticket_creation_service.create_tickets(
ticket_category.party_id,
ticket_category_id,
owned_by_id,
ticket_quantity,
order_number=order_number,
used_by_id=owned_by_id,
)
_create_creation_order_log_entries(order.id, tickets)
data: dict[str, Any] = {
'ticket_ids': list(sorted(str(ticket.id) for ticket in tickets))
}
order_service.update_line_item_processing_result(line_item.id, data)
tickets_sold_event = create_tickets_sold_event(
order.id, initiator_id, ticket_category_id, owned_by_id, ticket_quantity
)
send_tickets_sold_event(tickets_sold_event)
def _create_creation_order_log_entries(
order_id: OrderID, tickets: Sequence[Ticket]
) -> None:
event_type = 'ticket-created'
datas = [
{
'ticket_id': str(ticket.id),
'ticket_code': ticket.code,
'ticket_category_id': str(ticket.category_id),
'ticket_owner_id': str(ticket.owned_by_id),
}
for ticket in tickets
]
log_service.create_entries(event_type, order_id, datas)
def revoke_tickets(
order: Order, line_item: LineItem, initiator_id: UserID
) -> None:
"""Revoke all tickets related to the line item."""
ticket_id_strs = line_item.processing_result['ticket_ids']
ticket_ids = {
TicketID(UUID(ticket_id_str)) for ticket_id_str in ticket_id_strs
}
tickets = ticket_service.find_tickets(ticket_ids)
ticket_revocation_service.revoke_tickets(ticket_ids, initiator_id)
_create_revocation_order_log_entries(order.id, tickets, initiator_id)
def _create_revocation_order_log_entries(
order_id: OrderID, tickets: Sequence[Ticket], initiator_id: UserID
) -> None:
event_type = 'ticket-revoked'
datas = [
{
'ticket_id': str(ticket.id),
'ticket_code': ticket.code,
'initiator_id': str(initiator_id),
}
for ticket in tickets
]
log_service.create_entries(event_type, order_id, datas)
|
"""
Module defining classes for tracking results from simulations.
The trackers defined in this module are:
.. autosummary::
:nosignatures:
CallbackTracker
ProgressTracker
PrintTracker
PlotTracker
DataTracker
SteadyStateTracker
RuntimeTracker
ConsistencyTracker
MaterialConservationTracker
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from datetime import timedelta
import inspect
import sys
import time
from typing import Callable, Optional, Union, IO, List, Any # @UnusedImport
import numpy as np
from .base import TrackerBase, InfoDict, FinishedSimulation, Real
from .intervals import IntervalData, RealtimeIntervals
from ..fields.base import FieldBase
from ..fields import FieldCollection
from ..tools.parse_duration import parse_duration
from ..tools.misc import get_progress_bar_class
class CallbackTracker(TrackerBase):
""" Tracker that calls a function periodically """
def __init__(self, func: Callable,
interval: IntervalData = 1):
"""
Args:
func: The function to call periodically. The function signature
should be `(state)` or `(state, time)`, where `state` contains
the current state as an instance of
:class:`~pde.fields.FieldBase` and `time` is a
float value indicating the current time. Note that only a view
of the state is supplied, implying that a copy needs to be made
if the data should be stored.
interval: |Arg_tracker_interval|
"""
super().__init__(interval=interval)
self._callback = func
self._num_args = len(inspect.signature(func).parameters)
if not 0 < self._num_args < 3:
raise ValueError('`func` must be a function accepting one or two '
f'arguments, not {self._num_args}')
def handle(self, field: FieldBase, t: float) -> None:
""" handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float): The associated time
"""
if self._num_args == 1:
self._callback(field)
else:
self._callback(field, t)
class ProgressTracker(TrackerBase):
""" Tracker that shows the progress of the simulation """
name = 'progress'
def __init__(self, interval: IntervalData = None,
ndigits: int = 5, leave: bool = True):
"""
Args:
interval: |Arg_tracker_interval|
The default value `None` updates the progress bar approximately
every (real) second.
            ndigits (int): The maximal number of digits shown after the
                decimal point.
leave (bool): Whether to leave the progress bar after the simulation
has finished (default: True)
"""
if interval is None:
# print every second by default
interval = RealtimeIntervals(duration=1)
super().__init__(interval=interval)
self.ndigits = ndigits
self.leave = leave
def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
""" initialize the tracker with information about the simulation
Args:
field (:class:`~pde.fields.FieldBase`):
An example of the data that will be analyzed by the tracker
info (dict):
Extra information from the simulation
Returns:
float: The first time the tracker needs to handle data
"""
result = super().initialize(field, info)
# get solver information
controller_info = {} if info is None else info.get('controller', {})
# initialize the progress bar
pb_cls = get_progress_bar_class()
self.progress_bar = pb_cls(total=controller_info.get('t_end'),
initial=controller_info.get('t_start', 0),
leave=self.leave)
self.progress_bar.set_description('Initializing')
return result
def handle(self, field: FieldBase, t: float) -> None:
""" handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float): The associated time
"""
# show an update
if self.progress_bar.total:
t_new = min(t, self.progress_bar.total)
else:
t_new = t
self.progress_bar.n = round(t_new, self.ndigits)
self.progress_bar.set_description('')
def finalize(self, info: InfoDict = None) -> None:
""" finalize the tracker, supplying additional information
Args:
info (dict):
Extra information from the simulation
"""
super().finalize(info)
self.progress_bar.set_description('')
# limit progress bar to 100%
controller_info = {} if info is None else info.get('controller', {})
t_final = controller_info.get('t_final', -np.inf)
t_end = controller_info.get('t_end', -np.inf)
if t_final >= t_end and self.progress_bar.total:
self.progress_bar.n = self.progress_bar.total
self.progress_bar.refresh()
if (controller_info.get('successful', False) and self.leave and
hasattr(self.progress_bar, 'sp')):
# show progress bar in green if simulation was successful. We
# need to overwrite the default behavior (and disable the
# progress bar) since reaching steady state means the simulation
# was successful even though it did not reach t_final
try:
self.progress_bar.sp(bar_style='success')
except TypeError:
self.progress_bar.close()
else:
self.disable = True
else:
self.progress_bar.close()
def __del__(self):
if hasattr(self, 'progress_bar') and not self.progress_bar.disable:
self.progress_bar.close()
class PrintTracker(TrackerBase):
""" Tracker that prints data to a stream (default: stdout) """
name = 'print'
def __init__(self, interval: IntervalData = 1,
stream: IO[str] = sys.stdout):
"""
Args:
interval: |Arg_tracker_interval|
stream: The stream used for printing
"""
super().__init__(interval=interval)
self.stream = stream
def handle(self, field: FieldBase, t: float) -> None:
""" handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float): The associated time
"""
data = f"c={field.data.mean():.3g}±{field.data.std():.3g}"
self.stream.write(f"t={t:g}, {data}\n")
self.stream.flush()
class PlotTracker(TrackerBase):
""" Tracker that plots data on screen, to files, or writes a movie """
name = 'plot'
def __init__(self, interval: IntervalData = 1,
output_file: Optional[str] = None,
output_folder: Optional[str] = None,
movie_file: Optional[str] = None,
quantities=None,
show: bool = True):
"""
Args:
interval: |Arg_tracker_interval|
output_file (str, optional):
Specifies a single image file, which is updated periodically, so
that the progress can be monitored (e.g. on a compute cluster)
output_folder (str, optional):
Specifies a folder to which all images are written. The files
will have names with increasing numbers.
movie_file (str, optional):
Specifies a filename to which a movie of all the frames is
written after the simulation.
quantities:
|Args_plot_quantities|
show (bool, optional):
Determines whether the plot is shown while the simulation is
running. If `False`, the files are created in the background.
"""
super().__init__(interval=interval)
self.output_file = output_file
self.output_folder = output_folder
self.quantities = quantities
self.show = show
if movie_file is not None or output_folder is not None:
from ..visualization.movies import Movie
movie = Movie(filename=movie_file, image_folder=output_folder)
self.movie: Optional[Movie] = movie
self.movie._start() # initialize movie
else:
self.movie = None
def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
""" initialize the tracker with information about the simulation
Args:
field (:class:`~pde.fields.FieldBase`):
An example of the data that will be analyzed by the tracker
info (dict):
Extra information from the simulation
Returns:
float: The first time the tracker needs to handle data
"""
from ..visualization.plotting import ScalarFieldPlot
self.plot = ScalarFieldPlot(field, quantities=self.quantities,
show=self.show)
return super().initialize(field, info=info)
def handle(self, field: FieldBase, t: float) -> None:
""" handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float): The associated time
"""
self.plot.show_data(field, title=f'Time {t:g}')
if self.output_file:
self.plot.fig.savefig(self.output_file)
if self.movie:
self.movie.add_figure(self.plot.fig)
def finalize(self, info: InfoDict = None) -> None:
""" finalize the tracker, supplying additional information
Args:
info (dict):
Extra information from the simulation
"""
super().finalize(info)
if self.movie:
if self.movie.filename:
# write out movie file if requested
self._logger.info(f'Writing movie to {self.movie.filename}...')
self.movie.save()
# finalize movie (e.g. delete temporary files)
self.movie._end()
if not self.show:
del self.plot
class DataTracker(CallbackTracker):
""" Tracker that stores custom data obtained by calling a function
Attributes:
times (list):
The time points at which the data is stored
data (list):
The actually stored data, which is a list of the objects returned by
the callback function.
"""
def __init__(self, func: Callable,
interval: IntervalData = 1):
"""
Args:
func: The function to call periodically. The function signature
should be `(state)` or `(state, time)`, where `state` contains
the current state as an instance of
:class:`~pde.fields.FieldBase` and `time` is a
float value indicating the current time. Note that only a view
of the state is supplied, implying that a copy needs to be made
if the data should be stored.
interval: |Arg_tracker_interval|
"""
super().__init__(func=func, interval=interval)
self.times: List[float] = []
self.data: List[Any] = []
def handle(self, field: FieldBase, t: float) -> None:
""" handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float): The associated time
"""
self.times.append(t)
if self._num_args == 1:
self.data.append(self._callback(field))
else:
self.data.append(self._callback(field, t))
@property
def dataframe(self):
""" pandas.DataFrame: the data as a pandas DataFrame """
import pandas as pd
df = pd.DataFrame(self.data)
# insert the times and use them as an index
df.insert(0, 'time', self.times)
return df
class SteadyStateTracker(TrackerBase):
""" Tracker that interrupts the simulation once steady state is reached
Steady state is obtained when the state does not change anymore. This is the
case when the derivative is close to zero.
"""
name = 'steady_state'
def __init__(self, interval: IntervalData = None,
atol: float = 1e-8,
rtol: float = 1e-5):
"""
Args:
interval: |Arg_tracker_interval|
The default value `None` checks for the steady state
approximately every (real) second.
atol (float): Absolute tolerance that must be reached to abort the
simulation
rtol (float): Relative tolerance that must be reached to abort the
simulation
"""
if interval is None:
interval = RealtimeIntervals(duration=1)
super().__init__(interval=interval)
self.atol = atol
self.rtol = rtol
self._last_data = None
def handle(self, field: FieldBase, t: float) -> None:
""" handle the data of `field` for a give `time` """
if self._last_data is not None:
# scale with dt to make test independent of dt
atol = self.atol * self.interval.dt
rtol = self.rtol * self.interval.dt
if np.allclose(self._last_data, field.data,
rtol=rtol, atol=atol, equal_nan=True):
raise FinishedSimulation('Reached stationary state')
self._last_data = field.data.copy() # store data from last timestep
class RuntimeTracker(TrackerBase):
""" Tracker that interrupts the simulation once a duration has passed """
def __init__(self, max_runtime: Union[Real, str],
interval: IntervalData = 1):
"""
Args:
max_runtime (float or str):
The maximal runtime of the simulation. If the runtime is
exceeded, the simulation is interrupted. Values can be either
given as a number (interpreted as seconds) or as a string, which
is then parsed using the function
:func:`~pde.tools.parse_duration.parse_duration`.
interval: |Arg_tracker_interval|
"""
super().__init__(interval=interval)
try:
self.max_runtime = float(max_runtime)
except ValueError:
td = parse_duration(str(max_runtime))
self.max_runtime = td.total_seconds()
def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
"""
Args:
field (:class:`~pde.fields.FieldBase`):
An example of the data that will be analyzed by the tracker
info (dict):
Extra information from the simulation
Returns:
float: The first time the tracker needs to handle data
"""
self.max_time = time.time() + self.max_runtime
return super().initialize(field, info)
def handle(self, field: FieldBase, t: float) -> None:
""" handle the data of `field` for a give `time` """
if time.time() > self.max_time:
dt = timedelta(seconds=self.max_runtime)
raise FinishedSimulation(f'Reached maximal runtime of {str(dt)}')
class ConsistencyTracker(TrackerBase):
""" Tracker that interrupts the simulation when the state is not finite """
name = 'consistency'
def __init__(self, interval: IntervalData = None):
"""
Args:
interval: |Arg_tracker_interval| The default value `None` checks for
consistency approximately every (real) second.
"""
if interval is None:
interval = RealtimeIntervals(duration=1)
super().__init__(interval=interval)
def handle(self, field: FieldBase, t: float) -> None:
""" handle the data of `field` for a give `time` """
if not np.all(np.isfinite(field.data)):
raise StopIteration('Field was not finite')
self._last = field.data.copy() # store data from last timestep
class MaterialConservationTracker(TrackerBase):
""" Ensure that the amount of material is conserved """
name = 'material_conservation'
def __init__(self, interval: IntervalData = 1,
atol: float = 1e-4,
rtol: float = 1e-4):
"""
Args:
interval: |Arg_tracker_interval|
atol (float): Absolute tolerance for amount deviations
rtol (float): Relative tolerance for amount deviations
"""
super().__init__(interval=interval)
self.atol = atol
self.rtol = rtol
def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
"""
Args:
field (:class:`~pde.fields.base.FieldBase`):
An example of the data that will be analyzed by the tracker
info (dict):
Extra information from the simulation
Returns:
float: The first time the tracker needs to handle data
"""
if isinstance(field, FieldCollection):
self._reference = np.array([f.magnitude for f in field])
else:
self._reference = field.magnitude # type: ignore
return super().initialize(field, info)
def handle(self, field: FieldBase, t: float) -> None:
""" handle the data of `field` for a give `time` """
if isinstance(field, FieldCollection):
mags = np.array([f.magnitude for f in field])
else:
mags = field.magnitude # type: ignore
c = np.isclose(mags, self._reference, rtol=self.rtol, atol=self.atol)
if not np.all(c):
if isinstance(field, FieldCollection):
msg = f'Material of field {np.flatnonzero(~c)} is not conserved'
else:
                msg = 'Material is not conserved'
raise StopIteration(msg)
__all__ = ['CallbackTracker', 'ProgressTracker', 'PrintTracker', 'PlotTracker',
'DataTracker', 'SteadyStateTracker', 'RuntimeTracker',
'ConsistencyTracker', 'MaterialConservationTracker']
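# Minimal usage sketch (not part of this module), assuming the public ``pde`` package
# API (``UnitGrid``, ``ScalarField``, ``DiffusionPDE``): combine a ProgressTracker with
# a DataTracker that records the mean value of the field during a short simulation.
if __name__ == "__main__":
    import pde

    grid = pde.UnitGrid([32, 32])
    state = pde.ScalarField.random_uniform(grid)
    mean_tracker = DataTracker(lambda field: field.data.mean(), interval=1)
    eq = pde.DiffusionPDE()
    eq.solve(state, t_range=5, tracker=[ProgressTracker(), mean_tracker])
    print(mean_tracker.dataframe)  # recorded times and mean values as a DataFrame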
|
import os
import shutil
from typing import List, Tuple
import unittest
from google.protobuf import json_format
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import data_exporter, hash_utils, mir_storage_ops
from tests import utils as test_utils
class TestArkDataExporter(unittest.TestCase):
# life cycle
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
self._assets_location = os.path.join(self._test_root, 'assets_location')
self._dest_root = os.path.join(self._test_root, 'export_dest')
self._mir_root = os.path.join(self._test_root, 'mir-repo')
def setUp(self) -> None:
self.__prepare_dirs()
self.__prepare_mir_repo()
self.__prepare_assets()
return super().setUp()
def tearDown(self) -> None:
# self.__deprepare_dirs()
return super().tearDown()
# private: prepare env
def __prepare_dirs(self):
test_utils.remake_dirs(self._test_root)
test_utils.remake_dirs(self._assets_location)
test_utils.remake_dirs(self._dest_root)
test_utils.remake_dirs(self._mir_root)
def __deprepare_dirs(self):
if os.path.isdir(self._test_root):
shutil.rmtree(self._test_root)
def __prepare_assets(self):
'''
        copy all assets from project to assets_location, assumes that `self._assets_location` has already been created
'''
image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg']
sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path)
for image_path in image_paths] # type: List[Tuple[str, str]]
for sha1sum, image_path in sha1sum_path_pairs:
shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum))
def __prepare_mir_repo(self):
'''
        creates mir repo, assumes that `self._mir_root` has already been created
'''
test_utils.mir_repo_init(self._mir_root)
test_utils.mir_repo_create_branch(self._mir_root, 'a')
# metadatas
metadatas_dict = {
'attributes': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 281,
'imageChannels': 3
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 333,
'imageChannels': 3
}
}
}
mir_metadatas = mirpb.MirMetadatas()
json_format.ParseDict(metadatas_dict, mir_metadatas)
# annotations
annotations_dict = {
'task_annotations': {
'a': {
'image_annotations': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'annotations': [{
'index': 0,
'box': {
'x': 104,
'y': 78,
'w': 272,
'h': 105
},
'class_id': 52,
'score': 1,
}, {
'index': 1,
'box': {
'x': 133,
'y': 88,
'w': 65,
'h': 36
},
'class_id': 52,
'score': 1,
}, {
'index': 2,
'box': {
'x': 195,
'y': 180,
'w': 19,
'h': 50
},
'class_id': 2,
'score': 1,
}, {
'index': 3,
'box': {
'x': 26,
'y': 189,
'w': 19,
'h': 95
},
'class_id': 2,
'score': 1,
}]
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'annotations': [{
'index': 0,
'box': {
'x': 181,
'y': 127,
'w': 94,
'h': 67
},
'class_id': 52,
'score': 1,
}]
},
}
}
},
'head_task_id': 'a',
}
mir_annotations = mirpb.MirAnnotations()
json_format.ParseDict(annotations_dict, mir_annotations)
# keywords
keywords_dict = {
'keywords': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'predifined_keyids': [2, 52],
'customized_keywords': ['pascal']
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'predifined_keyids': [52],
'customized_keywords': ['pascal']
},
}
}
mir_keywords = mirpb.MirKeywords()
json_format.ParseDict(keywords_dict, mir_keywords)
# task
task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData,
task_id='a',
message='import')
# save and commit
mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root,
mir_branch='a',
his_branch='master',
mir_datas={
mirpb.MirStorage.MIR_METADATAS: mir_metadatas,
mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations,
},
task=task)
# private: check result
def __check_result(self, asset_ids, format_type, export_path, index_file_path):
# check files
for asset_id in asset_ids:
asset_path = os.path.join(export_path, asset_id + '.jpeg')
self.assertTrue(os.path.isfile(asset_path))
if format_type == data_exporter.ExportFormat.EXPORT_FORMAT_ARK:
annotation_path = os.path.join(export_path, asset_id + '.txt')
elif format_type == data_exporter.ExportFormat.EXPORT_FORMAT_VOC:
annotation_path = os.path.join(export_path, asset_id + '.xml')
self.assertTrue(os.path.isfile(annotation_path))
# index file exists
self.assertTrue(os.path.isfile(index_file_path))
        # index file has enough lines
        # and each line is accessible
with open(index_file_path, 'r') as idx_f:
lines = idx_f.readlines()
self.assertEqual(len(lines), len(asset_ids))
for line in lines:
asset_rel_path, annotation_rel_path = line.split()
self.assertTrue(os.path.isfile(os.path.join(export_path, asset_rel_path)))
self.assertTrue(os.path.isfile(os.path.join(export_path, annotation_rel_path)))
def __check_ark_annotations(self, asset_id: str, export_path: str, expected_first_two_cols: List[Tuple[int, int]]):
annotation_path = os.path.join(export_path, asset_id + '.txt')
with open(annotation_path, 'r') as f:
lines = f.read().splitlines()
self.assertEqual(len(expected_first_two_cols), len(lines))
for line_idx, line in enumerate(lines):
line_components = line.split(',')
for col_idx in range(2):
self.assertEqual(expected_first_two_cols[line_idx][col_idx], int(line_components[col_idx].strip()))
# public: test cases
def test_normal_00(self):
''' normal case: ark format '''
asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
train_path = os.path.join(self._dest_root, 'train')
data_exporter.export(mir_root=self._mir_root,
assets_location=self._assets_location,
class_type_ids={
2: 0,
52: 1
},
asset_ids=asset_ids,
asset_dir=train_path,
annotation_dir=train_path,
need_ext=True,
need_id_sub_folder=False,
base_branch='a',
base_task_id='a',
format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
index_file_path=os.path.join(train_path, 'index.tsv'),
index_assets_prefix='')
# check result
self.__check_result(asset_ids=asset_ids,
format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
export_path=train_path,
index_file_path=os.path.join(train_path, 'index.tsv'))
self.__check_ark_annotations(asset_id='430df22960b0f369318705800139fcc8ec38a3e4',
export_path=train_path,
expected_first_two_cols=[(1, 104), (1, 133), (0, 195), (0, 26)])
def test_normal_01(self):
''' normal case: voc format '''
asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
train_path = os.path.join(self._dest_root, 'train')
data_exporter.export(mir_root=self._mir_root,
assets_location=self._assets_location,
class_type_ids={
2: 0,
52: 1
},
asset_ids=asset_ids,
asset_dir=train_path,
annotation_dir=train_path,
need_ext=True,
need_id_sub_folder=False,
base_branch='a',
base_task_id='a',
format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
index_file_path=os.path.join(train_path, 'index.tsv'),
index_assets_prefix='')
# check result
self.__check_result(asset_ids=asset_ids,
format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
export_path=train_path,
index_file_path=os.path.join(train_path, 'index.tsv'))
|
# apis_v1/documentation_source/sitewide_daily_metrics_sync_out_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def sitewide_daily_metrics_sync_out_doc_template_values(url_root):
"""
Show documentation about sitewideDailyMetricsSyncOut
"""
required_query_parameter_list = [
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server. '
'If not provided, a new voter_device_id (and voter entry) '
'will be generated, and the voter_device_id will be returned.',
},
]
optional_query_parameter_list = [
{
'name': 'starting_date_as_integer',
'value': 'integer', # boolean, integer, long, string
'description': 'The earliest date for the batch we are retrieving. Format: YYYYMMDD (ex/ 20200131) '
'(Default is 3 months ago)',
},
{
'name': 'ending_date_as_integer',
'value': 'integer', # boolean, integer, long, string
'description': 'Retrieve data through this date. Format: YYYYMMDD (ex/ 20200228) (Default is right now.)'
},
{
'name': 'return_csv_format',
'value': 'boolean', # boolean, integer, long, string
'description': 'If set to true, return results in CSV format instead of JSON.'
},
]
potential_status_codes_list = [
]
try_now_link_variables_dict = {
}
api_response = '[{\n' \
' "id": integer,\n' \
' "authenticated_visitors_today": integer,\n' \
' "authenticated_visitors_total": integer,\n' \
' "ballot_views_today": integer: ' \
'The number of voters that viewed at least one ballot on one day,\n' \
' "date_as_integer": integer,\n' \
' "entered_full_address": integer,\n' \
' "friend_entrants_today": integer,\n' \
' "friends_only_positions": integer,\n' \
' "individuals_with_friends_only_positions": integer,\n' \
' "individuals_with_positions": integer,\n' \
' "individuals_with_public_positions": integer,\n' \
' "issue_follows_today": integer,\n' \
' "issue_follows_total": integer,\n' \
' "issues_followed_today": integer,\n' \
' "issues_followed_total": integer,\n' \
' "issues_linked_today": integer,\n' \
' "issues_linked_total": integer,\n' \
' "new_visitors_today": integer,\n' \
' "organization_public_positions": integer,\n' \
' "organizations_auto_followed_today": integer,\n' \
' "organizations_auto_followed_total": integer,\n' \
' "organizations_followed_today": integer,\n' \
' "organizations_followed_total": integer,\n' \
' "organizations_signed_in_total": integer,\n' \
' "organizations_with_linked_issues": integer,\n' \
' "organizations_with_new_positions_today": integer,\n' \
' "organizations_with_positions": integer,\n' \
' "visitors_today": integer,\n' \
' "visitors_total": integer,\n' \
' "voter_guide_entrants_today": integer,\n' \
' "voter_guides_viewed_today": integer,\n' \
' "voter_guides_viewed_total": integer,\n' \
' "welcome_page_entrants_today": integer,\n' \
'}]'
template_values = {
'api_name': 'sitewideDailyMetricsSyncOut',
'api_slug': 'sitewideDailyMetricsSyncOut',
'api_introduction':
"Allow people with Analytics Admin authority to retrieve daily metrics information "
"for data analysis purposes.",
'try_now_link': 'apis_v1:sitewideDailyMetricsSyncOutView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
from django.urls import path
from . import views
from .views import SearchResultsView, HomePageView
urlpatterns = [
path('', views.index, name='index'),
# path('books/', views.BookListView.as_view(), name='books'),
path('search/', SearchResultsView.as_view(), name='search_results'),
path('home/', HomePageView.as_view(),name='home'),
# path('author_search/', AuthorSearchResultsView.as_view(), name='author_search_results'),
]
|
#!/usr/bin/env python
try:
from lxml import etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
#try:
# import xml.etree.cElementTree as etree
# commented out because xml.etree.cElementTree is giving errors with dictionary attributes
print("Failed to import ElementTree from any known place")
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.factors import TabularCPD, State
from pgmpy.extern.six.moves import map, range
class XMLBIFReader(object):
"""
Base class for reading network file in XMLBIF format.
"""
def __init__(self, path=None, string=None):
"""
Initialisation of XMLBIFReader object.
Parameters
----------
path : file or str
File of XMLBIF data
string : str
String of XMLBIF data
Examples
--------
# xmlbif_test.xml is the file present in
# http://www.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/
>>> reader = XMLBIFReader("xmlbif_test.xml")
"""
if path:
self.network = etree.ElementTree(file=path).getroot().find('NETWORK')
elif string:
self.network = etree.fromstring(string).find('NETWORK')
else:
raise ValueError("Must specify either path or string")
self.network_name = self.network.find('NAME').text
self.variables = self.get_variables()
self.variable_parents = self.get_parents()
self.edge_list = self.get_edges()
self.variable_states = self.get_states()
self.variable_CPD = self.get_cpd()
self.variable_property = self.get_property()
def get_variables(self):
"""
Returns list of variables of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_variables()
['light-on', 'bowel-problem', 'dog-out', 'hear-bark', 'family-out']
"""
variables = [variable.find('NAME').text for variable in self.network.findall('VARIABLE')]
return variables
def get_edges(self):
"""
Returns the edges of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_edges()
[['family-out', 'light-on'],
['family-out', 'dog-out'],
['bowel-problem', 'dog-out'],
['dog-out', 'hear-bark']]
"""
edge_list = [[value, key] for key in self.variable_parents
for value in self.variable_parents[key]]
return edge_list
def get_states(self):
"""
Returns the states of variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_states()
{'bowel-problem': ['true', 'false'],
'dog-out': ['true', 'false'],
'family-out': ['true', 'false'],
'hear-bark': ['true', 'false'],
'light-on': ['true', 'false']}
"""
variable_states = {variable.find('NAME').text: [outcome.text for outcome in variable.findall('OUTCOME')]
for variable in self.network.findall('VARIABLE')}
return variable_states
def get_parents(self):
"""
Returns the parents of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
"""
variable_parents = {definition.find('FOR').text: [edge.text for edge in definition.findall('GIVEN')][::-1]
for definition in self.network.findall('DEFINITION')}
return variable_parents
def get_cpd(self):
"""
Returns the CPD of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_cpd()
{'bowel-problem': array([[ 0.01],
[ 0.99]]),
'dog-out': array([[ 0.99, 0.01, 0.97, 0.03],
[ 0.9 , 0.1 , 0.3 , 0.7 ]]),
'family-out': array([[ 0.15],
[ 0.85]]),
'hear-bark': array([[ 0.7 , 0.3 ],
[ 0.01, 0.99]]),
'light-on': array([[ 0.6 , 0.4 ],
[ 0.05, 0.95]])}
"""
variable_CPD = {definition.find('FOR').text: list(map(float, table.text.split()))
for definition in self.network.findall('DEFINITION')
for table in definition.findall('TABLE')}
for variable in variable_CPD:
arr = np.array(variable_CPD[variable])
arr = arr.reshape((len(self.variable_states[variable]),
arr.size//len(self.variable_states[variable])))
variable_CPD[variable] = arr
return variable_CPD
def get_property(self):
"""
Returns the property of the variable
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_property()
{'bowel-problem': ['position = (190, 69)'],
'dog-out': ['position = (155, 165)'],
'family-out': ['position = (112, 69)'],
'hear-bark': ['position = (154, 241)'],
'light-on': ['position = (73, 165)']}
"""
variable_property = {variable.find('NAME').text: [property.text for property in variable.findall('PROPERTY')]
for variable in self.network.findall('VARIABLE')}
return variable_property
def get_model(self):
model = BayesianModel(self.get_edges())
model.name = self.network_name
tabular_cpds = []
for var, values in self.variable_CPD.items():
cpd = TabularCPD(var, len(self.variable_states[var]), values,
evidence=self.variable_parents[var],
evidence_card=[len(self.variable_states[evidence_var])
for evidence_var in self.variable_parents[var]])
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
for node, properties in self.variable_property.items():
for prop in properties:
prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))
model.node[node][prop_name] = prop_value
return model
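# Illustrative round-trip sketch (comments only, not part of the original module):
# read an XMLBIF file into a BayesianModel and serialise it again with the
# XMLBIFWriter defined below. The filename "dog_problem.xml" is an assumption.
#
#   reader = XMLBIFReader("dog_problem.xml")
#   model = reader.get_model()
#   writer = XMLBIFWriter(model)
#   output = str(writer)   # pretty-printed XMLBIF document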
class XMLBIFWriter(object):
"""
Base class for writing XMLBIF network file format.
"""
def __init__(self, model, encoding='utf-8', prettyprint=True):
"""
        Initialise an XMLBIFWriter object.
Parameters
----------
model: BayesianModel Instance
Model to write
encoding: str (optional)
Encoding for text data
prettyprint: Bool(optional)
Indentation in output XML if true
Examples
--------
>>> writer = XMLBIFWriter(model)
"""
if not isinstance(model, BayesianModel):
raise TypeError("model must an instance of BayesianModel")
self.model = model
self.encoding = encoding
self.prettyprint = prettyprint
self.xml = etree.Element("BIF", attrib={'version': '0.3'})
self.network = etree.SubElement(self.xml, 'NETWORK')
if self.model.name:
etree.SubElement(self.network, 'NAME').text = self.model.name
self.variables = self.get_variables()
self.states = self.get_states()
self.properties = self.get_properties()
self.definition = self.get_definition()
self.tables = self.get_cpd()
def __str__(self):
"""
Return the XML as string.
"""
if self.prettyprint:
self.indent(self.xml)
return etree.tostring(self.xml, encoding=self.encoding)
def indent(self, elem, level=0):
"""
Inplace prettyprint formatter.
"""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_variables(self):
"""
Add variables to XMLBIF
Return
------
dict: dict of type {variable: variable tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_variables()
{'bowel-problem': <Element VARIABLE at 0x7fe28607dd88>,
'family-out': <Element VARIABLE at 0x7fe28607de08>,
'hear-bark': <Element VARIABLE at 0x7fe28607de48>,
'dog-out': <Element VARIABLE at 0x7fe28607ddc8>,
'light-on': <Element VARIABLE at 0x7fe28607de88>}
"""
variables = self.model.nodes()
variable_tag = {}
for var in sorted(variables):
variable_tag[var] = etree.SubElement(self.network, "VARIABLE", attrib={'TYPE': 'nature'})
etree.SubElement(variable_tag[var], "NAME").text = var
return variable_tag
def get_states(self):
"""
Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
"""
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
for state in [State(var, state) for state in range(cpd.get_cardinality([var])[var])]:
# for state in [cpd.variables[var]:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = str(state.state)
outcome_tag[var].append(state_tag)
return outcome_tag
def get_properties(self):
"""
Add property to variables in XMLBIF
Return
------
dict: dict of type {variable: property tag}
Examples
--------
>>> writer = XMLBIFWriter(model)
        >>> writer.get_properties()
{'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>,
'family-out': <Element PROPERTY at 0x7f7a2ffac148>,
'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>,
'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>,
'dog-out': <Element PROPERTY at 0x7f7a2ffac108>}
"""
variables = self.model.nodes()
property_tag = {}
for var in sorted(variables):
properties = self.model.node[var]
property_tag[var] = etree.SubElement(self.variables[var], "PROPERTY")
for prop, val in properties.items():
property_tag[var].text = str(prop) + " = " + str(val)
return property_tag
def get_definition(self):
"""
Add Definition to XMLBIF
Return
------
dict: dict of type {variable: definition tag}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_definition()
{'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
'family-out': <Element DEFINITION at 0x7f1d489773c8>,
'dog-out': <Element DEFINITION at 0x7f1d48977388>,
'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
'light-on': <Element DEFINITION at 0x7f1d48977448>}
"""
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
definition_tag = {}
for cpd in cpds:
definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
for child in sorted([] if cpd.evidence is None else cpd.evidence):
etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = child
return definition_tag
def get_cpd(self):
"""
Add Table to XMLBIF.
        Return
        ------
        dict: dict of type {variable: table tag}
        Examples
        --------
>>> writer = XMLBIFWriter(model)
>>> writer.get_cpd()
{'dog-out': <Element TABLE at 0x7f240726f3c8>,
'light-on': <Element TABLE at 0x7f240726f488>,
'bowel-problem': <Element TABLE at 0x7f240726f388>,
'family-out': <Element TABLE at 0x7f240726f408>,
'hear-bark': <Element TABLE at 0x7f240726f448>}
"""
cpds = self.model.get_cpds()
definition_tag = self.definition
table_tag = {}
for cpd in cpds:
table_tag[cpd.variable] = etree.SubElement(definition_tag[cpd.variable], "TABLE")
table_tag[cpd.variable].text = ''
for val in cpd.values.ravel():
table_tag[cpd.variable].text += str(val) + ' '
return table_tag
def write_xmlbif(self, filename):
"""
Write the xml data into the file.
Parameters
----------
filename: Name of the file.
Examples
-------
>>> writer = XMLBIFWriter(model)
>>> writer.write_xmlbif(test_file)
"""
writer = self.__str__()[:-1].decode('utf-8')
with open(filename, 'w') as fout:
fout.write(writer)
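# A minimal round-trip sketch (hedged: the file names are hypothetical, and the
# reader is the XMLBIFReader class whose methods are documented above):
#
#     reader = XMLBIFReader("xmlbif_test.xml")
#     model = reader.get_model()            # BayesianModel with CPDs and properties
#     writer = XMLBIFWriter(model)
#     writer.write_xmlbif("xmlbif_test_copy.xml")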
|
from pyredis import RedisConnection
from pprint import pprint
# 1. Object Creation
# pass everything you would pass to redis.Redis()
redis_args = {
'host': 'localhost',
# 'password': 'redis1234',
# 'port': 1234,
}
with RedisConnection(**redis_args) as my_redis:
my_redis.set('key', 'value')
# 2. Redis Get and Set
# redis set
with RedisConnection(**redis_args) as my_redis:
    my_redis.set('a_string', 'my_string value')
my_redis.set('a_list', [1, 4, 3, 2])
my_redis.set('a_dict', {'key_1': 'val_1', 'key_2': 'val_2'})
# redis get
with RedisConnection(**redis_args) as my_redis:
data = my_redis.get('a_dict')
# data is already converted to a dict
print(type(data))
# 3. Handle Lists and Dicts
# get multiple keys / data
with RedisConnection(**redis_args) as my_redis:
# get all keys that start with a_
pattern = 'a_'
keys = my_redis.get_key_pattern(pattern)
print(f"list of all keys that start with {pattern}: {keys}")
data = my_redis.get_data_for_keys(keys)
print(f"data of all keys that start with {pattern}: {data}")
# or retrieve the data as a key: data dictionary for a specific pattern
print('data as key: data dictionary for a pattern:')
data = my_redis.get_keys('a_')
pprint(data)
# set all entries of a dictionary to redis
data = {'a': 12, 'b': 'myvalue'}
with RedisConnection(**redis_args) as my_redis:
    # you can continue working with the returned keys
keys = my_redis.set_dict(data)
print(my_redis.get('a'))
print(my_redis.get(keys[1]))
# 4. Fallback
# or work directly on the redis.Redis() object as you would with the official package
# by using the RedisConnection.R attribute
with RedisConnection(**redis_args) as my_redis:
print('access redis client through object...')
print(my_redis.R.get('a_dict'))
|
'''
Created on Jul 17, 2013
@author: Yubin Bai
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
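# Input format inferred from Solver.getInput below (hedged, not part of any original
# problem statement): the first line gives the number of test cases; each case has
# one (unused) separator line, then a line with M, then "left right" pairs, one per
# line, terminated by a "0 0" line.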
def solve(par):
M, pairs = par
pairs.sort()
pairs1 = []
for p in pairs:
if p[0] >= M or p[1] <= 0:
continue
pairs1.append(tuple(p))
if not pairs1:
return 0
pairs = [pairs1[0]]
left, right = pairs1[0]
for p in pairs1:
p1 = pairs[-1]
if p[0] == p1[0] and p[1] > p[0]:
pairs.pop()
pairs.append(p)
if p[1] > right:
pairs.append(p)
right = p[1]
if right < M:
return 0
return '\n'.join('%d %d' % (e[0], e[1]) for e in pairs)
class Solver:
def getInput(self):
self.numOfTests = int(self.fIn.readline())
self.input = []
for itertest in range(self.numOfTests):
line = self.fIn.readline().strip()
M = int(self.fIn.readline())
pairs = []
while True:
                pair = list(map(int, self.fIn.readline().split()))
if pair[0] == 0 and pair[1] == 0:
break
pairs.append(pair)
self.input.append((M, pairs))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("%s\n\n" % self.results[test])
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
        # No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
# Compare the rendered images between import and export, and if passed,
# compare images against reference test
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "_reference_oneImage")
self.status_baseline = self.__assistant.DeferJudgement(context)
return self.status_baseline
    # To pass intermediate you need to pass basic; this object could also include
    # additional tests specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also include
    # additional tests specific to the advanced badge.
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
|
from typing import Tuple
from codegen.ast import *
from codegen.sugar import *
from codegen.forms import *
from codegen.precision import *
import scripts.old_arm
import scripts.max_bn_knl
from cursors import *
import architecture
import numpy
def decompose_pattern(k, n, pattern: Matrix[bool], bk: int, bn: int) -> Tuple[Matrix[int], List[Matrix[bool]], List[int]]:
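    """
    Hedged summary based on the code below: tile the k x n boolean sparsity pattern
    into bk x bn blocks (partial blocks cover the k % bk and n % bn overhang).
    Returns the Bk x Bn matrix of block indices, the list of block sub-patterns in
    index order, and a per-column count of nonzeros lying below row k (mtx_overhead).
    """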
Bk,Bn = k//bk, n//bn
patterns = []
x = 0
n_overhead = n % bn
k_overhead = k % bk
if n_overhead > 0:
Bn += 1
if k_overhead > 0:
Bk += 1
blocks = Matrix.full(Bk,Bn,-1)
for Bni in range(Bn):
for Bki in range(Bk):
if Bni + 1 == Bn and n_overhead > 0 and Bki + 1 == Bk and k_overhead > 0:
block = pattern[(Bki*bk):((Bki+1)*bk+k_overhead), (Bni*bn):((Bni)*bn+n_overhead)]
elif Bni + 1 == Bn and n_overhead > 0:
block = pattern[(Bki*bk):((Bki+1)*bk), (Bni*bn):((Bni)*bn+n_overhead)]
elif Bki + 1 == Bk and k_overhead > 0:
block = pattern[(Bki*bk):((Bki+1)*bk+k_overhead), (Bni*bn):((Bni+1)*bn)]
else:
block = pattern[(Bki*bk):((Bki+1)*bk), (Bni*bn):((Bni+1)*bn)]
blocks[Bki,Bni] = x
x += 1
patterns.append(block)
mtx_overhead = [0] * n
for i in range(n):
for j in range(k, pattern.rows):
if pattern[j, i]:
mtx_overhead[i] += 1
return blocks, patterns, mtx_overhead
class MatMul:
def __init__(self,
m: int,
n: int,
k: int,
lda: int,
ldb: int,
ldc: int,
alpha: str,
beta: str,
mtx_filename: str,
mtx_format: str = 'any',
output_funcname: str = None,
output_filename: str = None,
output_overwrite: bool = False,
bm: int = None,
bn: int = None,
bk: int = None,
arch: str = 'knl',
precision: str = 'd',
prefetching: str = None,
**kwargs # Accept and ignore args which don't belong
) -> None:
self.m = m
self.n = n
self.k = k
self.lda = lda
self.ldb = ldb
self.ldc = ldc
try:
self.alpha = float(alpha)
except:
self.alpha = 'generic'
try:
self.beta = float(beta)
except:
self.beta = 'generic'
if arch == 'skx':
arch = 'knl'
self.arch = arch
assert precision.lower() in ['s', 'd']
self.precision = Precision.DOUBLE if precision.lower() == 'd' else Precision.SINGLE
architecture.init()
architecture.arch = arch
architecture.Generator = architecture.get_class("codegen.architectures." + arch + ".generator.Generator")
architecture.operands = architecture.get_class("codegen.architectures." + arch + ".operands")
self.generator = architecture.Generator(self.precision)
self.v_size = self.generator.get_v_size()
        if bk is None:
bk = 2 if arch == 'knl' else 1
        if bm is None or bn is None:
if arch == 'knl':
(self.bm, self.bn) = scripts.max_bn_knl.getBlocksize(m, n, bk, self.v_size)
elif arch == 'arm':
(self.bm, self.bn) = scripts.old_arm.getBlocksize(m, n, bk, self.v_size)
else:
self.bm = bm
self.bn = bn
self.bk = bk
self.prefetching = prefetching
self.mtx_filename = mtx_filename
self.mtx_format = mtx_format
self.output_funcname = output_funcname
self.output_filename = output_filename
self.output_overwrite = output_overwrite
if ldb == 0:
pattern = Matrix.load(mtx_filename)
else:
mtx = numpy.zeros((k, n))
for i in range(k):
for j in range(n):
mtx[i, j] = 1
pattern = Matrix(mtx)
blocks,patterns,mtx_overhead = decompose_pattern(self.k, self.n, pattern, self.bk, self.bn)
self.nnz = 0
self.flop = 0
if ldb == 0:
for i in range(n):
for j in range(k):
if pattern[j,i]:
self.nnz += 1
self.flop = self.nnz * m * 2
self.nnz += sum(mtx_overhead)
else:
self.nnz = ldb * self.n
self.flop = m * n * k * 2
prefetchReg = self.generator.init_prefetching(self.prefetching)
assert(self.m % self.v_size == 0)
self.A_regs, self.B_regs, self.C_regs, self.starting_regs, self.alpha_reg, self.beta_reg, self.loop_reg, self.additional_regs = self.generator.make_reg_blocks(self.bm, self.bn, self.bk, self.v_size, self.nnz, self.m, self.n, self.k)
self.A = DenseCursor("A", self.starting_regs[0], self.m, self.k, self.lda, self.bm, self.bk, self.precision.value)
self.B = BlockCursor("B", self.starting_regs[1], self.k, self.n, self.ldb, self.bk, self.bn, self.precision.value, blocks, patterns,mtx_overhead)
self.C = DenseCursor("C", self.starting_regs[2], self.m, self.n, self.ldc, self.bm, self.bn, self.precision.value)
self.C_pf = DenseCursor("C_pf", prefetchReg, self.m, self.n, self.ldc, self.bm, self.bn, self.precision.value) if prefetchReg else None
def make_nk_unroll(self):
asm = block("Unrolling over bn and bk")
A_ptr = CursorLocation()
B_ptr = self.B.start()
C_ptr = CursorLocation()
C_pf_ptr = CursorLocation()
Bn = self.n // self.bn
Bk = self.k // self.bk
vm = self.bm // self.v_size
n_overhead = self.n % self.bn
k_overhead = self.k % self.bk
if n_overhead > 0:
Bn += 1
if k_overhead > 0:
Bk += 1
asm.add(self.generator.make_b_pointers(self.starting_regs[1], self.additional_regs, self.nnz))
for Bni in range(0,Bn):
regs = self.C_regs
if Bni + 1 == Bn and n_overhead > 0:
regs = self.C_regs[0:vm, 0:n_overhead]
if self.alpha == 1.0 and self.beta != 0.0:
asm.add(self.generator.move_register_block(self.C, C_ptr, Coords(), regs, self.v_size, self.additional_regs, None, False))
if self.beta != 1.0:
for ic in range(regs.shape[1]):
for ir in range(regs.shape[0]):
asm.add(mul(regs[ir,ic], self.beta_reg[1], regs[ir,ic]))
else:
asm.add(self.generator.make_zero_block(regs, self.additional_regs))
for Bki in range(0,Bk):
to_A = Coords(right=Bki)
to_B = Coords(right=Bni, down=Bki, absolute=True)
if self.B.has_nonzero_block(B_ptr, to_B):
asm.add(self.generator.make_microkernel(self.A, self.B, A_ptr, B_ptr, self.A_regs, self.B_regs, regs, self.v_size, self.additional_regs, to_A, to_B))
if self.alpha != 1.0:
store_block = block("")
for x in range(0, regs.shape[1], self.A_regs.shape[1]):
A_regs_cut = self.A_regs[0:min(self.A_regs.shape[0], regs.shape[0]), 0:regs.shape[1]-x]
if self.beta != 0.0:
store_block.add(self.generator.move_register_block(self.C, C_ptr, Coords(), A_regs_cut, self.v_size, self.additional_regs, None, False, None, self.ldc * x))
for ir in range(A_regs_cut.shape[0]):
for ic in range(A_regs_cut.shape[1]):
if self.beta != 0.0 and self.beta != 1.0:
store_block.add(mul(A_regs_cut[ir,ic], self.beta_reg[1], A_regs_cut[ir,ic]))
if self.beta == 0.0:
store_block.add(mul(regs[ir, x + ic], self.alpha_reg[1], A_regs_cut[ir, ic], "C = C + alpha * AB"))
else:
store_block.add(fma(regs[ir, x + ic], self.alpha_reg[1], A_regs_cut[ir, ic], "C = C + alpha * AB", False))
store_block.add(self.generator.move_register_block(self.C, C_ptr, Coords(), A_regs_cut, self.v_size, self.additional_regs, None, True, self.prefetching, self.ldc * x))
asm.add(store_block)
else:
asm.add(self.generator.move_register_block(self.C, C_ptr, Coords(), regs, self.v_size, self.additional_regs, None, True, self.prefetching))
if (Bni != Bn-1):
move_C, C_ptr = self.C.move(C_ptr, Coords(right=1))
asm.add(move_C)
if self.C_pf:
move_C_pf, C_pf_ptr = self.C_pf.move(C_pf_ptr, Coords(right=1))
asm.add(move_C_pf)
return asm
def make(self):
A_ptr = CursorLocation()
C_ptr = CursorLocation()
C_pf_ptr = CursorLocation()
Bm = self.m // self.bm
Bn = self.n // self.bn
Bk = self.k // self.bk
if self.n % self.bn != 0:
Bn += 1
loopBody = [
self.make_nk_unroll(),
self.A.move(A_ptr, Coords(down=1))[0],
self.C.move(C_ptr, Coords(down=1, right=1-Bn))[0]
]
if self.C_pf:
loopBody.append(self.C_pf.move(C_pf_ptr, Coords(down=1, right=1-Bn))[0])
asm = block("unrolled_{}x{}x{}".format(self.m,self.n,self.k),
self.generator.bcst_alpha_beta(self.alpha_reg, self.beta_reg),
self.generator.make_scaling_offsets(self.additional_regs, self.nnz),
loop(self.loop_reg, 0, Bm, 1).body(*loopBody)
)
vm_overhead = (self.m % self.bm) // self.v_size
if vm_overhead > 0:
self.m = self.m % self.bm
self.bm = self.m % self.bm
self.A_regs = self.A_regs[0:self.bm // self.v_size, 0:self.bk]
self.C_regs = self.C_regs[0:self.bm // self.v_size, 0:self.bn]
self.A.r = self.m
asm.add(self.make_nk_unroll())
return asm
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.communication.administration.aio import CommunicationIdentityClient
from azure_devtools.scenario_tests import RecordingProcessor
from devtools_testutils import ResourceGroupPreparer
from _shared.helper import URIIdentityReplacer
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import BodyReplacerProcessor
from _shared.communication_service_preparer import CommunicationServicePreparer
class CommunicationIdentityClientTestAsync(AsyncCommunicationTestCase):
def setUp(self):
super(CommunicationIdentityClientTestAsync, self).setUp()
self.recording_processors.extend([
BodyReplacerProcessor(keys=["id", "token"]),
URIIdentityReplacer()])
@ResourceGroupPreparer(random_name_enabled=True)
@CommunicationServicePreparer()
@pytest.mark.live_test_only
@pytest.mark.asyncio
@AsyncCommunicationTestCase.await_prepared_test
async def test_create_user(self, connection_string):
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
async with identity_client:
user = await identity_client.create_user()
assert user.identifier is not None
@ResourceGroupPreparer(random_name_enabled=True)
@CommunicationServicePreparer()
@pytest.mark.live_test_only
@pytest.mark.asyncio
@AsyncCommunicationTestCase.await_prepared_test
async def test_issue_token(self, connection_string):
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
async with identity_client:
user = await identity_client.create_user()
token_response = await identity_client.issue_token(user, scopes=["chat"])
assert user.identifier is not None
assert token_response.token is not None
@ResourceGroupPreparer(random_name_enabled=True)
@CommunicationServicePreparer()
@pytest.mark.live_test_only
@pytest.mark.asyncio
@AsyncCommunicationTestCase.await_prepared_test
async def test_revoke_tokens(self, connection_string):
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
async with identity_client:
user = await identity_client.create_user()
token_response = await identity_client.issue_token(user, scopes=["chat"])
await identity_client.revoke_tokens(user)
assert user.identifier is not None
assert token_response.token is not None
@ResourceGroupPreparer(random_name_enabled=True)
@CommunicationServicePreparer()
@pytest.mark.live_test_only
@pytest.mark.asyncio
@AsyncCommunicationTestCase.await_prepared_test
async def test_delete_user(self, connection_string):
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
async with identity_client:
user = await identity_client.create_user()
await identity_client.delete_user(user)
assert user.identifier is not None
|
from moviepy.editor import *
from os import chdir, getcwd, mkdir
from random import randint
import sys
import requests
from concurrent.futures import ThreadPoolExecutor
from requests import get, head
import time
# User-configurable settings
THREAD_NUM = 12  # number of download threads, 12 by default
HEADER = " "  # request header, a single space by default
class downloader:
def __init__(self, url, num, name):
self.url = url
self.num = num
self.name = name
self.getsize = 0
r = head(self.url, allow_redirects=True)
self.size = int(r.headers['Content-Length'])
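    # down() fetches one byte range [start, end] with an HTTP Range request and
    # writes it at the matching offset of the preallocated file; main() splits the
    # file across self.num such ranges and runs them in a thread pool.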
def down(self, start, end, chunk_size=10240):
headers = {'range': f'bytes={start}-{end}'}
r = get(self.url, headers=headers, stream=True)
with open(self.name, "rb+") as f:
f.seek(start)
for chunk in r.iter_content(chunk_size):
f.write(chunk)
self.getsize += chunk_size
def main(self):
start_time = time.time()
f = open(self.name, 'wb')
f.truncate(self.size)
f.close()
tp = ThreadPoolExecutor(max_workers=self.num)
futures = []
start = 0
for i in range(self.num):
end = int((i+1)/self.num*self.size)
future = tp.submit(self.down, start, end)
futures.append(future)
start = end+1
while True:
process = self.getsize/self.size*100
last = self.getsize
time.sleep(1)
curr = self.getsize
down = (curr-last)/1024
if down > 1024:
speed = f'{down/1024:6.2f}MB/s'
else:
speed = f'{down:6.2f}KB/s'
print(f'process: {process:6.2f}% | speed: {speed}', end='\r')
if process >= 100:
print(f'process: {100.00:6}% | speed: 00.00KB/s', end=' | ')
break
tp.shutdown()
end_time = time.time()
total_time = end_time-start_time
average_speed = self.size/total_time/1024/1024
print(
f'total-time: {total_time:.0f}s | average-speed: {average_speed:.2f}MB/s')
DOWNMUSIC = True
url = input("url:")
video_id = url.split("/")  # split the URL to extract the video id
# the video id is the last path segment, e.g. https://www.douyin.com/video/7058986650088607012
video_id = video_id[len(video_id)-1]
try:
int(video_id)
except:
print("dy-get [error] 获取video_id失败!")
sys.exit(1)
response = requests.get('https://www.douyin.com/web/api/v2/aweme/iteminfo/?item_ids='+str(video_id),
                        headers='')  # you can supply your own headers here
try:
    video_json = response.json()  # parse the json response
except:
print("dy-get [error] 获取json失败!")
sys.exit(1)
try:
get_id = video_json["item_list"][0]["video"]["vid"]
except:
print("dy-get [error] 获取vid失败!")
sys.exit(1)
picture = video_json["item_list"][0]["video"]["ratio"]
width = video_json["item_list"][0]["video"]["width"]
height = video_json["item_list"][0]["video"]["height"]
music_url = video_json["item_list"][0]["music"]["play_url"]["uri"]
url = "https://aweme.snssdk.com/aweme/v1/play/?video_id="+get_id+"&line=0"
response = requests.get(url,
                        headers='')  # you can supply your own headers here
url = response.url
random_value = str(int(time.time()))
mkdir(str(get_id)+"_"+random_value)
chdir(str(get_id)+"_"+random_value)
mkdir("video")
chdir("video")
save = str(get_id)+".mp4"
down = downloader(url, 12, save)
down.main()
chdir("../")
mkdir("music")
chdir("music")
url = music_url
response = requests.get(url,
                        headers='')  # you can supply your own headers here
url = response.url
save = str(get_id)+".mp3"
down = downloader(url, 12, save)
down.main()
chdir("../")
chdir("video")
old_video = get_id+".mp4"
new_video = get_id+"_nosound.mp4"
video = VideoFileClip(old_video)
video = video.without_audio()  # strip the audio; returns a new clip, the original clip is unchanged
video.write_videofile(new_video)
|
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from theboss.boson_sampling_utilities.permanent_calculators.glynn_gray_permanent_calculator import ( # noqa: E501
GlynnGrayPermanentCalculator,
)
def _permanent(matrix, rows, columns, calculator_class):
calculator = calculator_class(matrix, rows, columns)
return calculator.compute_permanent()
def glynn_gray_permanent(matrix, rows, columns):
return _permanent(
matrix, rows, columns, calculator_class=GlynnGrayPermanentCalculator
)
|
import sys
import pickle
import typing
import inspect
import pkgutil
import importlib
import ipaddress
import json
import re
from pathlib import Path
from cmd2.ansi import style
from collections import defaultdict
from ..recon.config import defaults
def meets_requirements(requirements, exception):
""" Determine if tools required to perform task are installed. """
tools = get_tool_state()
for tool in requirements:
if not tools.get(tool).get("installed"):
if exception:
raise RuntimeError(
style(f"[!!] {tool} is not installed, and is required to run this scan", fg="bright_red")
)
else:
return False
return True
def get_tool_state() -> typing.Union[dict, None]:
""" Load current tool state from disk. """
tools = Path(defaults.get("tools-dir")) / ".tool-dict.pkl"
if tools.exists():
return pickle.loads(tools.read_bytes())
def get_scans():
""" Iterates over the recon package and its modules to find all of the classes that end in [Ss]can.
**A contract exists here that says any scans need to end with the word scan in order to be found by this function.**
Example:
``defaultdict(<class 'list'>, {'AmassScan': ['pipeline.recon.amass'], 'MasscanScan': ['pipeline.recon.masscan'], ... })``
Returns:
dict containing mapping of ``classname -> [modulename, ...]`` for all potential recon-pipeline commands
"""
scans = defaultdict(list)
file = Path(__file__).expanduser().resolve()
web = file.parent / "web"
recon = file.parents[1] / "recon"
lib_paths = [str(web), str(recon)]
# recursively walk packages; import each module in each package
# walk_packages yields ModuleInfo objects for all modules recursively on path
# prefix is a string to output on the front of every module name on output.
for loader, module_name, is_pkg in pkgutil.walk_packages(path=lib_paths, prefix=f"{__package__}."):
try:
importlib.import_module(module_name)
except ModuleNotFoundError:
# skipping things like recon.aquatone, not entirely sure why they're showing up...
pass
# walk all modules, grabbing classes that we've written and add them to the classlist defaultdict
# getmembers returns all members of an object in a list of tuples (name, value)
for name, obj in inspect.getmembers(sys.modules[__package__]):
if inspect.ismodule(obj) and not name.startswith("_"):
# we're only interested in modules that don't begin with _ i.e. magic methods __len__ etc...
for sub_name, sub_obj in inspect.getmembers(obj):
# now we only care about classes that end in [Ss]can
if inspect.isclass(sub_obj) and sub_name.lower().endswith("scan"):
# final check, this ensures that the tools necessary to AT LEAST run this scan are present
# does not consider upstream dependencies
try:
requirements = sub_obj.requirements
exception = False # let meets_req know we want boolean result
if not meets_requirements(requirements, exception):
continue
except AttributeError:
                        # some scans haven't implemented meets_requirements yet, silently allow them through
pass
scans[sub_name].append(f"{__package__}.{name}")
return scans
def is_ip_address(ipaddr):
""" Simple helper to determine if given string is an ip address or subnet """
try:
ipaddress.ip_interface(ipaddr)
return True
except ValueError:
return False
def get_ip_address_version(ipaddr):
""" Simple helper to determine whether a given ip address is ipv4 or ipv6 """
if is_ip_address(ipaddr):
if isinstance(ipaddress.ip_address(ipaddr), ipaddress.IPv4Address): # ipv4 addr
return "4"
elif isinstance(ipaddress.ip_address(ipaddr), ipaddress.IPv6Address): # ipv6
return "6"
def is_inscope(hostname, scope_file):
""" Check given hostname against scope file and return True if it is """
    # Use a default-allow rule,
# so if there is no match in "include" or "exclude", it is within scope
in_scope = True
if scope_file == "":
return in_scope
# Load Scope file
with open(scope_file, "r") as f:
scope = json.load(f)
# parse json data
for block in scope['target']['scope']['exclude']:
# Does URL match an excluded hostname?
if re.search(block['host'], hostname):
# If so, check for a more specific allow entry
for allow in scope['target']['scope']['include']:
# remove special chars in allow entry to make a domain name
allow_n = allow['host'].replace('\\', '').replace('^', '').replace('$', '')
# test the block entry against the normalized domain name
if re.search(block['host'], allow_n):
                    # if the allowed domain name fits inside the blocked domain entry,
                    # then it must be more specific.
# Test URL against the allow entry
if re.search(allow['host'], hostname):
# If it matches, mark as true
in_scope = True
else:
in_scope = False
else:
in_scope = False
return in_scope
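# A hedged sketch of the scope-file structure is_inscope() expects (the keys are
# taken from the parsing above; the host patterns are made-up examples):
#
#     {
#       "target": {
#         "scope": {
#           "include": [{"host": "^allowed\\.example\\.com$"}],
#           "exclude": [{"host": "example\\.com"}]
#         }
#       }
#     }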
|
import os
import unittest
from typing import Optional
from django.http import HttpResponse
from django.test import RequestFactory
from request_limiter import request_limiter, LimitedIntervalStrategy, \
LimitStrategy, LimitException, django_request_limiter
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings')
req_factory = RequestFactory()
class MockStrategy(LimitStrategy):
def __init__(self, allow: bool):
self._allow = allow
def allow(self, key: Optional[str] = None) -> bool:
return self._allow
def get_remaining(self, key: Optional[str] = None) -> float:
return 1
def clean(self):
pass
class TestRequestLimiterDecorator(unittest.TestCase):
def test_when_strategy_not_given_uses_limited_interval_strategy(self):
limiter = request_limiter()
self.assertTrue(isinstance(limiter.strategy, LimitedIntervalStrategy))
def test_when_strategy_allows_invokes_function(self):
@request_limiter(strategy=MockStrategy(allow=True))
def test_func() -> bool:
return True
self.assertTrue(test_func())
def test_when_strategy_denies_raises_exception(self):
@request_limiter(strategy=MockStrategy(allow=False))
def test_func() -> bool:
return True
self.assertRaises(LimitException, test_func)
class TestDjangoRequestLimiter(unittest.TestCase):
def test_limits_based_on_ip(self):
@django_request_limiter
@request_limiter(strategy=LimitedIntervalStrategy(requests=1))
def test_view(request):
return True
res1 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1'))
assert res1, 'Expected first request to work'
res2 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1'))
assert isinstance(res2, HttpResponse), 'Expected limit http response'
assert res2.status_code == 429, 'Expected 429 response code'
# change Ip
res3 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.2'))
assert res3, 'Expected different ip request to work'
|
import siliconcompiler
############################################################################
# DOCS
############################################################################
def make_docs():
'''
Demonstration target for compiling ASICs with FreePDK45 and the open-source
asicflow.
'''
chip = siliconcompiler.Chip('<design>')
setup(chip)
return chip
####################################################
# PDK Setup
####################################################
def setup(chip):
'''
Target setup
'''
#0. Defining the project
chip.set('option', 'target', 'freepdk45_demo')
#1. Setting to ASIC mode
chip.set('option', 'mode','asic')
#2. Load PDK, flow, libs combo
chip.load_pdk('freepdk45')
chip.load_flow('lintflow')
chip.load_flow('asicflow')
chip.load_flow('asictopflow')
chip.load_lib('nangate45')
#3. Set flow and pdk
chip.set('option', 'flow', 'asicflow', clobber=False)
chip.set('option', 'pdk', 'freepdk45')
#4. Select libraries
chip.set('asic', 'logiclib', 'nangate45')
#5. Set project specific design choices
chip.set('asic', 'stackup', '10M')
chip.set('asic', 'delaymodel', 'nldm')
chip.set('asic', 'minlayer', "m1")
chip.set('asic', 'maxlayer', "m10")
chip.set('asic', 'maxfanout', 64)
chip.set('asic', 'maxlength', 1000)
chip.set('asic', 'maxslew', 0.2e-9)
chip.set('asic', 'maxcap', 0.2e-12)
chip.set('asic', 'rclayer', 'clk', "m5")
chip.set('asic', 'rclayer', 'data',"m3")
chip.set('asic', 'hpinlayer', "m3")
chip.set('asic', 'vpinlayer', "m2")
chip.set('asic', 'density', 10)
chip.set('asic', 'aspectratio', 1)
chip.set('asic', 'coremargin', 1.9)
#6. Timing corners
corner = 'typical'
chip.set('constraint','worst','libcorner', corner)
chip.set('constraint','worst','pexcorner', corner)
chip.set('constraint','worst','mode', 'func')
chip.set('constraint','worst','check', ['setup','hold'])
#########################
if __name__ == "__main__":
chip = make_docs()
|
from typing import Set
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
description: str = None
price: float
tax: float = None
tags: Set[str] = []
@app.post(
"/items/",
response_model=Item,
summary="Create an item",
response_description="The created item",
)
async def create_item(*, item: Item):
"""
Create an item with all the information:
* name: each item must have a name
* description: a long description
* price: required
* tax: if the item doesn't have tax, you can omit this
* tags: a set of unique tag strings for this item
"""
return item
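# A hedged example request body for POST /items/ (field values are made up):
#
#     {"name": "Hammer", "description": "A heavy hammer", "price": 9.99,
#      "tax": 0.5, "tags": ["tools", "hardware"]}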
|
#!/usr/bin/env python
# coding=UTF-8
import requests
import json
import datetime
from config.configuration import flower_path, flower_port, flower_server_external, verify_certificate, \
flower_server_internal
def get_task_info(task_id):
flower_request_url = 'http://%s:%s%sapi/tasks' % (flower_server_internal, flower_port, flower_path)
print(flower_request_url)
response = requests.get(flower_request_url,
verify=verify_certificate, headers={'Connection': 'close'})
tasks_json = json.loads(response.text)
print(tasks_json)
task_json = tasks_json[task_id]
task_name = task_json['name']
task_runtime = None
    if 'runtime' in task_json and isinstance(task_json['runtime'], (int, float)):
task_runtime = round(task_json['runtime'], 2)
process_id = None
if 'args' in task_json:
from ast import literal_eval
tpl = literal_eval(task_json["args"])
if isinstance(tpl[0], dict):
js = tpl[0]
else:
js = json.loads(tpl[0])
process_id = js["process_id"]
task_received = datetime.datetime.fromtimestamp(int(task_json['received'])).strftime('%Y-%m-%d %H:%M:%S')
task_info = {
"name": task_name,
"received": task_received,
}
if task_runtime:
task_info["runtime"] = task_runtime
if process_id:
task_info["process_id"] = process_id
return task_info
def get_task_list(task_id, exclude_tasks:list=None):
flower_request_url = 'http://%s:%s%sapi/tasks' % (flower_server_internal, flower_port, flower_path)
response = requests.get(flower_request_url, verify=verify_certificate, headers={'Connection': 'close'})
tasks_json = json.loads(response.text)
task_ids = []
if task_id in tasks_json:
def find_tasks(t_id):
t_info = tasks_json[t_id]
if 'children' in t_info:
for child_task in t_info['children']:
                    if not exclude_tasks or tasks_json[child_task]['name'] not in exclude_tasks:
task_ids.append(child_task)
find_tasks(child_task)
find_tasks(task_id)
else:
raise ValueError("Task not found: %s" % task_id)
return [tasks_json[tid] for tid in task_ids]
if __name__ == '__main__':
tl = get_task_list('b6791fd7-d7df-41c3-916b-ec046fe15a59', ['file_migration'])
print(len(tl))
for t in tl:
print(t)
|
from __future__ import absolute_import
from __future__ import with_statement
import sys
import logging
from tempfile import mktemp
from celery import log
from celery.log import (setup_logger, setup_task_logger,
get_default_logger, get_task_logger,
redirect_stdouts_to_logger, LoggingProxy,
setup_logging_subsystem)
from celery.utils import uuid
from celery.utils.compat import _CompatLoggerAdapter
from celery.tests.utils import unittest
from celery.tests.utils import (override_stdouts, wrap_logger,
get_handlers, set_handlers)
class test_default_logger(unittest.TestCase):
def setUp(self):
self.setup_logger = setup_logger
self.get_logger = get_default_logger
log._setup = False
def test_setup_logging_subsystem_colorize(self):
setup_logging_subsystem(colorize=None)
setup_logging_subsystem(colorize=True)
def test_setup_logging_subsystem_no_mputil(self):
mputil, log.mputil = log.mputil, None
log.mputil
try:
log.setup_logging_subsystem()
finally:
log.mputil = mputil
def _assertLog(self, logger, logmsg, loglevel=logging.ERROR):
with wrap_logger(logger, loglevel=loglevel) as sio:
logger.log(loglevel, logmsg)
return sio.getvalue().strip()
def assertDidLogTrue(self, logger, logmsg, reason, loglevel=None):
val = self._assertLog(logger, logmsg, loglevel=loglevel)
return self.assertEqual(val, logmsg, reason)
def assertDidLogFalse(self, logger, logmsg, reason, loglevel=None):
val = self._assertLog(logger, logmsg, loglevel=loglevel)
return self.assertFalse(val, reason)
def test_setup_logger(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=True)
set_handlers(logger, [])
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=None)
self.assertIs(get_handlers(logger)[0].stream, sys.__stderr__,
"setup_logger logs to stderr without logfile argument.")
self.assertDidLogFalse(logger, "Logging something",
"Logger doesn't info when loglevel is ERROR",
loglevel=logging.INFO)
def test_setup_logger_no_handlers_stream(self):
l = self.get_logger()
set_handlers(l, [])
with override_stdouts() as outs:
stdout, stderr = outs
l = self.setup_logger(logfile=stderr, loglevel=logging.INFO,
root=False)
l.info("The quick brown fox...")
self.assertIn("The quick brown fox...", stderr.getvalue())
def test_setup_logger_no_handlers_file(self):
l = self.get_logger()
set_handlers(l, [])
tempfile = mktemp(suffix="unittest", prefix="celery")
l = self.setup_logger(logfile=tempfile, loglevel=0, root=False)
self.assertIsInstance(get_handlers(l)[0],
logging.FileHandler)
def test_redirect_stdouts(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
try:
with wrap_logger(logger) as sio:
redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
logger.error("foo")
self.assertIn("foo", sio.getvalue())
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
def test_logging_proxy(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
with wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write("foo")
self.assertNotIn("foo", sio.getvalue())
p.closed = False
p.write("foo")
self.assertIn("foo", sio.getvalue())
lines = ["baz", "xuzzy"]
p.writelines(lines)
for line in lines:
self.assertIn(line, sio.getvalue())
p.flush()
p.close()
self.assertFalse(p.isatty())
self.assertIsNone(p.fileno())
class test_task_logger(test_default_logger):
def setUp(self):
logger = get_task_logger()
logger.handlers = []
logging.root.manager.loggerDict.pop(logger.name, None)
self.uid = uuid()
def setup_logger(self, *args, **kwargs):
return setup_task_logger(*args, **dict(kwargs, task_name=self.uid,
task_id=self.uid))
def get_logger(self, *args, **kwargs):
return get_task_logger(*args, **dict(kwargs, name=self.uid))
class MockLogger(logging.Logger):
_records = None
def __init__(self, *args, **kwargs):
self._records = []
logging.Logger.__init__(self, *args, **kwargs)
def handle(self, record):
self._records.append(record)
def isEnabledFor(self, level):
return True
class test_CompatLoggerAdapter(unittest.TestCase):
levels = ("debug",
"info",
"warn", "warning",
"error",
"fatal", "critical")
def setUp(self):
self.logger, self.adapter = self.createAdapter()
def createAdapter(self, name=None, extra={"foo": "bar"}):
logger = MockLogger(name=name or uuid())
return logger, _CompatLoggerAdapter(logger, extra)
def test_levels(self):
for level in self.levels:
msg = "foo bar %s" % (level, )
logger, adapter = self.createAdapter()
getattr(adapter, level)(msg)
self.assertEqual(logger._records[0].msg, msg)
def test_exception(self):
try:
raise KeyError("foo")
except KeyError:
self.adapter.exception("foo bar exception")
self.assertEqual(self.logger._records[0].msg, "foo bar exception")
def test_setLevel(self):
self.adapter.setLevel(logging.INFO)
self.assertEqual(self.logger.level, logging.INFO)
def test_process(self):
msg, kwargs = self.adapter.process("foo bar baz", {"exc_info": 1})
self.assertDictEqual(kwargs, {"exc_info": 1,
"extra": {"foo": "bar"}})
def test_add_remove_handlers(self):
handler = logging.StreamHandler()
self.adapter.addHandler(handler)
self.assertIs(self.logger.handlers[0], handler)
self.adapter.removeHandler(handler)
self.assertListEqual(self.logger.handlers, [])
|
# coding: utf-8
# Modify a specific group entry in database
# Created by James Raphael Tiovalen (2021)
import slack
import ast
import settings
import config
from slackers.hooks import commands
conv_db = config.conv_handler
@commands.on("editgroup")
def editgroup(payload):
return
|
"""Example of pykitti.odometry usage."""
import itertools
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pykitti
__author__ = "Lee Clement"
__email__ = "lee.clement@robotics.utias.utoronto.ca"
# Change this to the directory where you store KITTI data
basedir = './data/dataset'
# Specify the dataset to load
sequence = '01'
# Load the data. Optionally, specify the frame range to load.
# Passing imformat='cv2' will convert images to uint8 and BGR for
# easy use with OpenCV.
# dataset = pykitti.odometry(basedir, sequence)
dataset = pykitti.odometry(basedir, sequence, frames=range(0, 20, 5))
# dataset.calib: Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of timedelta objects
# dataset.poses: Generator to load ground truth poses T_w_cam0
# dataset.camN: Generator to load individual images from camera N
# dataset.gray: Generator to load monochrome stereo pairs (cam0, cam1)
# dataset.rgb: Generator to load RGB stereo pairs (cam2, cam3)
# dataset.velo: Generator to load velodyne scans as [x,y,z,reflectance]
# Grab some data
second_pose = next(iter(itertools.islice(dataset.poses, 1, None)))
first_gray = next(iter(dataset.gray))
first_cam1 = next(iter(dataset.cam1))
first_rgb = next(iter(dataset.rgb))
first_cam2 = next(iter(dataset.cam2))
third_velo = next(iter(itertools.islice(dataset.velo, 2, None)))
# Display some of the data
np.set_printoptions(precision=4, suppress=True)
print('\nSequence: ' + str(dataset.sequence))
print('\nFrame range: ' + str(dataset.frames))
# print('\nGray stereo pair baseline [m]: ' + str(dataset.calib.b_gray))
print('\nRGB stereo pair baseline [m]: ' + str(dataset.calib.b_rgb))
print('\nFirst timestamp: ' + str(dataset.timestamps[0]))
print('\nSecond ground truth pose:\n' + str(second_pose))
f, ax = plt.subplots(2, 2, figsize=(15, 5))
ax[0, 0].imshow(first_gray[0], cmap='gray')
ax[0, 0].set_title('Left Gray Image (cam0)')
ax[0, 1].imshow(first_cam1, cmap='gray')
ax[0, 1].set_title('Right Gray Image (cam1)')
ax[1, 0].imshow(first_cam2)
ax[1, 0].set_title('Left RGB Image (cam2)')
ax[1, 1].imshow(first_rgb[1])
ax[1, 1].set_title('Right RGB Image (cam3)')
f2 = plt.figure()
ax2 = f2.add_subplot(111, projection='3d')
# Plot every 10th point so things don't get too bogged down
velo_range = range(0, third_velo.shape[0], 10)
ax2.scatter(third_velo[velo_range, 0],
third_velo[velo_range, 1],
third_velo[velo_range, 2],
c=third_velo[velo_range, 3],
cmap='gray',
s=0.1)
ax2.axis('equal')
ax2.set_title('Third Velodyne scan (subsampled)')
plt.show()
|
# -*- coding:utf-8 -*-
# @author :adolf
import os
from data.data_utils import order_points_clockwise
from data.data_aug import *
from data.make_labels import *
class CurrentOcrData(object):
def __init__(self, root, pre_processes=None, transforms=None, filter_keys=None, ignore_tags=None, is_training=True):
self.is_training = is_training
self.root = root
self.transforms = transforms
self.pre_processes = pre_processes
self.filter_key = filter_keys
self.ignore_tags = ignore_tags
self.aug = list()
self.imgs = list(sorted(os.listdir(os.path.join(root, "imgs"))))
self.gts = list(sorted(os.listdir(os.path.join(root, "gts"))))
self.init_pre_process()
def __len__(self):
return len(self.imgs) # - 1
def __getitem__(self, item):
img_path = os.path.join(self.root, 'imgs', self.imgs[item])
gt_path = os.path.join(self.root, 'gts', self.gts[item])
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
targets = self.get_annotation(gt_path)
targets['img'] = img
targets['shape'] = [img.shape[0], img.shape[1]]
targets = self.apply_pre_process(targets)
if self.transforms is not None:
targets['img'] = self.transforms(targets['img'])
targets['text_polys'] = targets['text_polys'].tolist()
if self.filter_key is not None and self.is_training:
targets_dict = dict()
for k, v in targets.items():
if k not in self.filter_key:
targets_dict[k] = v
return targets['img'], targets_dict
else:
return targets['img'], targets
def get_annotation(self, gt_path):
boxes = list()
texts = list()
ignores = list()
with open(gt_path, encoding='utf-8', mode='r') as f:
for line in f.readlines():
params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
# print(params)
try:
box = order_points_clockwise(np.array(list(map(float, params[:8]))).reshape(-1, 2))
# print(box)
if cv2.contourArea(box) > 0:
boxes.append(box)
texts.append(params[8])
ignores.append(params[8] in self.ignore_tags)
except Exception as e:
print(e)
                    print('get annotation failed for {}'.format(gt_path))
data = {'text_polys': np.array(boxes),
'texts': texts,
'ignore_tags': ignores}
return data
def init_pre_process(self):
if self.pre_processes is not None:
for aug in self.pre_processes:
if 'args' not in aug:
args = {}
else:
args = aug['args']
if isinstance(args, dict):
cls = eval(aug['type'])(**args)
else:
cls = eval(aug['type'])(args)
self.aug.append(cls)
def apply_pre_process(self, data):
for aug in self.aug:
data = aug(data)
return data
if __name__ == '__main__':
dataset = CurrentOcrData('/home/shizai/data2/ocr_data/rctw')
dataset.get_annotation('/home/shizai/data2/ocr_data/rctw/gts/rctw_image_3629.txt')
|
# -*- coding: utf-8 -*-
"""
Binary tree: populating next right pointers in each node II
https://leetcode-cn.com/problems/populating-next-right-pointers-in-each-node-ii/
"""
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
class Solution:
def processChild(self, childNode: 'Node', prev: 'Node', leftmost: 'Node') -> tuple:
        # The child node exists
        if childNode:
            # prev is already set: at least one node on the next level has been
            # seen, so point prev.next at this child
            if prev:
                prev.next = childNode
            # prev is not set, so this child is the first node of the next level;
            # record it as the next level's leftmost node
            else:
                leftmost = childNode
            # advance prev to the current child
            prev = childNode
        # if the child does not exist, prev and leftmost stay unchanged
return prev, leftmost
def connect(self, root: 'Node') -> 'Node':
"""
        Idea: use the next pointers that are already in place. Once level N is linked,
        walk it to build the next pointers of level N+1.
        1. leftmost: the leftmost node of each level; it acts as the head of that
           level's linked list, from which every node of the level is visited.
        2. curr: iterates over all nodes of the current level, moving from the
           leftmost node to the last node of the level.
        3. prev: points into the next level and is used to wire up its next pointers.
           a. prev initialization: at the start of each level prev is reset to None;
              the first child found on the next level is assigned to prev.
           b. If the current node has no left child, prev moves to the right child.
           c. If the next node has no children, prev is left unchanged.
           d. If the next node has both children, prev points to the left child first
              and, once that is processed, to the right child.
        Time complexity: O(n), every node is processed once.
        Space complexity: O(1), no extra space is needed.
        :param root: original binary tree
        :return: binary tree with next pointers filled in
"""
if not root:
return root
        # The first level has only the root node, so initialize leftmost to root
        leftmost = root
        # Traverse level by level; stop once a level has no leftmost node
        while leftmost:
            # For each level, reset prev to None and start curr at the level's
            # leftmost node, walking the level through its next pointers
            prev, curr = None, leftmost
            # Reset the next level's leftmost node
            leftmost = None
            # Walk the current level via the next pointers
            while curr:
                # Process the current node's children, updating prev and leftmost
                prev, leftmost = self.processChild(curr.left, prev, leftmost)
                prev, leftmost = self.processChild(curr.right, prev, leftmost)
                # Move to the next node of the current level
                curr = curr.next
return root
def connect2(self, root: 'Node') -> 'Node':
"""
        Idea: while traversing the current level, record the first node of the next
        level behind a dummy (sentinel) node and chain that next level together with
        next pointers. Once the current level is done, the dummy node gives the start
        of the next level, and the process repeats level by level.
        Time complexity: O(n), every node is processed once.
        Space complexity: O(1), no extra space is used.
        :param root: original binary tree
        :return: binary tree with next pointers filled in
"""
        # Start curr at the root node
        curr = root
        # curr holds the first node of the level that is about to be traversed
        while curr:
            # Create the dummy (sentinel) node for the next level
            dummy = Node()
            # tail starts at the dummy node and appends children to the chain
            tail = dummy
            # Walk the current level, using tail to chain the next level together
            while curr:
                if curr.left:
                    tail.next = curr.left
                    tail = tail.next
                if curr.right:
                    tail.next = curr.right
                    tail = tail.next
                # The next node of the current level comes from its next pointer
                # (walking a level is effectively a linked-list traversal)
                curr = curr.next
            # dummy.next is the first node of the next level
            curr = dummy.next
return root
|
from tinypy.runtime.testing import UnitTest
class MyTest(UnitTest):
def test_lessthan(self):
assert [1, 2] < [2]
assert [1, 2] <= [2]
assert [1] < [2]
assert [1] <= [2]
assert [] < [1]
def test_greaterthan(self):
assert [2] > [1]
assert [1, 2] > [1]
assert [1, 2] >= [1]
assert [1, 2] >= [1, 2]
assert [2] > []
def test_equal(self):
assert [1] == [1]
assert [1, 2] == [1, 2]
assert [] == []
# FIXME:
# As we don't have an iterable type, there is not much point
# to define min and max.
# We shall probably remove min and max from builtins.
def test_max(self):
assert max(1, 2, 3) == 3
assert max(3, 1, 2) == 3
assert max(3, 1, 3) == 3
def test_min(self):
assert min(1, 2, 3) == 1
assert min(3, 1, 2) == 1
assert min(2, 1, 1) == 1
def test_slice(self):
# FIXME: support 1:2 and 1:2:1
assert [0, 1, 2, 3][1, 2] == [1]
assert [0, 1, 2, 3][1, None] == [1, 2, 3]
assert [0, 1, 2, 3][None, None] == [0, 1, 2, 3]
assert [0, 1, 2, 3][None, 1] == [0]
assert [0, 1, 2, 3][None, 2] == [0, 1]
t = MyTest()
t.run()
|
# Generated by Django 3.2.5 on 2022-03-24 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0008_alter_product_product_name'),
]
operations = [
migrations.AlterField(
model_name='product',
name='images',
field=models.ImageField(blank=True, upload_to='photos/products'),
),
]
|
# -*- coding: utf-8 -*-
import fnmatch
import os
from os.path import join as _j
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
help = "Removes all python bytecode compiled files from the project."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
'--optimize', '-o', '-O', action='store_true',
dest='optimize', default=False,
help='Remove optimized python bytecode files'
)
parser.add_argument(
'--path', '-p', action='store', dest='path',
help='Specify path to recurse into'
)
@signalcommand
def handle(self, *args, **options):
project_root = options.get("path", getattr(settings, 'BASE_DIR', None))
if not project_root:
project_root = getattr(settings, 'BASE_DIR', None)
verbosity = options["verbosity"]
if not project_root:
raise CommandError("No --path specified and settings.py does not contain BASE_DIR")
exts = options["optimize"] and "*.py[co]" or "*.pyc"
for root, dirs, filenames in os.walk(project_root):
for filename in fnmatch.filter(filenames, exts):
full_path = _j(root, filename)
if verbosity > 1:
self.stdout.write("%s\n" % full_path)
os.remove(full_path)
|
from config import Configuration
from pymongo import MongoClient
c = Configuration("config.json")
client = MongoClient(c.mongo_uri)
database = client.linkage_agent
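# Group match_groups documents by the size of their run_results array and count
# how many documents fall into each group size.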
results = database.match_groups.aggregate(
[
{
"$group": {
"_id": {"$size": "$run_results"},
"total": {"$sum": 1},
}
}
]
)
for result in results:
print(result)
|
from nose.tools import assert_almost_equal, assert_equal, raises
from numpy.testing import assert_allclose
import numpy as np
from SALib.test_functions.Sobol_G import evaluate, total_variance, \
partial_first_order_variance, \
sensitivity_index, \
total_sensitivity_index
def test_Sobol_G():
'''
'''
parameter_values = np.zeros((1, 8))
actual = evaluate(parameter_values)
expected = np.array([4.0583])
assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)
@raises(ValueError)
def test_Sobol_G_raises_error_if_values_wrong_size():
"""
Tests that a value error is raised if the Sobol G function is called with
the wrong number of variables
"""
a = [1, 2, 3, 4, 5, 6, 7, 8]
evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)
@raises(ValueError)
def test_Sobol_G_raises_error_if_values_gt_one():
"""
Tests that a value error is raised if the Sobol G function is called with
values greater than one
"""
evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))
@raises(ValueError)
def test_Sobol_G_raises_error_if_values_lt_zero():
"""
Tests that a value error is raised if the Sobol G function is called with
values less than zero.
"""
evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))
@raises(TypeError)
def test_Sobol_G_raises_error_if_values_not_numpy_array():
"""
Tests that a type error is raised if the Sobol G function is called with
values argument not as a numpy array.
"""
fixture = [list(range(8)), str(12345678)]
for x in fixture:
evaluate(x)
def test_total_variance():
a = np.array([78, 12, 0.5, 2, 97, 33])
actual = total_variance(a)
expected = 0.19347
assert_allclose(actual, expected, rtol=1e-4)
def test_partial_first_order_variance():
a = np.array([78, 12, 0.5, 2, 97, 33])
actual = partial_first_order_variance(a)
expected = (len(a),)
assert_equal(a.shape, expected)
expected = np.array([0.000053, 0.001972, 0.148148, 0.037037, 0.000035, 0.000288])
assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)
def test_sensitivity_index():
a = np.array([78, 12, 0.5, 2, 97, 33])
actual = sensitivity_index(a)
expected = np.array([0.000276, 0.010195, 0.765743,
0.191436, 0.000179, 0.001490])
assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)
def test_total_sensitivity_index():
a = np.array([78, 12, 0.5, 2, 97, 33])
actual = total_sensitivity_index(a)
expected = np.array([0.030956547, 0.040875287, 0.796423551,
0.222116249, 0.030859879, 0.032170899])
assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DigitalTwinInterfacesPatchInterfacesValue(Model):
"""DigitalTwinInterfacesPatchInterfacesValue.
:param properties: List of properties to update in an interface.
:type properties: dict[str,
~service.models.DigitalTwinInterfacesPatchInterfacesValuePropertiesValue]
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': '{DigitalTwinInterfacesPatchInterfacesValuePropertiesValue}'},
}
def __init__(self, **kwargs):
super(DigitalTwinInterfacesPatchInterfacesValue, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
|
"""Inconsistent spelling.
---
layout: post
source: Intelligent Editing Ltd.
source_url: http://bit.ly/1x3hYj7
title: Inconsistent spelling
date: 2014-06-10 12:31:19
categories: writing
---
Intelligent Editing Ltd. says:
> Some words have more than one correct spelling. American, British, Australian
and Canadian English all have their own preferences. Even within those, there
can be multiple spellings. For example, in the UK 'realise' is often preferred.
However, 'realize' has been used in British-English for centuries and is
preferred in the Oxford English Dictionary. However, no matter which spelling
is preferred, one thing is always wrong: you mustn't use two different
spellings in the same document.
"""
from proselint.tools import consistency_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "consistency.spelling"
msg = "Inconsistent spelling of '{}' (vs. '{}')."
word_pairs = [
["advisor", "adviser"],
# ["analyse", "analyze"],
["centre", "center"],
["colour", "color"],
["emphasise", "emphasize"],
["finalise", "finalize"],
["focussed", "focused"],
["labour", "labor"],
["learnt", "learned"],
["organise", "organize"],
["organised", "organized"],
["organising", "organizing"],
["recognise", "recognize"],
]
return consistency_check(text, word_pairs, err, msg)
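# Hypothetical usage sketch (not part of proselint itself); the exact shape of
# the error tuples returned by consistency_check is assumed, not verified:
#
#     sample = "We will organise the review, then organize the follow-up."
#     for error in check(sample):
#         print(error)
#
# Mixing 'organise' and 'organize' in the same text should be flagged.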
|
from setuptools import find_packages, setup
if __name__ == "__main__":
setup(
name="manipulathor",
packages=find_packages(),
version="0.0.1",
install_requires=[
"allenact==0.2.2",
"allenact_plugins[ithor]==0.2.2",
"setuptools",
],
)
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 Dinesh Pinto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import logging
import os
import sys
from json import JSONDecodeError
import numpy as np
import pandas as pd
from tqdm import tqdm
from .infura_api import InfuraAPI
from .opensea_api import OpenSeaAPI
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class NFTAnalytics(OpenSeaAPI):
def __init__(self, asset_contract_address: str):
super().__init__(asset_contract_address)
self.eth_api = InfuraAPI()
@staticmethod
def make_directories(folder_name: str):
""" Set up directories for data and results if they don't exist. """
data_folder = os.path.join("data", folder_name)
result_folder = os.path.join("results", folder_name)
if not os.path.isdir(data_folder):
logger.info(f"Making directoy {data_folder}")
os.makedirs(data_folder)
if not os.path.isdir(result_folder):
logger.info(f"Making directoy {result_folder}")
os.makedirs(result_folder)
return data_folder, result_folder
def fetch_data(self, max_offset: int = 10000, collection: str = None) -> list:
"""
Query OpenSea API for collection data, offset is shifted until max
offset is reached (i.e. number of items in a collection).
"""
local_assets = []
pbar = tqdm(range(0, max_offset + 1, 50))
for offset in pbar:
pbar.set_description(f"{offset}")
try:
asset_data = self.get_asset_data(offset=offset, limit=50, collection=collection)
except JSONDecodeError:
logger.error(f"Only fetched data till offset={offset - 1}. "
f"Warning={self.get_asset_data(offset=offset, limit=50)}")
return local_assets
if "assets" not in asset_data:
logger.error(f"Only fetched data till offset={offset - 1}. Warning={asset_data}")
return local_assets
for asset in asset_data["assets"]:
local_assets.append(asset)
return local_assets
def fetch_events(self, max_offset: int = 10000) -> list:
"""
Query OpenSea API for event data, offset is shifted until max
offset is reached (i.e. number of items in a collection).
"""
local_events = []
pbar = tqdm(range(0, max_offset + 1, 300))
for offset in pbar:
pbar.set_description(f"{offset}")
try:
event_data = self.get_event_data(offset=offset, limit=300)
except JSONDecodeError:
logger.error(f"Only fetched data till offset={offset - 1}. "
f"Warning={self.get_asset_data(offset=offset, limit=50)}")
return local_events
if "asset_events" not in event_data:
logger.error(f"Only fetched data till offset={offset - 1}. Warning={event_data}")
return local_events
for event in event_data["asset_events"]:
local_events.append(event)
return local_events
@staticmethod
def save_json(asset_data: list, filename: str = "data.json"):
with open(filename, 'w', encoding='utf-8') as f:
json.dump(asset_data, f, ensure_ascii=False, indent=4)
logger.info(f"Saved asset data to {filename}")
@staticmethod
def load_json(filename: str = "data.json") -> list:
with open(filename) as f:
asset_data = json.load(f)
return asset_data
@staticmethod
def get_trait_values_for_type(asset_data: list, trait_type: str) -> list:
""" Get all possible values of traits for a specific type of trait. """
trait_values = []
for asset in asset_data:
for traits in asset["traits"]:
if traits["trait_type"] == trait_type and traits["value"] not in trait_values:
trait_values.append(traits["value"])
return trait_values
def get_trait_type_median_price(self, asset_data: list, trait_type: str) -> dict:
""" Get the median price of a specific trait type. """
trait_value_prices = {}
for value in self.get_trait_values_for_type(asset_data, trait_type):
listing_prices_trait = []
for asset in asset_data:
if asset["sell_orders"]:
for traits in asset["traits"]:
if traits["trait_type"] == trait_type and traits["value"] == value:
listing_prices_trait.append(float(asset["sell_orders"][0]["base_price"]) / 1e18)
trait_value_prices[value] = np.median(np.array(listing_prices_trait))
return dict(sorted(trait_value_prices.items(), key=lambda item: item[1], reverse=True))
def get_median_prices(self, asset_data: list, traits_dict: dict) -> np.ndarray:
""" Get median prices of all trait types. """
median_prices = []
for trait_type, trait_value in traits_dict.items():
median_prices.append(self.get_trait_type_median_price(asset_data, trait_type)[trait_value])
return np.array(median_prices)
def get_traits_with_median_prices(self, asset_data: list, asset: dict) -> dict:
""" Get median prices of trait types for specific asset. """
traits = {}
for trait in asset["traits"]:
traits[trait["trait_type"]] = trait["value"]
trait_prices = {}
for trait_type, trait_value in traits.items():
price = self.get_trait_type_median_price(asset_data, trait_type)[trait_value]
trait_prices[trait_value + " " + trait_type] = price
return trait_prices
def get_nft_holdings(self, asset_data: list, asset_name: str, eth_balances: bool = True) \
-> pd.DataFrame:
""" Query the number of NFTs held and/or the ETH balances of addresses in a collection. """
nfts_held = {}
for asset in asset_data:
nfts_held[asset["owner"]["address"]] = 0
for asset in asset_data:
nfts_held[asset["owner"]["address"]] += 1
logger.info(f"Total NFTs in collection = {sum(nfts_held.values())}")
if eth_balances:
logger.info(f"Getting NFT holdings and ETH balances...")
df = pd.DataFrame(columns=["Address", asset_name, "ETH_balance"])
pbar = tqdm(nfts_held.items())
for idx, (address, num_nfts) in enumerate(pbar):
pbar.set_description(f"{idx}")
df.loc[idx] = [address, num_nfts, self.eth_api.get_eth_balance(address)]
else:
logger.info(f"Getting NFT holdings...")
df = pd.DataFrame(columns=["Address", asset_name])
pbar = tqdm(nfts_held.items())
for idx, (address, num_nfts) in enumerate(pbar):
pbar.set_description(f"{idx}")
df.loc[idx] = [address, num_nfts]
etherscan_links = []
for address in df["Address"]:
etherscan_links.append(f"https://etherscan.io/address/{address}")
df["Etherscan_link"] = etherscan_links
opensea_links = []
for address in df["Address"]:
opensea_links.append(f"https://opensea.io/{address}")
df["OpenSea_link"] = opensea_links
return df
@staticmethod
def calculate_rarity_df(asset_data: list, items_in_collection: int) -> pd.DataFrame:
"""
Calculate rarity of a particular trait.
Uses the formula from rarity tools, full article at:
raritytools.medium.com/ranking-rarity-understanding-rarity-calculation-methods-86ceaeb9b98c
Formula:
[Rarity Score for a Trait Value] =
1 / ([Number of Items with that Trait Value] / [Total Number of Items in Collection])
The total Rarity Score for an NFT is the sum of the Rarity Score of all of its trait values.
"""
df = pd.DataFrame(columns=["Name", "Price", "Rarity", "RarityPriceRatio"])
for idx, asset in enumerate(asset_data):
if asset["sell_orders"]:
if asset["sell_orders"][0]["payment_token_contract"]["symbol"] == "ETH":
price = float(asset["sell_orders"][0]["current_price"]) / 1e18
if price != 0:
rarity = 0
for trait in asset["traits"]:
trait_count = int(trait["trait_count"])
if trait_count != 0:
rarity += 1 / (trait_count / items_in_collection)
name = asset["name"]
df.loc[idx] = [name, price, rarity, rarity / price]
return df
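# Hypothetical usage sketch (not part of the original module). The contract
# address and collection slug below are placeholders, and valid OpenSea/Infura
# credentials are assumed to be configured in the underlying API wrappers:
#
#     nft = NFTAnalytics("0x0000000000000000000000000000000000000000")
#     assets = nft.fetch_data(max_offset=1000, collection="example-collection")
#     nft.save_json(assets, filename="data/example/assets.json")
#     rarity_df = nft.calculate_rarity_df(assets, items_in_collection=10000)
#     print(rarity_df.sort_values("Rarity", ascending=False).head())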
|
"""
proxy patterns: 代理模式
为其它对象提供一种代理以控制对这个对象的操作
要素:
一个开放的方法集(interface)
实现相应方法集的proxy 对象
实现了相应方法集的类
应用:
远程代理, 为一个对象在不同地址空间提供局部代表, 这样就可以隐藏一个对象存在于不同地址空间的事实
哪么两个进程间, 是否可以通过这样的方式实现数据共享
虚拟代理, 根据需要创建开销很大的对象, 通过它存放实例化需要很长时间的真实对象
安全代理, 用来控制真实对象访问时的权限
"""
class GiveGift:
    def give_dolls(self):
        raise NotImplementedError("give dolls not implemented")
    def give_flowers(self):
        raise NotImplementedError("give flowers not implemented")
    def give_chocolate(self):
        raise NotImplementedError("give chocolate not implemented")
class Pursuit(GiveGift):
def __init__(self, mm):
self.mm = mm
    def give_dolls(self):
        print(self.mm, "\t here is a doll for you")
    def give_flowers(self):
        print(self.mm, "\t here are flowers for you")
    def give_chocolate(self):
        print(self.mm, "\t here is chocolate for you")
class Proxy(GiveGift):
def __init__(self, mm):
self.gg = Pursuit(mm)
def give_dolls(self):
self.gg.give_dolls()
def give_flowers(self):
self.gg.give_flowers()
    def give_chocolate(self):
        self.gg.give_chocolate()
def test():
mm = "娇娇"
print("hello" + "world")
proxy = Proxy(mm)
proxy.give_dolls()
proxy.give_flowers()
    proxy.give_chocolate()
if __name__ == '__main__':
test()
|
from datetime import date
from pathlib import Path
from miranda.eccc import aggregate_nc_files, convert_hourly_flat_files
if __name__ == "__main__":
var_names = [
"atmospheric_pressure",
"wind_speed",
"relative_humidity",
"dry_bulb_temperature",
"freezing_rain",
"ice_pellet_presence",
"rainfall_amount",
"precipitation_flux",
]
station_file = "/home/tjs/Desktop/ec_data/Station Inventory EN.csv"
source_data = Path("/home/tjs/Desktop/ec_data/eccc_all")
convert_hourly_flat_files(
source_files=source_data, output_folder=source_data, variables=var_names
)
for var in var_names:
out_file = source_data.joinpath(
"{}_eccc_hourly_{}".format(var, date.today().strftime("%Y%m%d"))
)
aggregate_nc_files(
source_files=source_data,
output_file=out_file,
variables=var,
station_inventory=station_file,
)
|
# Generated by Django 2.1.2 on 2019-01-28 07:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Email",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("from_email", models.EmailField(max_length=200)),
("to_email", models.EmailField(max_length=200)),
("subject", models.CharField(max_length=200)),
("message", models.CharField(max_length=200)),
("file", models.FileField(null=True, upload_to="files/")),
("send_time", models.DateTimeField(auto_now_add=True)),
("status", models.CharField(default="sent", max_length=200)),
("important", models.BooleanField(default=False, max_length=10)),
],
options={"ordering": ["-id"],},
),
]
|
"""
Writes out hex colors from color scales provided in matplotlib
into JS file
python colors_from_mpl.py >> js/colorscales.js
"""
import itertools
import json
import numpy as np
import matplotlib.colors
import matplotlib.cm
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
cmap_names = [
('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])
]
cm_names = [cat[1] for cat in cmap_names]
print("var mpl_scales = {")
for name in itertools.chain.from_iterable(cm_names):
cmap = matplotlib.cm.get_cmap(name)
values = np.linspace(0, 1, cmap.N)
rgba = cmap(values)
    hex_colors = np.apply_along_axis(matplotlib.colors.rgb2hex, axis=1, arr=rgba)
    print(' "{}": {},\n'.format(name, json.dumps(hex_colors.tolist())))
print("};")
|
# Copyright 2017 Rodeo FX. All rights reserved.
from .utils import dpiScale
from .utils import toPyObject
from .walterBaseTreeView import ACTIONS
from .walterBaseTreeView import BaseDelegate
from .walterBaseTreeView import BaseItem
from .walterBaseTreeView import BaseModel
from .walterBaseTreeView import BaseTreeView
from .walterBaseTreeView import NODE_BAR_COLOUR
from .walterLayersView import LayersItem
from .walterLayersView import LayersModel
from .walterLayersView import LayersView
from .walterComplexMenu import ComplexMenu
from .walterBaseVariantsMenu import BaseVariantAction
from .walterBaseVariantsMenu import BaseVariantSetMenu
from .walterBaseVariantsMenu import BaseVariantsMenu
|
#!/usr/bin/env python
"""
Example of running a prompt_toolkit application in an asyncssh server.
"""
import asyncio
import logging
import asyncssh
from pygments.lexers.html import HtmlLexer
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.contrib.ssh import PromptToolkitSSHServer, PromptToolkitSSHSession
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.shortcuts import ProgressBar, print_formatted_text
from prompt_toolkit.shortcuts.dialogs import input_dialog, yes_no_dialog
from prompt_toolkit.shortcuts.prompt import PromptSession
animal_completer = WordCompleter(
[
"alligator",
"ant",
"ape",
"bat",
"bear",
"beaver",
"bee",
"bison",
"butterfly",
"cat",
"chicken",
"crocodile",
"dinosaur",
"dog",
"dolphin",
"dove",
"duck",
"eagle",
"elephant",
"fish",
"goat",
"gorilla",
"kangaroo",
"leopard",
"lion",
"mouse",
"rabbit",
"rat",
"snake",
"spider",
"turkey",
"turtle",
],
ignore_case=True,
)
async def interact(ssh_session: PromptToolkitSSHSession) -> None:
"""
The application interaction.
This will run automatically in a prompt_toolkit AppSession, which means
that any prompt_toolkit application (dialogs, prompts, etc...) will use the
SSH channel for input and output.
"""
prompt_session = PromptSession()
# Alias 'print_formatted_text', so that 'print' calls go to the SSH client.
print = print_formatted_text
print("We will be running a few prompt_toolkit applications through this ")
print("SSH connection.\n")
# Simple progress bar.
with ProgressBar() as pb:
for i in pb(range(50)):
await asyncio.sleep(0.1)
# Normal prompt.
text = await prompt_session.prompt_async("(normal prompt) Type something: ")
print("You typed", text)
# Prompt with auto completion.
text = await prompt_session.prompt_async(
"(autocompletion) Type an animal: ", completer=animal_completer
)
print("You typed", text)
# prompt with syntax highlighting.
text = await prompt_session.prompt_async(
"(HTML syntax highlighting) Type something: ", lexer=PygmentsLexer(HtmlLexer)
)
print("You typed", text)
# Show yes/no dialog.
await prompt_session.prompt_async("Showing yes/no dialog... [ENTER]")
await yes_no_dialog("Yes/no dialog", "Running over asyncssh").run_async()
# Show input dialog
await prompt_session.prompt_async("Showing input dialog... [ENTER]")
await input_dialog("Input dialog", "Running over asyncssh").run_async()
def main(port=8222):
# Set up logging.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncssh.create_server(
lambda: PromptToolkitSSHServer(interact),
"",
port,
server_host_keys=["/etc/ssh/ssh_host_ecdsa_key"],
)
)
loop.run_forever()
if __name__ == "__main__":
main()
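# To try the example (assuming the host key path above exists and port 8222 is
# free): run this script, then connect with e.g. `ssh localhost -p 8222`.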
|
from .tool.func import *
def login_register_2(conn):
curs = conn.cursor()
if ban_check(None, 'login') == 1:
return re_error('/ban')
ip = ip_check()
admin = admin_check()
if admin != 1 and ip_or_user(ip) == 0:
return redirect('/user')
if admin != 1:
curs.execute(db_change('select data from other where name = "reg"'))
set_d = curs.fetchall()
if set_d and set_d[0][0] == 'on':
return re_error('/ban')
if flask.request.method == 'POST':
if captcha_post(flask.request.form.get('g-recaptcha-response', flask.request.form.get('g-recaptcha', ''))) == 1:
return re_error('/error/13')
else:
captcha_post('', 0)
user_id = flask.request.form.get('id', '')
user_pw = flask.request.form.get('pw', '')
user_repeat = flask.request.form.get('pw2', '')
if user_id == '' or user_pw == '':
return re_error('/error/27')
if user_pw != user_repeat:
return re_error('/error/20')
if re.search(r'(?:[^A-Za-zㄱ-힣0-9])', user_id):
return re_error('/error/8')
curs.execute(db_change('select html from html_filter where kind = "name"'))
set_d = curs.fetchall()
for i in set_d:
check_r = re.compile(i[0], re.I)
if check_r.search(user_id):
return re_error('/error/8')
if len(user_id) > 32:
return re_error('/error/7')
curs.execute(db_change("select id from user where id = ?"), [user_id])
if curs.fetchall():
return re_error('/error/6')
curs.execute(db_change("select id from user_application where id = ?"), [user_id])
if curs.fetchall():
return re_error('/error/6')
hashed = pw_encode(user_pw)
ans_q = flask.request.form.get('approval_question_answer', '')
curs.execute(db_change('select data from other where name = "requires_approval"'))
requires_approval = curs.fetchall()
requires_approval = requires_approval and requires_approval[0][0] == 'on'
requires_approval = None if admin == 1 else requires_approval
if requires_approval:
curs.execute(db_change('select data from other where name = "approval_question"'))
approval_question = curs.fetchall()
approval_question = approval_question[0][0] if approval_question and approval_question[0][0] else ''
else:
approval_question = ''
# c_id, c_pw, c_ans, c_que, c_key, c_type
flask.session['c_id'] = user_id
flask.session['c_pw'] = hashed
flask.session['c_type'] = 'register'
if requires_approval:
flask.session['c_ans'] = flask.request.form.get('approval_question_answer', '')
flask.session['c_que'] = approval_question
curs.execute(db_change('select data from other where name = "email_have"'))
sql_data = curs.fetchall()
if sql_data and sql_data[0][0] != '' and admin != 1:
flask.session['c_key'] = load_random_key(32)
return redirect('/need_email')
else:
flask.session['c_key'] = 'email_pass'
return redirect('/check_key')
else:
curs.execute(db_change('select data from other where name = "contract"'))
data = curs.fetchall()
contract = (data[0][0] + '<hr class="main_hr">') if data and data[0][0] != '' else ''
approval_question = ''
curs.execute(db_change('select data from other where name = "requires_approval"'))
requires_approval = curs.fetchall()
requires_approval = requires_approval and requires_approval[0][0] == 'on'
requires_approval = None if admin == 1 else requires_approval
if requires_approval:
curs.execute(db_change('select data from other where name = "approval_question"'))
data = curs.fetchall()
if data and data[0][0] != '':
approval_question = '''
<hr class="main_hr">
<span>''' + load_lang('approval_question') + ' : ' + data[0][0] + '''<span>
<hr class="main_hr">
<input placeholder="''' + load_lang('approval_question') + '''" name="approval_question_answer" type="text">
<hr class="main_hr">
'''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('register'), wiki_set(), custom(), other2([0, 0])],
data = '''
<form method="post">
''' + contract + '''
<input placeholder="''' + load_lang('id') + '''" name="id" type="text">
<hr class="main_hr">
<input placeholder="''' + load_lang('password') + '''" name="pw" type="password">
<hr class="main_hr">
<input placeholder="''' + load_lang('password_confirm') + '''" name="pw2" type="password">
<hr class="main_hr">
''' + approval_question + '''
''' + captcha_get() + '''
<button type="submit">''' + load_lang('save') + '''</button>
''' + http_warrin() + '''
</form>
''',
menu = [['user', load_lang('return')]]
))
|
#!/usr/bin/env python3
# Copyright 2022 joseph
# See LICENSE file for licensing details.
#
# Learn more at: https://juju.is/docs/sdk
import logging
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
from charms.service_discovery_operator.v0.event import DiscoveryEventCharmEvents
from charms.service_discovery_operator.v0.service_discovery import ServiceDiscovery
logger = logging.getLogger(__name__)
class ServiceDiscoveryCharm(CharmBase):
on = DiscoveryEventCharmEvents()
_stored = StoredState()
def __init__(self, *args):
super().__init__(*args)
self._stored.set_default(discovery_pid=None)
self._stored.set_default(discovery_payload=None)
self._service_discovery = ServiceDiscovery(self)
self.framework.observe(self.on.start, self._on_start)
self.framework.observe(self.on.discovery, self._on_discovery)
self.framework.observe(self.on.leader_elected, self._on_leader_elected)
def _on_start(self, event):
self.unit.status = ActiveStatus()
def _on_leader_elected(self, event):
if self.unit.is_leader():
self._service_discovery.start_discovery()
else:
self._service_discovery.stop_discovery()
def _on_discovery(self, event):
self.unit.status = ActiveStatus(self._read_discovery_payload())
def _read_discovery_payload(self):
with open(self.payload_file_name, 'r') as f:
return f.read()
@property
def unit_tag(self):
unit_num = self.unit.name.split("/")[-1]
return "unit-{}-{}".format(self.app.name, unit_num)
@property
def discovery_pid(self):
return self._stored.discovery_pid
@discovery_pid.setter
def discovery_pid(self, pid):
self._stored.discovery_pid = pid
@property
def payload_file_name(self):
return self._stored.payload_file_name
@payload_file_name.setter
def payload_file_name(self, file_name):
self._stored.payload_file_name = file_name
if __name__ == "__main__":
main(ServiceDiscoveryCharm)
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
from . import managers
class CustomUser(AbstractUser):
username = models.CharField(
max_length=150, help_text=_("The username of the user."), unique=True
)
email = models.EmailField(help_text=_("Email of the user."), unique=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["username"]
objects = managers.CustomUserManager()
class Meta:
ordering = ("id",)
def __str__(self):
return f"{self.email}'s account"
|
from reml import __version__
def test_version():
assert __version__ == "0.1.0"
|