hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f1300ee544f746d72bcb4f99dab11a92a54f94ce | 5,599 | py | Python | samples/smith_waterman/smith_waterman_main.py | mk864mk864/heterocl | 112031948af3653e30fd414e210a8f5a938579e9 | [
"Apache-2.0"
] | 236 | 2019-05-19T01:48:11.000Z | 2022-03-31T09:03:54.000Z | samples/smith_waterman/smith_waterman_main.py | mk864mk864/heterocl | 112031948af3653e30fd414e210a8f5a938579e9 | [
"Apache-2.0"
] | 248 | 2019-05-17T19:18:36.000Z | 2022-03-30T21:25:47.000Z | samples/smith_waterman/smith_waterman_main.py | AlgaPeng/heterocl-2 | b5197907d1fe07485466a63671a2a906a861c939 | [
"Apache-2.0"
] | 85 | 2019-05-17T20:09:27.000Z | 2022-02-28T20:19:00.000Z | """
HeteroCL Tutorial : Smith-Waterman Genomic Sequencing
=====================================================
**Author**: Yi-Hsiang Lai (seanlatias@github)
In this example, we demonstrate how to use a While loop in HeteroCL.
"""
import heterocl as hcl
import numpy as np
import time
lenA = 128
lenB = 128
num = 1024
penalty = -4
hcl.init()
dtype = hcl.UFixed(3)
mtype = hcl.Int(16)
def top(target=None):
    """Build, schedule and compile the batched Smith-Waterman kernel.

    Returns the compiled HeteroCL module for ``target`` (None = CPU
    simulation).  The kernel aligns ``num`` sequence pairs of lengths
    ``lenA``/``lenB`` and writes the aligned sequences into the output
    buffers.
    """
    def smith_waterman(seqA, seqB, consA, consB):
        # Align one pair of sequences; the aligned characters are written
        # into consA/consB (0 denotes a gap).
        def similarity_score(a, b):
            # Match scores 1, mismatch scores `penalty` (negative).
            return hcl.select(a == b, 1, penalty)
        def find_max(A, len_):
            # Return (max value, argmax index) over A[0:len_].
            max_ = hcl.scalar(A[0], "max")
            act_ = hcl.scalar(0, "act")
            with hcl.for_(0, len_) as i:
                with hcl.if_(A[i] > max_.v):
                    max_.v = A[i]
                    act_.v = i
            return max_.v, act_.v
        # Best score seen so far and its cell position; traceback starts there.
        # NOTE(review): "maxtrix_max" looks like a typo of "matrix_max" —
        # harmless, since the string is only an IR label.
        matrix_max = hcl.scalar(0, "maxtrix_max")
        i_max = hcl.scalar(0, "i_max")
        j_max = hcl.scalar(0, "j_max")
        # DP scoring matrix plus a parallel matrix of traceback actions
        # (initialized to 3 = "stop"); row/column 0 stay at score 0.
        matrix = hcl.compute((lenA + 1, lenB + 1), lambda x, y: 0, "matrix")
        action = hcl.compute(matrix.shape, lambda x, y: 3, "action")
        def populate_matrix(i, j):
            # Dynamic-programming step for cell (i, j): choose the best of
            # diagonal (match/mismatch), up (gap), left (gap), or zero.
            trace_back = hcl.compute((4,), lambda x: 0, "trace_back")
            with hcl.if_(hcl.and_(i != 0, j != 0)):
                trace_back[0] = matrix[i-1, j-1] + \
                                similarity_score(seqA[i-1], seqB[j-1])
                trace_back[1] = matrix[i-1, j] + penalty
                trace_back[2] = matrix[i, j-1] + penalty
                trace_back[3] = 0
                matrix[i, j], action[i, j] = find_max(trace_back, 4)
                # Track the globally best-scoring cell for the traceback.
                with hcl.if_(matrix[i, j] > matrix_max.v):
                    matrix_max.v = matrix[i, j]
                    i_max.v = i
                    j_max.v = j
        # Fill the entire DP matrix in place.
        P = hcl.mutate((lenA+1, lenB+1), lambda i, j: populate_matrix(i, j))
        def align(curr_i, curr_j, next_i, next_j):
            # Emit one aligned character pair; an unchanged index on either
            # side means a gap (encoded as 0).
            outA = hcl.scalar(0, "a")
            outB = hcl.scalar(0, "b")
            with hcl.if_(next_i.v == curr_i.v):
                outA.v = 0
            with hcl.else_():
                outA.v = seqA[curr_i.v - 1]
            with hcl.if_(next_j.v == curr_j.v):
                outB.v = 0
            with hcl.else_():
                outB.v = seqB[curr_j.v - 1]
            return outA.v, outB.v
        def get_next(action, i, j):
            # Follow the recorded action to the predecessor cell:
            # 0 = diagonal, 1 = up, 2 = left, otherwise stay put
            # (which terminates the traceback loop below).
            act_ = hcl.scalar(action[i][j], "act")
            next_i = hcl.scalar(0, "next_i")
            next_j = hcl.scalar(0, "next_j")
            with hcl.if_(act_.v == 0):
                next_i.v = i - 1
                next_j.v = j - 1
            with hcl.elif_(act_.v == 1):
                next_i.v = i - 1
                next_j.v = j
            with hcl.elif_(act_.v == 2):
                next_i.v = i
                next_j.v = j - 1
            with hcl.else_():
                next_i.v = i
                next_j.v = j
            return next_i.v, next_j.v
        # Traceback stage: walk back from the best cell until the position
        # stops moving, emitting aligned characters along the way.
        with hcl.Stage("T"):
            curr_i = hcl.scalar(i_max.v, "curr_i")
            curr_j = hcl.scalar(j_max.v, "curr_j")
            next_i = hcl.scalar(0, "next_i")
            next_j = hcl.scalar(0, "next_j")
            next_i.v, next_j.v = get_next(action, curr_i.v, curr_j.v)
            tick = hcl.scalar(0, "tick")
            with hcl.while_(hcl.or_(curr_i.v != next_i.v,
                                    curr_j.v != next_j.v)):
                consA[tick.v], consB[tick.v] = \
                    align(curr_i, curr_j, next_i, next_j)
                curr_i.v, curr_j.v = next_i.v, next_j.v
                next_i.v, next_j.v = get_next(action, curr_i.v, curr_j.v)
                tick.v += 1
    def batch_sw(seqAs, seqBs, outAs, outBs):
        # Apply smith_waterman to each of the `num` sequence pairs.
        hcl.mutate((num,),
                   lambda t: smith_waterman(seqAs[t], seqBs[t], outAs[t], outBs[t]),
                   "B")
    seqAs = hcl.placeholder((num, lenA), "seqAs", dtype)
    seqBs = hcl.placeholder((num, lenB,), "seqBs", dtype)
    outAs = hcl.placeholder((num, lenA+lenB), "outAs", dtype)
    outBs = hcl.placeholder((num, lenA+lenB), "outBs", dtype)
    # Quantize the DP matrices down to 16-bit ints, then split the batch
    # loop and pipeline/parallelize the resulting sub-loops.
    scheme = hcl.create_scheme([seqAs, seqBs, outAs, outBs], batch_sw)
    scheme.downsize([batch_sw.B.matrix, batch_sw.B.action], mtype)
    s = hcl.create_schedule_from_scheme(scheme)
    o, p = s[batch_sw.B].split(batch_sw.B.axis[0], factor=32)
    s[batch_sw.B].pipeline(o)
    s[batch_sw.B].parallel(p)
    return hcl.build(s, target=target)
###############################################################################
# Test the algorithm with random numbers
_seqA = hcl.asarray(np.random.randint(1, 5, size=(num, lenA)), dtype)
_seqB = hcl.asarray(np.random.randint(1, 5, size=(num, lenB)), dtype)
_consA = hcl.asarray(np.zeros((num, (lenA + lenB))), dtype)
_consB = hcl.asarray(np.zeros((num, (lenA + lenB))), dtype)
f = top()
start = time.time()
f(_seqA, _seqB, _consA, _consB)
total_time = time.time() - start
print("Kernel time (s): {:.2f}".format(total_time))
###############################################################################
# Test the algorithm with simple inputs
_seqA_np = np.ones((num, lenA))
for i in range(0, 4):
_seqA_np[0][i] = 2
_seqB_np = np.ones((num, lenB))
_seqA = hcl.asarray(_seqA_np, dtype)
_seqB = hcl.asarray(_seqB_np, dtype)
_consA = hcl.asarray(np.zeros((num, (lenA + lenB))), dtype)
_consB = hcl.asarray(np.zeros((num, (lenA + lenB))), dtype)
f(_seqA, _seqB, _consA, _consB)
_consA_np = _consA.asnumpy()
_consB_np = _consB.asnumpy()
for i in range(0, 256):
if i < 124:
assert _consA_np[0][i] == 1
else:
assert _consA_np[0][i] == 0
| 34.99375 | 81 | 0.512234 |
9ab249b0d6d10dbc978f7faaf063269fa8d0c506 | 33,476 | py | Python | python/ccxt/hitbtc.py | ZacharyATanenbaum/ccxt | 539a9e83dd2ca6a547103fd5910e0e776af5a48a | [
"MIT"
] | 2 | 2020-02-25T22:50:00.000Z | 2020-09-11T00:24:45.000Z | python/ccxt/hitbtc.py | rerefreshing/ccxt | 7c50f338dcb282c0aee4d69a1ac4ca47255fdf15 | [
"MIT"
] | 8 | 2018-09-04T05:28:15.000Z | 2018-12-21T08:10:35.000Z | python/ccxt/hitbtc.py | rerefreshing/ccxt | 7c50f338dcb282c0aee4d69a1ac4ca47255fdf15 | [
"MIT"
] | 2 | 2019-03-14T15:17:46.000Z | 2019-09-08T19:26:04.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class hitbtc (Exchange):
def describe(self):
return self.deep_extend(super(hitbtc, self).describe(), {
'id': 'hitbtc',
'name': 'HitBTC',
'countries': ['HK'],
'rateLimit': 1500,
'version': '1',
'has': {
'CORS': False,
'fetchTrades': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrderTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766555-8eaec20e-5edc-11e7-9c5b-6dc69fc42f5e.jpg',
'api': 'http://api.hitbtc.com',
'www': 'https://hitbtc.com',
'referral': 'https://hitbtc.com/?ref_id=5a5d39a65d466',
'doc': 'https://github.com/hitbtc-com/hitbtc-api/blob/master/APIv1.md',
'fees': [
'https://hitbtc.com/fees-and-limits',
'https://support.hitbtc.com/hc/en-us/articles/115005148605-Fees-and-limits',
],
},
'api': {
'public': {
'get': [
'{symbol}/orderbook',
'{symbol}/ticker',
'{symbol}/trades',
'{symbol}/trades/recent',
'symbols',
'ticker',
'time',
],
},
'trading': {
'get': [
'balance',
'orders/active',
'orders/recent',
'order',
'trades/by/order',
'trades',
],
'post': [
'new_order',
'cancel_order',
'cancel_orders',
],
},
'payment': {
'get': [
'balance',
'address/{currency}',
'transactions',
'transactions/{transaction}',
],
'post': [
'transfer_to_trading',
'transfer_to_main',
'address/{currency}',
'payout',
],
},
},
# hardcoded fees are deprecated and should only be used when there's no other way to get fee info
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': -0.01 / 100,
'taker': 0.1 / 100,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'BCC': 0.0018,
'ETH': 0.00215,
'BCH': 0.0018,
'USDT': 100,
'DASH': 0.03,
'BTG': 0.0005,
'LTC': 0.003,
'ZEC': 0.0001,
'XMR': 0.09,
'1ST': 0.84,
'ADX': 5.7,
'AE': 6.7,
'AEON': 0.01006,
'AIR': 565,
'AMP': 9,
'ANT': 6.7,
'ARDR': 1,
'ARN': 18.5,
'ART': 26,
'ATB': 0.0004,
'ATL': 27,
'ATM': 504,
'ATS': 860,
'AVT': 1.9,
'BAS': 113,
'BCN': 0.1,
'DAO.Casino': 124, # id = 'BET'
'BKB': 46,
'BMC': 32,
'BMT': 100,
'BNT': 2.57,
'BQX': 4.7,
'BTM': 40,
'BTX': 0.04,
'BUS': 0.004,
'CCT': 115,
'CDT': 100,
'CDX': 30,
'CFI': 61,
'CLD': 0.88,
'CND': 574,
'CNX': 0.04,
'COSS': 65,
'CSNO': 16,
'CTR': 15,
'CTX': 146,
'CVC': 8.46,
'DBIX': 0.0168,
'DCN': 120000,
'DCT': 0.02,
'DDF': 342,
'DENT': 6240,
'DGB': 0.4,
'DGD': 0.01,
'DICE': 0.32,
'DLT': 0.26,
'DNT': 0.21,
'DOGE': 2,
'DOV': 34,
'DRPU': 24,
'DRT': 240,
'DSH': 0.017,
'EBET': 84,
'EBTC': 20,
'EBTCOLD': 6.6,
'ECAT': 14,
'EDG': 2,
'EDO': 2.9,
'ELE': 0.00172,
'ELM': 0.004,
'EMC': 0.03,
'EMGO': 14,
'ENJ': 163,
'EOS': 1.5,
'ERO': 34,
'ETBS': 15,
'ETC': 0.002,
'ETP': 0.004,
'EVX': 5.4,
'EXN': 456,
'FRD': 65,
'FUEL': 123.00105,
'FUN': 202.9598309,
'FYN': 1.849,
'FYP': 66.13,
'GNO': 0.0034,
'GUP': 4,
'GVT': 1.2,
'HAC': 144,
'HDG': 7,
'HGT': 1082,
'HPC': 0.4,
'HVN': 120,
'ICN': 0.55,
'ICO': 34,
'ICOS': 0.35,
'IND': 76,
'INDI': 5913,
'ITS': 15.0012,
'IXT': 11,
'KBR': 143,
'KICK': 112,
'LA': 41,
'LAT': 1.44,
'LIFE': 13000,
'LRC': 27,
'LSK': 0.3,
'LUN': 0.34,
'MAID': 5,
'MANA': 143,
'MCAP': 5.44,
'MIPS': 43,
'MNE': 1.33,
'MSP': 121,
'MTH': 92,
'MYB': 3.9,
'NDC': 165,
'NEBL': 0.04,
'NET': 3.96,
'NTO': 998,
'NXC': 13.39,
'NXT': 3,
'OAX': 15,
'ODN': 0.004,
'OMG': 2,
'OPT': 335,
'ORME': 2.8,
'OTN': 0.57,
'PAY': 3.1,
'PIX': 96,
'PLBT': 0.33,
'PLR': 114,
'PLU': 0.87,
'POE': 784,
'POLL': 3.5,
'PPT': 2,
'PRE': 32,
'PRG': 39,
'PRO': 41,
'PRS': 60,
'PTOY': 0.5,
'QAU': 63,
'QCN': 0.03,
'QTUM': 0.04,
'QVT': 64,
'REP': 0.02,
'RKC': 15,
'RVT': 14,
'SAN': 2.24,
'SBD': 0.03,
'SCL': 2.6,
'SISA': 1640,
'SKIN': 407,
'SMART': 0.4,
'SMS': 0.0375,
'SNC': 36,
'SNGLS': 4,
'SNM': 48,
'SNT': 233,
'STEEM': 0.01,
'STRAT': 0.01,
'STU': 14,
'STX': 11,
'SUB': 17,
'SUR': 3,
'SWT': 0.51,
'TAAS': 0.91,
'TBT': 2.37,
'TFL': 15,
'TIME': 0.03,
'TIX': 7.1,
'TKN': 1,
'TKR': 84,
'TNT': 90,
'TRST': 1.6,
'TRX': 1395,
'UET': 480,
'UGT': 15,
'VEN': 14,
'VERI': 0.037,
'VIB': 50,
'VIBE': 145,
'VOISE': 618,
'WEALTH': 0.0168,
'WINGS': 2.4,
'WTC': 0.75,
'XAUR': 3.23,
'XDN': 0.01,
'XEM': 15,
'XUC': 0.9,
'YOYOW': 140,
'ZAP': 24,
'ZRX': 23,
'ZSC': 191,
},
'deposit': {
'BTC': 0.0006,
'ETH': 0.003,
'BCH': 0,
'USDT': 0,
'BTG': 0,
'LTC': 0,
'ZEC': 0,
'XMR': 0,
'1ST': 0,
'ADX': 0,
'AE': 0,
'AEON': 0,
'AIR': 0,
'AMP': 0,
'ANT': 0,
'ARDR': 0,
'ARN': 0,
'ART': 0,
'ATB': 0,
'ATL': 0,
'ATM': 0,
'ATS': 0,
'AVT': 0,
'BAS': 0,
'BCN': 0,
'DAO.Casino': 0, # id = 'BET'
'BKB': 0,
'BMC': 0,
'BMT': 0,
'BNT': 0,
'BQX': 0,
'BTM': 0,
'BTX': 0,
'BUS': 0,
'CCT': 0,
'CDT': 0,
'CDX': 0,
'CFI': 0,
'CLD': 0,
'CND': 0,
'CNX': 0,
'COSS': 0,
'CSNO': 0,
'CTR': 0,
'CTX': 0,
'CVC': 0,
'DBIX': 0,
'DCN': 0,
'DCT': 0,
'DDF': 0,
'DENT': 0,
'DGB': 0,
'DGD': 0,
'DICE': 0,
'DLT': 0,
'DNT': 0,
'DOGE': 0,
'DOV': 0,
'DRPU': 0,
'DRT': 0,
'DSH': 0,
'EBET': 0,
'EBTC': 0,
'EBTCOLD': 0,
'ECAT': 0,
'EDG': 0,
'EDO': 0,
'ELE': 0,
'ELM': 0,
'EMC': 0,
'EMGO': 0,
'ENJ': 0,
'EOS': 0,
'ERO': 0,
'ETBS': 0,
'ETC': 0,
'ETP': 0,
'EVX': 0,
'EXN': 0,
'FRD': 0,
'FUEL': 0,
'FUN': 0,
'FYN': 0,
'FYP': 0,
'GNO': 0,
'GUP': 0,
'GVT': 0,
'HAC': 0,
'HDG': 0,
'HGT': 0,
'HPC': 0,
'HVN': 0,
'ICN': 0,
'ICO': 0,
'ICOS': 0,
'IND': 0,
'INDI': 0,
'ITS': 0,
'IXT': 0,
'KBR': 0,
'KICK': 0,
'LA': 0,
'LAT': 0,
'LIFE': 0,
'LRC': 0,
'LSK': 0,
'LUN': 0,
'MAID': 0,
'MANA': 0,
'MCAP': 0,
'MIPS': 0,
'MNE': 0,
'MSP': 0,
'MTH': 0,
'MYB': 0,
'NDC': 0,
'NEBL': 0,
'NET': 0,
'NTO': 0,
'NXC': 0,
'NXT': 0,
'OAX': 0,
'ODN': 0,
'OMG': 0,
'OPT': 0,
'ORME': 0,
'OTN': 0,
'PAY': 0,
'PIX': 0,
'PLBT': 0,
'PLR': 0,
'PLU': 0,
'POE': 0,
'POLL': 0,
'PPT': 0,
'PRE': 0,
'PRG': 0,
'PRO': 0,
'PRS': 0,
'PTOY': 0,
'QAU': 0,
'QCN': 0,
'QTUM': 0,
'QVT': 0,
'REP': 0,
'RKC': 0,
'RVT': 0,
'SAN': 0,
'SBD': 0,
'SCL': 0,
'SISA': 0,
'SKIN': 0,
'SMART': 0,
'SMS': 0,
'SNC': 0,
'SNGLS': 0,
'SNM': 0,
'SNT': 0,
'STEEM': 0,
'STRAT': 0,
'STU': 0,
'STX': 0,
'SUB': 0,
'SUR': 0,
'SWT': 0,
'TAAS': 0,
'TBT': 0,
'TFL': 0,
'TIME': 0,
'TIX': 0,
'TKN': 0,
'TKR': 0,
'TNT': 0,
'TRST': 0,
'TRX': 0,
'UET': 0,
'UGT': 0,
'VEN': 0,
'VERI': 0,
'VIB': 0,
'VIBE': 0,
'VOISE': 0,
'WEALTH': 0,
'WINGS': 0,
'WTC': 0,
'XAUR': 0,
'XDN': 0,
'XEM': 0,
'XUC': 0,
'YOYOW': 0,
'ZAP': 0,
'ZRX': 0,
'ZSC': 0,
},
},
},
'commonCurrencies': {
'BCC': 'BCC',
'BET': 'DAO.Casino',
'CAT': 'BitClave',
'DRK': 'DASH',
'EMGO': 'MGO',
'GET': 'Themis',
'LNC': 'LinkerCoin',
'UNC': 'Unigame',
'USD': 'USDT',
'XBT': 'BTC',
},
'options': {
'defaultTimeInForce': 'FOK',
},
})
def fetch_markets(self):
markets = self.publicGetSymbols()
result = []
for p in range(0, len(markets['symbols'])):
market = markets['symbols'][p]
id = market['symbol']
baseId = market['commodity']
quoteId = market['currency']
lot = self.safe_float(market, 'lot')
step = self.safe_float(market, 'step')
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
result.append({
'info': market,
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'lot': lot,
'step': step,
'active': True,
'maker': self.safe_float(market, 'provideLiquidityRate'),
'taker': self.safe_float(market, 'takeLiquidityRate'),
'precision': {
'amount': self.precision_from_string(market['lot']),
'price': self.precision_from_string(market['step']),
},
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': step,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
method = self.safe_string(params, 'type', 'trading')
method += 'GetBalance'
query = self.omit(params, 'type')
response = getattr(self, method)(query)
balances = response['balance']
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
code = balance['currency_code']
currency = self.common_currency_code(code)
free = self.safe_float(balance, 'cash', 0.0)
free = self.safe_float(balance, 'balance', free)
used = self.safe_float(balance, 'reserved', 0.0)
account = {
'free': free,
'used': used,
'total': self.sum(free, used),
}
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
orderbook = self.publicGetSymbolOrderbook(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = ticker['timestamp']
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'volume_quote'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.publicGetSymbolTicker(self.extend({
'symbol': market['id'],
}, params))
if 'message' in ticker:
raise ExchangeError(self.id + ' ' + ticker['message'])
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
if isinstance(trade, list):
return self.parse_public_trade(trade, market)
return self.parse_order_trade(trade, market)
def parse_public_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
return {
'info': trade,
'id': str(trade[0]),
'timestamp': trade[3],
'datetime': self.iso8601(trade[3]),
'symbol': symbol,
'type': None,
'side': trade[4],
'price': float(trade[1]),
'amount': float(trade[2]),
}
def parse_order_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
amount = self.safe_float(trade, 'execQuantity')
if market:
amount *= market['lot']
price = self.safe_float(trade, 'execPrice')
cost = price * amount
fee = {
'cost': self.safe_float(trade, 'fee'),
'currency': None,
'rate': None,
}
timestamp = trade['timestamp']
return {
'info': trade,
'id': trade['tradeId'],
'order': trade['clientOrderId'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': trade['side'],
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetSymbolTrades(self.extend({
'symbol': market['id'],
# 'from': 0,
# 'till': 100,
# 'by': 'ts', # or by trade_id
# 'sort': 'desc', # or asc
# 'start_index': 0,
# 'max_results': 1000,
# 'format_item': 'object',
# 'format_price': 'number',
# 'format_amount': 'number',
# 'format_tid': 'string',
# 'format_timestamp': 'millisecond',
# 'format_wrap': False,
'side': 'true',
}, params))
return self.parse_trades(response['trades'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
# check if amount can be evenly divided into lots
# they want integer quantity in lot units
quantity = float(amount) / market['lot']
wholeLots = int(round(quantity))
difference = quantity - wholeLots
if abs(difference) > market['step']:
raise ExchangeError(self.id + ' order amount should be evenly divisible by lot unit size of ' + str(market['lot']))
clientOrderId = self.milliseconds()
request = {
'clientOrderId': str(clientOrderId),
'symbol': market['id'],
'side': side,
'quantity': str(wholeLots), # quantity in integer lot units
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
else:
request['timeInForce'] = self.options['defaultTimeInForce']
response = self.tradingPostNewOrder(self.extend(request, params))
order = self.parse_order(response['ExecutionReport'], market)
if order['status'] == 'rejected':
raise InvalidOrder(self.id + ' order was rejected by the exchange ' + self.json(order))
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
return self.tradingPostCancelOrder(self.extend({
'clientOrderId': id,
}, params))
def parse_order_status(self, status):
statuses = {
'new': 'open',
'partiallyFilled': 'open',
'filled': 'closed',
'canceled': 'canceled',
'rejected': 'rejected',
'expired': 'expired',
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'lastTimestamp')
if timestamp is None:
timestamp = self.safe_integer(order, 'timestamp')
symbol = None
if not market:
market = self.markets_by_id[order['symbol']]
status = self.safe_string(order, 'orderStatus')
if status:
status = self.parse_order_status(status)
price = self.safe_float(order, 'orderPrice')
price = self.safe_float(order, 'price', price)
price = self.safe_float(order, 'avgPrice', price)
amount = self.safe_float(order, 'orderQuantity')
amount = self.safe_float(order, 'quantity', amount)
remaining = self.safe_float(order, 'quantityLeaves')
remaining = self.safe_float(order, 'leavesQuantity', remaining)
filled = None
cost = None
amountDefined = (amount is not None)
remainingDefined = (remaining is not None)
if market is not None:
symbol = market['symbol']
if amountDefined:
amount *= market['lot']
if remainingDefined:
remaining *= market['lot']
else:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if amountDefined:
if remainingDefined:
filled = amount - remaining
cost = price * filled
feeCost = self.safe_float(order, 'fee')
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': None,
}
return {
'id': str(order['clientOrderId']),
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': order['type'],
'side': order['side'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
}
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.tradingGetOrder(self.extend({
'clientOrderId': id,
}, params))
if response['orders'][0]:
return self.parse_order(response['orders'][0])
raise OrderNotFound(self.id + ' fetchOrder() error: ' + self.response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
statuses = ['new', 'partiallyFiiled']
market = None
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersActive(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
statuses = ['filled', 'canceled', 'rejected', 'expired']
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
'max_results': 1000,
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersRecent(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = self.tradingGetTradesByOrder(self.extend({
'clientOrderId': id,
}, params))
return self.parse_trades(response['trades'], market, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency_code': currency['id'],
'amount': amount,
'address': address,
}
if tag:
request['paymentId'] = tag
response = self.paymentPostPayout(self.extend(request, params))
return {
'info': response,
'id': response['transaction'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + 'api' + '/' + self.version + '/' + api + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
payload = {'nonce': nonce, 'apikey': self.apiKey}
query = self.extend(payload, query)
if method == 'GET':
url += '?' + self.urlencode(query)
else:
url += '?' + self.urlencode(payload)
auth = url
if method == 'POST':
if query:
body = self.urlencode(query)
auth += body
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'X-Signature': self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512).lower(),
}
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'code' in response:
if 'ExecutionReport' in response:
if response['ExecutionReport']['orderRejectReason'] == 'orderExceedsLimit':
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 36.386957 | 127 | 0.348727 |
d165f501c735abd70a8ec09dc2c9f6d36238d18b | 3,788 | py | Python | tests/dropna_test.py | adrienpacifico/vaex | 19f8055b7b963b72f6e15b3b2e106d1142ec7655 | [
"MIT"
] | null | null | null | tests/dropna_test.py | adrienpacifico/vaex | 19f8055b7b963b72f6e15b3b2e106d1142ec7655 | [
"MIT"
] | null | null | null | tests/dropna_test.py | adrienpacifico/vaex | 19f8055b7b963b72f6e15b3b2e106d1142ec7655 | [
"MIT"
] | null | null | null | from common import *
def test_dropna_objects(df_local_non_arrow):
ds = df_local_non_arrow
ds_dropped = ds.dropna(column_names=['obj'])
assert ds_dropped['obj'].values.mask.any() == False
float_elements = np.array([element for element in ds_dropped['obj'].values.data if isinstance(element, float)])
assert np.isnan(float_elements).any() == False, 'np.nan still exists in column'
def test_dropna(ds_local):
ds = ds_local
ds_copy = ds.copy()
ds_dropped = ds.dropna()
assert len(ds_dropped) == 6
ds_dropped = ds.dropna(drop_masked=False)
assert len(ds_dropped) == 8
assert np.isnan(ds_dropped['n'].values).any() == False
assert np.isnan(ds_dropped['nm'].values).any() == False
ds_dropped = ds.dropna(drop_nan=False)
assert len(ds_dropped) == 8
assert ds_dropped['m'].values.mask.any() == False
assert ds_dropped['nm'].values.mask.any() == False
assert ds_dropped['mi'].values.mask.any() == False
assert ds_dropped['obj'].values.mask.any() == False
ds_dropped = ds.dropna(column_names=['nm', 'mi'])
assert len(ds_dropped) == 8
assert ds_dropped['nm'].values.mask.any() == False
assert np.isnan(ds_dropped['nm'].values).any() == False
ds_dropped = ds.dropna(column_names=['obj'])
assert len(ds_dropped) == 8
assert ds_dropped['obj'].values.mask.any() == False
float_elements = np.array([element for element in ds_dropped['obj'].values.data if isinstance(element, float)])
assert np.isnan(float_elements).any() == False, 'np.nan still exists in column'
ds_dropped = ds.dropna(column_names=['nm', 'mi', 'obj'])
state = ds_dropped.state_get()
ds_copy.state_set(state)
assert len(ds_copy) == len(ds_dropped)
assert len(ds_copy) == 6
# equivalent of isna_test
def test_dropmissing():
    """dropmissing removes masked/None values but keeps NaN (NaN is not
    treated as a missing value)."""
    # string column with a None, object column mixing None and np.nan,
    # float column with NaNs at indices 2 and 3, masked array hiding 1 and 3
    s = vaex.string_column(["aap", None, "noot", "mies"])
    o = ["aap", None, "noot", np.nan]
    x = np.arange(4, dtype=np.float64)
    x[2] = x[3] = np.nan
    m = np.ma.array(x, mask=[0, 1, 0, 1])
    df = vaex.from_arrays(x=x, m=m, s=s, o=o)
    x = df.x.dropmissing().tolist()
    # NOTE(review): x only ever holds values 0..3/NaN, so `9 not in x` is
    # trivially true — this assertion looks vestigial
    assert (9 not in x)
    assert np.any(np.isnan(x)), "nan is not a missing value"
    # masked entries (indices 1 and 3) are dropped; the NaN at index 2 stays
    m = df.m.dropmissing().tolist()
    assert (m[:1] == [0])
    assert np.isnan(m[1])
    assert len(m) == 2
    assert (df.s.dropmissing().tolist() == ["aap", "noot", "mies"])
    assert (df.o.dropmissing().tolist()[:2] == ["aap", "noot"])
    assert np.isnan(df.o.dropmissing().tolist()[2])
# equivalent of isna_test
def test_dropnan():
s = vaex.string_column(["aap", None, "noot", "mies"])
o = ["aap", None, "noot", np.nan]
x = np.arange(4, dtype=np.float64)
x[2] = x[3] = np.nan
m = np.ma.array(x, mask=[0, 1, 0, 1])
df = vaex.from_arrays(x=x, m=m, s=s, o=o)
x = df.x.dropnan().tolist()
assert x == [0, 1]
m = df.m.dropnan().tolist()
assert m == [0, None, None]
assert (df.s.dropnan().tolist() == ["aap", None, "noot", "mies"])
assert (df.o.dropnan().tolist() == ["aap", None, "noot"])
def test_dropna_expression():
    """Expression-level dropna removes both NaN and masked/missing values.

    Fix: this function was originally also named ``test_dropna``, which
    shadowed the fixture-based ``test_dropna`` defined earlier in this
    module — pytest only collects the last definition of a name, so the
    first test never ran.  Renamed so both are collected.
    """
    s = vaex.string_column(["aap", None, "noot", "mies"])
    o = ["aap", None, "noot", np.nan]
    x = np.arange(4, dtype=np.float64)
    x[2] = x[3] = np.nan
    m = np.ma.array(x, mask=[0, 1, 0, 1])
    df = vaex.from_arrays(x=x, m=m, s=s, o=o)
    assert df.x.dropna().tolist() == [0, 1]
    assert df.m.dropna().tolist() == [0]
    assert (df.s.dropna().tolist() == ["aap", "noot", "mies"])
    assert (df.o.dropna().tolist() == ["aap", "noot"])
def test_dropna_all_columns():
    """DataFrame-level dropna keeps only rows with no missing value in any column."""
    df = vaex.from_arrays(
        x=[1, 2, 3, 4, 5],
        y=['dog', 'dog', None, 'cat', None],
    )
    dropped = df.dropna()
    assert dropped.x.tolist() == [1, 2, 4]
    assert dropped.y.tolist() == ['dog', 'dog', 'cat']
fa5b13d01a6641883e52f3d641f65d1794f5827e | 78,472 | py | Python | localstack/services/awslambda/lambda_api.py | mwarthon/localstack | b748838113131b576e2be0b80a8f31e9bad81f3a | [
"Apache-2.0"
] | null | null | null | localstack/services/awslambda/lambda_api.py | mwarthon/localstack | b748838113131b576e2be0b80a8f31e9bad81f3a | [
"Apache-2.0"
] | null | null | null | localstack/services/awslambda/lambda_api.py | mwarthon/localstack | b748838113131b576e2be0b80a8f31e9bad81f3a | [
"Apache-2.0"
] | null | null | null | import re
import os
import importlib.machinery
import sys
import json
import uuid
import time
import base64
import hashlib
import logging
import functools
import threading
import traceback
from io import BytesIO
from datetime import datetime
from flask import Flask, Response, jsonify, request
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from localstack import config
from localstack.constants import APPLICATION_JSON, TEST_AWS_ACCOUNT_ID
from localstack.utils.aws import aws_stack, aws_responses
from localstack.utils.common import (
to_str, to_bytes, load_file, save_file, TMP_FILES, ensure_readable, short_uid, long_uid, json_safe,
mkdir, unzip, is_zip_file, run, run_safe, first_char_to_lower, run_for_max_seconds, parse_request_data,
timestamp_millis, timestamp, now_utc, safe_requests, FuncThread, isoformat_milliseconds, synchronized)
from localstack.services.awslambda import lambda_executors
from localstack.services.generic_proxy import RegionBackend
from localstack.services.awslambda.lambda_utils import (
DOTNET_LAMBDA_RUNTIMES, multi_value_dict_for_list, get_handler_file_from_name,
LAMBDA_DEFAULT_HANDLER, LAMBDA_DEFAULT_RUNTIME, LAMBDA_DEFAULT_STARTING_POSITION)
from localstack.utils.analytics import event_publisher
from localstack.utils.http_utils import parse_chunked_data
from localstack.utils.aws.aws_models import LambdaFunction, CodeSigningConfig
# logger
LOG = logging.getLogger(__name__)
# name pattern of IAM policies associated with Lambda functions
LAMBDA_POLICY_NAME_PATTERN = 'lambda_policy_%s'
# constants
APP_NAME = 'lambda_api'
PATH_ROOT = '/2015-03-31'
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
# default timeout in seconds
LAMBDA_DEFAULT_TIMEOUT = 3
INVALID_PARAMETER_VALUE_EXCEPTION = 'InvalidParameterValueException'
VERSION_LATEST = '$LATEST'
FUNCTION_MAX_SIZE = 69905067
BATCH_SIZE_RANGES = {
'kafka': (100, 10000),
'kinesis': (100, 10000),
'dynamodb': (100, 1000),
'sqs': (10, 10)
}
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+00:00'
app = Flask(APP_NAME)
# mutex for access to CWD and ENV
EXEC_MUTEX = threading.RLock(1)
# whether to use Docker for execution
DO_USE_DOCKER = None
# start characters indicating that a lambda result should be parsed as JSON
JSON_START_CHAR_MAP = {
list: ('[',),
tuple: ('[',),
dict: ('{',),
str: ('"',),
bytes: ('"',),
bool: ('t', 'f'),
type(None): ('n',),
int: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'),
float: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
}
POSSIBLE_JSON_TYPES = (str, bytes)
JSON_START_TYPES = tuple(set(JSON_START_CHAR_MAP.keys()) - set(POSSIBLE_JSON_TYPES))
JSON_START_CHARS = tuple(set(functools.reduce(lambda x, y: x + y, JSON_START_CHAR_MAP.values())))
# SQS listener thread settings
SQS_LISTENER_THREAD = {}
SQS_POLL_INTERVAL_SEC = 1
# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)
# IAM policy constants
IAM_POLICY_VERSION = '2012-10-17'
# Whether to check if the handler function exists while creating lambda function
CHECK_HANDLER_ON_CREATION = False
class LambdaRegion(RegionBackend):
    """Per-region mutable state container for the Lambda API."""
    def __init__(self):
        # map ARN strings to lambda function objects
        self.lambdas = {}
        # map ARN strings to CodeSigningConfig object
        self.code_signing_configs = {}
        # list of event source mappings for the API
        self.event_source_mappings = []
class ClientError(Exception):
    """Error raised for client-side request failures; carries an HTTP status code.

    `msg` may be either a plain message or an already-constructed Response object.
    """

    def __init__(self, msg, code=400):
        super().__init__(msg)
        self.msg = msg
        self.code = code

    def get_response(self):
        """Convert this error into a Flask response object."""
        return self.msg if isinstance(self.msg, Response) else error_response(self.msg, self.code)
class LambdaContext(object):
    """Mimics the context object that AWS Lambda passes to handler functions."""

    # default memory limit (MB) reported when the function defines none
    DEFAULT_MEMORY_LIMIT = 1536

    def __init__(self, func_details, qualifier=None, context=None):
        # bugfix: `context` defaults to None but was dereferenced via context.get(..),
        # which raised AttributeError for callers omitting it - normalize to a dict
        context = context or {}
        self.function_name = func_details.name()
        self.function_version = func_details.get_qualifier_version(qualifier)
        self.client_context = context.get('client_context')
        self.invoked_function_arn = func_details.arn()
        if qualifier:
            self.invoked_function_arn += ':' + qualifier
        self.cognito_identity = context.get('identity')
        self.aws_request_id = str(uuid.uuid4())
        self.memory_limit_in_mb = func_details.memory_size or self.DEFAULT_MEMORY_LIMIT
        self.log_group_name = '/aws/lambda/%s' % self.function_name
        self.log_stream_name = '%s/[1]%s' % (timestamp(format='%Y/%m/%d'), short_uid())

    def get_remaining_time_in_millis(self):
        # TODO implement! currently a fixed value of one minute
        return 1000 * 60
def cleanup():
    """Reset all Lambda state for the current region and dispose of executor resources."""
    region = LambdaRegion.get()
    region.lambdas = {}
    region.event_source_mappings = []
    # also clean up the executor (e.g., stop/remove Docker containers)
    LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name, remove_qualifier=True):
    """Return the full Lambda ARN for `function_name`, optionally stripping a trailing qualifier."""
    prefix, separator, remainder = function_name.partition(':function:')
    if remove_qualifier and separator:
        # drop any ':<qualifier>' suffix after the function name
        function_name = '%s:function:%s' % (prefix, remainder.split(':')[0])
    return aws_stack.lambda_function_arn(function_name)
def func_qualifier(function_name, qualifier=None):
    """Return '<arn>:<qualifier>' if the qualifier exists on the function, else the bare ARN.

    Returns the (falsy) lookup result itself when the function is unknown.
    """
    arn = aws_stack.lambda_function_arn(function_name)
    details = LambdaRegion.get().lambdas.get(arn)
    if not details:
        return details
    return '{}:{}'.format(arn, qualifier) if details.qualifier_exists(qualifier) else arn
def check_batch_size_range(source_arn, batch_size=None):
    """Validate (and default) the batch size for the given event source ARN.

    Raises ValueError for unsupported source types or sizes above the allowed maximum.
    """
    service = source_arn.split(':')[2].lower()
    # self-managed Kafka sources reference a Secrets Manager ARN
    if 'secretsmanager' in service:
        service = 'kafka'
    size_range = BATCH_SIZE_RANGES.get(service)
    if not size_range:
        raise ValueError(
            INVALID_PARAMETER_VALUE_EXCEPTION, 'Unsupported event source type'
        )
    default_size, max_size = size_range
    batch_size = batch_size or default_size
    if batch_size > max_size:
        raise ValueError(
            INVALID_PARAMETER_VALUE_EXCEPTION,
            'BatchSize {} exceeds the max of {}'.format(batch_size, max_size)
        )
    return batch_size
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
    """Attach the given handler callable (and optionally a working directory) to $LATEST."""
    details = LambdaRegion.get().lambdas[func_arn(lambda_name)]
    details.versions.get(VERSION_LATEST)['Function'] = lambda_handler
    if lambda_cwd:
        details.cwd = lambda_cwd
def build_mapping_obj(data):
    """Build an event source mapping dict from a Create/UpdateEventSourceMapping payload."""
    function_name = data['FunctionName']
    enabled = data.get('Enabled', True)
    requested_batch_size = data.get('BatchSize')
    mapping = {
        'UUID': str(uuid.uuid4()),
        'FunctionArn': func_arn(function_name),
        'LastProcessingResult': 'OK',
        'StateTransitionReason': 'User action',
        'LastModified': float(time.mktime(datetime.utcnow().timetuple())),
        # an absent 'Enabled' flag (None) counts as enabled, matching AWS behavior
        'State': 'Enabled' if enabled in [True, None] else 'Disabled',
    }
    if 'SelfManagedEventSource' in data:
        # self-managed (e.g. Kafka) sources carry their ARN in the access config URI
        source_arn = data['SourceAccessConfigurations'][0]['URI']
        mapping['SelfManagedEventSource'] = data['SelfManagedEventSource']
        mapping['Topics'] = data['Topics']
        mapping['SourceAccessConfigurations'] = data['SourceAccessConfigurations']
    else:
        source_arn = data['EventSourceArn']
        mapping['EventSourceArn'] = source_arn
        mapping['StartingPosition'] = LAMBDA_DEFAULT_STARTING_POSITION
    mapping['BatchSize'] = check_batch_size_range(source_arn, requested_batch_size)
    return mapping
def add_event_source(data):
    """Register a new event source mapping in the current region and return it."""
    mapping = build_mapping_obj(data)
    LambdaRegion.get().event_source_mappings.append(mapping)
    return mapping
def update_event_source(uuid_value, data):
    """Update the event source mapping identified by `uuid_value` with fields from `data`.

    :returns: the updated mapping dict, or {} if no mapping with that UUID exists.
    """
    region = LambdaRegion.get()
    function_name = data.get('FunctionName') or ''
    enabled = data.get('Enabled', True)
    for mapping in region.event_source_mappings:
        if uuid_value != mapping['UUID']:
            continue
        if function_name:
            mapping['FunctionArn'] = func_arn(function_name)
        batch_size = data.get('BatchSize')
        if 'SelfManagedEventSource' in mapping:
            # self-managed sources keep their ARN in the access configuration URI
            batch_size = check_batch_size_range(
                mapping['SourceAccessConfigurations'][0]['URI'],
                batch_size or mapping['BatchSize'])
        else:
            batch_size = check_batch_size_range(mapping['EventSourceArn'], batch_size or mapping['BatchSize'])
        mapping['State'] = 'Enabled' if enabled in [True, None] else 'Disabled'
        mapping['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
        mapping['BatchSize'] = batch_size
        # bugfix(clarity): the previous condition `'SourceAccessConfigurations' in (mapping and data)`
        # always evaluated `mapping and data` to `data` (mapping is a non-empty dict),
        # so only `data` was ever inspected - make that explicit
        if 'SourceAccessConfigurations' in data:
            mapping['SourceAccessConfigurations'] = data['SourceAccessConfigurations']
        return mapping
    return {}
def delete_event_source(uuid_value):
    """Remove and return the event source mapping with the given UUID ({} if absent)."""
    mappings = LambdaRegion.get().event_source_mappings
    for index, mapping in enumerate(mappings):
        if mapping['UUID'] == uuid_value:
            return mappings.pop(index)
    return {}
@synchronized(lock=EXEC_MUTEX)
def use_docker():
    """Lazily determine (and cache in DO_USE_DOCKER) whether Lambdas run in Docker."""
    global DO_USE_DOCKER
    if DO_USE_DOCKER is not None:
        return DO_USE_DOCKER
    DO_USE_DOCKER = False
    if 'docker' in config.LAMBDA_EXECUTOR:
        try:
            # probe for a reachable Docker daemon
            run('docker images', print_error=False)
            DO_USE_DOCKER = True
        except Exception:
            pass
    return DO_USE_DOCKER
def fix_proxy_path_params(path_params):
    """Rename the API Gateway greedy path parameter key 'proxy+' to 'proxy' in place.

    No-op when 'proxy+' is absent or maps to a falsy value.
    """
    greedy_value = path_params.get('proxy+')
    if greedy_value:
        del path_params['proxy+']
        path_params['proxy'] = greedy_value
def message_attributes_to_lower(message_attrs):
    """ Convert message attribute details (first characters) to lower case (e.g., stringValue, dataType). """
    message_attrs = message_attrs or {}
    for attr in message_attrs.values():
        if not isinstance(attr, dict):
            continue
        # iterate over a snapshot of the keys since we mutate the dict
        for key in list(attr.keys()):
            attr[first_char_to_lower(key)] = attr.pop(key)
    return message_attrs
def process_apigateway_invocation(func_arn, path, payload, stage, api_id, headers=None,
        resource_path=None, method=None, path_params=None, query_string_params=None,
        stage_variables=None, request_context=None, event_context=None):
    """Invoke the Lambda behind an API Gateway route and return the invocation result.

    :returns: the executor's result payload, or None (with a warning logged) on failure.
    """
    try:
        # avoid shared mutable default arguments - normalize all optional dicts here
        headers = headers or {}
        path_params = dict(path_params or {})
        stage_variables = stage_variables or {}
        request_context = request_context or {}
        event_context = event_context or {}
        resource_path = resource_path or path
        event = construct_invocation_event(method, path, headers, payload, query_string_params)
        fix_proxy_path_params(path_params)
        event['pathParameters'] = path_params
        event['resource'] = resource_path
        event['requestContext'] = request_context
        event['stageVariables'] = stage_variables
        LOG.debug('Running Lambda function %s from API Gateway invocation: %s %s' % (func_arn, method or 'GET', path))
        asynchronous = not config.SYNCHRONOUS_API_GATEWAY_EVENTS
        inv_result = run_lambda(func_arn=func_arn, event=event, context=event_context, asynchronous=asynchronous)
        return inv_result.result
    except Exception as e:
        LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def construct_invocation_event(method, path, headers, data, query_string_params={}):
    """Build the API-Gateway-style event dict that is passed to a Lambda handler."""
    # fall back to parsing query params from the path if none were supplied
    query_string_params = query_string_params or parse_request_data(method, path, '')
    return {
        'path': path,
        'headers': dict(headers),
        'multiValueHeaders': multi_value_dict_for_list(headers),
        'body': data,
        'isBase64Encoded': False,
        'httpMethod': method,
        'queryStringParameters': query_string_params,
        'multiValueQueryStringParameters': multi_value_dict_for_list(query_string_params)
    }
def process_sns_notification(func_arn, topic_arn, subscription_arn, message, message_id,
        message_attributes, unsubscribe_url, subject='',):
    """Build an SNS 'Records' event for the notification and invoke the given Lambda.

    Runs asynchronously unless config.SYNCHRONOUS_SNS_EVENTS is set; returns the
    invocation result payload.
    """
    event = {
        'Records': [{
            'EventSource': 'localstack:sns',
            'EventVersion': '1.0',
            'EventSubscriptionArn': subscription_arn,
            'Sns': {
                'Type': 'Notification',
                'MessageId': message_id,
                'TopicArn': topic_arn,
                'Subject': subject,
                'Message': message,
                'Timestamp': timestamp_millis(),
                'SignatureVersion': '1',
                # TODO Add a more sophisticated solution with an actual signature
                # Hardcoded
                'Signature': 'EXAMPLEpH+..',
                'SigningCertUrl': 'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-000000000.pem',
                'UnsubscribeUrl': unsubscribe_url,
                'MessageAttributes': message_attributes
            }
        }]
    }
    inv_result = run_lambda(func_arn=func_arn, event=event, context={}, asynchronous=not config.SYNCHRONOUS_SNS_EVENTS)
    return inv_result.result
def process_kinesis_records(records, stream_name):
    """Dispatch Kinesis records to every Lambda subscribed to the given stream.

    Records are delivered in batches of at most the mapping's BatchSize;
    failures are logged (not raised).
    """
    def chunks(lst, n):
        # Yield successive n-sized chunks from lst.
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    # feed records into listening lambdas
    try:
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)
        sources = get_event_sources(source_arn=stream_arn)
        for source in sources:
            arn = source['FunctionArn']
            for chunk in chunks(records, source['BatchSize']):
                # wrap each raw record in the AWS Kinesis event envelope
                event = {
                    'Records': [
                        {
                            'eventID': 'shardId-000000000000:{0}'.format(rec['sequenceNumber']),
                            'eventSourceARN': stream_arn,
                            'eventSource': 'aws:kinesis',
                            'eventVersion': '1.0',
                            'eventName': 'aws:kinesis:record',
                            'invokeIdentityArn': 'arn:aws:iam::{0}:role/lambda-role'.format(TEST_AWS_ACCOUNT_ID),
                            'awsRegion': aws_stack.get_region(),
                            'kinesis': rec
                        }
                        for rec in chunk
                    ]
                }
                run_lambda(func_arn=arn, event=event, context={}, asynchronous=not config.SYNCHRONOUS_KINESIS_EVENTS)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def start_lambda_sqs_listener():
    """Start the background thread that polls SQS queues with Lambda event source mappings.

    Idempotent: returns immediately if the listener thread is already running.
    """
    if SQS_LISTENER_THREAD:
        return
    def send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region):
        """Wrap SQS messages into a Lambda 'Records' event and invoke the function asynchronously."""
        def delete_messages(result, func_arn, event, error=None, dlq_sent=None, **kwargs):
            # executor callback: delete the processed messages from the queue
            if error and not dlq_sent:
                # Skip deleting messages from the queue in case of processing errors AND if
                # the message has not yet been sent to a dead letter queue (DLQ).
                # We'll pick them up and retry next time they become available on the queue.
                return
            sqs_client = aws_stack.connect_to_service('sqs')
            entries = [{'Id': r['receiptHandle'], 'ReceiptHandle': r['receiptHandle']} for r in records]
            try:
                sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=entries)
            except Exception as e:
                LOG.info('Unable to delete Lambda events from SQS queue ' +
                    '(please check SQS visibility timeout settings): %s - %s' % (entries, e))
        records = []
        for msg in messages:
            message_attrs = message_attributes_to_lower(msg.get('MessageAttributes'))
            records.append({
                'body': msg['Body'],
                'receiptHandle': msg['ReceiptHandle'],
                'md5OfBody': msg['MD5OfBody'],
                'eventSourceARN': queue_arn,
                'eventSource': lambda_executors.EVENT_SOURCE_SQS,
                'awsRegion': region,
                'messageId': msg['MessageId'],
                'attributes': msg.get('Attributes', {}),
                'messageAttributes': message_attrs,
                'md5OfMessageAttributes': msg.get('MD5OfMessageAttributes'),
                'sqs': True,
            })
        event = {'Records': records}
        # TODO implement retries, based on "RedrivePolicy.maxReceiveCount" in the queue settings
        res = run_lambda(func_arn=lambda_arn, event=event, context={},
            asynchronous=True, callback=delete_messages)
        if isinstance(res, lambda_executors.InvocationResult) and getattr(res.result, 'status_code', 0) >= 400:
            return False
        return True
    def listener_loop(*args):
        # poll all mapped SQS queues in a loop, feeding received messages to their Lambdas
        while True:
            try:
                sources = get_event_sources(source_arn=r'.*:sqs:.*')
                if not sources:
                    # Temporarily disable polling if no event sources are configured
                    # anymore. The loop will get restarted next time a message
                    # arrives and if an event source is configured.
                    SQS_LISTENER_THREAD.pop('_thread_')
                    return
                unprocessed_messages = {}
                sqs_client = aws_stack.connect_to_service('sqs')
                for source in sources:
                    queue_arn = source['EventSourceArn']
                    lambda_arn = source['FunctionArn']
                    # clamp the configured batch size into the SQS-supported range [1, 10]
                    batch_size = max(min(source.get('BatchSize', 1), 10), 1)
                    try:
                        region_name = queue_arn.split(':')[3]
                        queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
                        messages = unprocessed_messages.pop(queue_arn, None)
                        if not messages:
                            result = sqs_client.receive_message(
                                QueueUrl=queue_url,
                                MessageAttributeNames=['All'],
                                MaxNumberOfMessages=batch_size
                            )
                            messages = result.get('Messages')
                            if not messages:
                                continue
                        LOG.debug('Sending event from event source %s to Lambda %s' % (queue_arn, lambda_arn))
                        res = send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region=region_name)
                        if not res:
                            # invocation failed - keep the messages for the next iteration
                            unprocessed_messages[queue_arn] = messages
                    except Exception as e:
                        LOG.debug('Unable to poll SQS messages for queue %s: %s' % (queue_arn, e))
            except Exception:
                pass
            finally:
                time.sleep(SQS_POLL_INTERVAL_SEC)
    LOG.debug('Starting SQS message polling thread for Lambda API')
    SQS_LISTENER_THREAD['_thread_'] = FuncThread(listener_loop)
    SQS_LISTENER_THREAD['_thread_'].start()
def process_sqs_message(queue_name, region_name=None):
    # feed message into the first listening lambda (message should only get processed once)
    try:
        region_name = region_name or aws_stack.get_region()
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        if not sources:
            return False
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        start_lambda_sqs_listener()
        return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
    """Return all event source mappings, optionally filtered by function name/ARN and source ARN."""
    matches = []
    for mapping in LambdaRegion.get().event_source_mappings:
        func_matches = not func_name or mapping['FunctionArn'] in [func_name, func_arn(func_name)]
        if func_matches and _arn_match(mapped=mapping['EventSourceArn'], searched=source_arn):
            matches.append(mapping)
    return matches
def _arn_match(mapped, searched):
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
if re.match(r'^%s$' % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched):]
return suffix[0] == '/'
return False
def get_function_version(arn, version):
    """Return the formatted details dict for one specific version of a function."""
    func_details = LambdaRegion.get().lambdas.get(arn)
    return format_func_details(func_details, version=version, always_add_version=True)
def publish_new_function_version(arn):
    """Publish a new numbered version from $LATEST if its code has changed.

    A new version is only created when the CodeSha256 of $LATEST differs from the
    most recently published version; otherwise the existing max version is returned.
    """
    region = LambdaRegion.get()
    func_details = region.lambdas.get(arn)
    versions = func_details.versions
    max_version_number = func_details.max_version()
    next_version_number = max_version_number + 1
    latest_hash = versions.get(VERSION_LATEST).get('CodeSha256')
    max_version = versions.get(str(max_version_number))
    max_version_hash = max_version.get('CodeSha256') if max_version else ''
    if latest_hash != max_version_hash:
        # snapshot the current $LATEST code/metadata as the new version
        versions[str(next_version_number)] = {
            'CodeSize': versions.get(VERSION_LATEST).get('CodeSize'),
            'CodeSha256': versions.get(VERSION_LATEST).get('CodeSha256'),
            'Function': versions.get(VERSION_LATEST).get('Function'),
            'RevisionId': str(uuid.uuid4())
        }
        max_version_number = next_version_number
    return get_function_version(arn, str(max_version_number))
def do_list_versions(arn):
    """Return all versions of the function, sorted by version string."""
    func_versions = LambdaRegion.get().lambdas.get(arn).versions
    result = [get_function_version(arn, v) for v in func_versions.keys()]
    result.sort(key=lambda entry: str(entry.get('Version')))
    return result
def do_update_alias(arn, alias, version, description=None):
    """Create or replace an alias pointing at the given function version; return the alias dict."""
    alias_details = {
        'AliasArn': arn + ':' + alias,
        'FunctionVersion': version,
        'Name': alias,
        'Description': description or '',
        'RevisionId': str(uuid.uuid4())
    }
    LambdaRegion.get().lambdas.get(arn).aliases[alias] = alias_details
    return alias_details
def run_lambda(func_arn, event, context={}, version=None,
        suppress_output=False, asynchronous=False, callback=None):
    """Execute the Lambda function identified by `func_arn` with the given event.

    :param event: event payload passed to the handler
    :param context: client invocation context dict (used to build the LambdaContext)
    :param version: optional function version/qualifier
    :param suppress_output: redirect stdout/stderr into an in-memory buffer during execution
    :param asynchronous: invoke as 'Event' (fire-and-forget) instead of 'RequestResponse'
    :param callback: callable passed through to the executor, invoked on completion
    :returns: lambda_executors.InvocationResult (with a 404/500 Flask response on errors)
    """
    region_name = func_arn.split(':')[3]
    region = LambdaRegion.get(region_name)
    if suppress_output:
        # temporarily swap stdout/stderr; restored in the finally block below
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    try:
        func_arn = aws_stack.fix_arn(func_arn)
        func_details = region.lambdas.get(func_arn)
        if not func_details:
            LOG.debug('Unable to find details for Lambda %s in region %s' % (func_arn, region_name))
            result = not_found_error(msg='The resource specified in the request does not exist.')
            return lambda_executors.InvocationResult(result)
        # forward invocation to external endpoint, if configured
        invocation_type = 'Event' if asynchronous else 'RequestResponse'
        invoke_result = forward_to_external_url(func_details, event, context, invocation_type)
        if invoke_result is not None:
            return invoke_result
        context = LambdaContext(func_details, version, context)
        result = LAMBDA_EXECUTOR.execute(func_arn, func_details, event, context=context,
            version=version, asynchronous=asynchronous, callback=callback)
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        response = {
            'errorType': str(exc_type.__name__),
            'errorMessage': str(e),
            'stackTrace': traceback.format_tb(exc_traceback)
        }
        LOG.info('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
        log_output = e.log_output if isinstance(e, lambda_executors.InvocationException) else ''
        return lambda_executors.InvocationResult(Response(json.dumps(response), status=500), log_output)
    finally:
        # restore the original stdout/stderr even on early return or error
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def load_source(name, file):
    """Load a Python module from the given source file path.

    Replaces the deprecated ``SourceFileLoader.load_module()`` (removed in
    Python 3.12) with the spec-based importlib API. Like ``load_module()``,
    the module is registered in ``sys.modules`` under ``name``, so the
    sys.modules cleanup performed by exec_lambda_code() keeps working.

    :param name: module name to register the loaded module under
    :param file: path to the Python source file
    :returns: the loaded module object
    """
    loader = importlib.machinery.SourceFileLoader(name, file)
    spec = importlib.util.spec_from_file_location(name, file, loader=loader)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)
    return module
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    """Execute a Python Lambda script in-process and return its handler function.

    The script is saved to a temp file, imported as a module, and the attribute
    named `handler_function` is returned. While importing, CWD and environment
    may be temporarily switched (guarded by EXEC_MUTEX) and restored afterwards.
    """
    if lambda_cwd or lambda_env:
        EXEC_MUTEX.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        pre_sys_modules_keys = set(sys.modules.keys())
        try:
            handler_module = load_source(lambda_id, lambda_file)
            module_vars = handler_module.__dict__
        finally:
            # the above import can bring files for the function
            # (eg settings.py) into the global namespace. subsequent
            # calls can pick up file from another function, causing
            # general issues.
            post_sys_modules_keys = set(sys.modules.keys())
            for key in post_sys_modules_keys:
                if key not in pre_sys_modules_keys:
                    sys.modules.pop(key)
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        # restore CWD/env and release the mutex if we changed them above
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            EXEC_MUTEX.release()
    return module_vars[handler_function]
def get_handler_function_from_name(handler_name, runtime=None):
    """Extract the function-name portion from a Lambda handler string."""
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    # .NET handlers use 'Assembly::Namespace.Class::Method'; others use 'module.function'
    separator = ':' if runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)) else '.'
    return handler_name.split(separator)[-1]
def error_response(msg, code=500, error_type='InternalFailure'):
    """Log `msg` and return a JSON-formatted Flask error response with the given status/type."""
    LOG.debug(msg)
    return aws_responses.flask_error_response_json(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
    """Returns the ZIP file contents from a FunctionCode dict.

    :type function_code: dict
    :param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
    :returns: bytes of the Zip file (or None for container images).
    """
    function_code = function_code or {}
    if 'S3Bucket' in function_code:
        # fetch the archive from the (mocked) S3 service
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(function_code['S3Bucket'], function_code['S3Key'], bytes_io)
        except Exception as e:
            raise ClientError('Unable to fetch Lambda archive from S3: %s' % e, 404)
        return bytes_io.getvalue()
    if 'ZipFile' in function_code:
        return base64.b64decode(function_code['ZipFile'])
    if 'ImageUri' in function_code:
        # container image deployments carry no ZIP content
        return None
    raise ClientError('No valid Lambda archive specified: %s' % list(function_code.keys()))
def get_java_handler(zip_file_content, main_file, func_details=None):
    """Creates a Java handler from an uploaded ZIP or JAR.

    :type zip_file_content: bytes
    :param zip_file_content: ZIP file bytes.
    :type main_file: str
    :param main_file: Filepath to the uploaded ZIP or JAR file.
    :returns: function or flask.Response
    """
    # reject anything that is not a valid zip/jar archive up-front
    if not is_zip_file(zip_file_content):
        raise ClientError(error_response(
            'Unable to extract Java Lambda handler - file is not a valid zip/jar file (%s, %s bytes)' %
            (main_file, len(zip_file_content or '')), 400, error_type='ValidationError'))

    def execute(event, context):
        # delegate execution to the local Java Lambda executor
        return lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
            event, context, main_file=main_file, func_details=func_details)
    return execute
def set_archive_code(code, lambda_name, zip_file_content=None):
    """Store the code archive for the function and return its local working directory.

    For local-mount buckets (BUCKET_MARKER_LOCAL) the S3Key is used directly as a
    local path; otherwise the ZIP content is written to a fresh temp directory and
    the $LATEST version's CodeSize/CodeSha256 are updated.
    """
    region = LambdaRegion.get()
    # get metadata
    lambda_arn = func_arn(lambda_name)
    lambda_details = region.lambdas[lambda_arn]
    is_local_mount = code.get('S3Bucket') == config.BUCKET_MARKER_LOCAL
    if is_local_mount and config.LAMBDA_REMOTE_DOCKER:
        msg = 'Please note that Lambda mounts (bucket name "%s") cannot be used with LAMBDA_REMOTE_DOCKER=1'
        raise Exception(msg % config.BUCKET_MARKER_LOCAL)
    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(lambda_arn)
    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        lambda_details.cwd = code.get('S3Key')
        return code['S3Key']
    # get file content
    zip_file_content = zip_file_content or get_zip_bytes(code)
    if not zip_file_content:
        return
    # Save the zip file to a temporary file that the lambda executors can reference
    code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
    latest_version = lambda_details.get_version(VERSION_LATEST)
    latest_version['CodeSize'] = len(zip_file_content)
    latest_version['CodeSha256'] = code_sha_256.decode('utf-8')
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
def set_function_code(code, lambda_name, lambda_cwd=None):
    """Install the given code for the function (time-boxed) and return its name."""
    def _set_and_configure():
        handler = do_set_function_code(code, lambda_name, lambda_cwd=lambda_cwd)
        add_function_mapping(lambda_name, handler, lambda_cwd)
    # unzipping can take some time - limit the execution time to avoid client/network timeout issues
    run_for_max_seconds(25, _set_and_configure)
    return {'FunctionName': lambda_name}
def do_set_function_code(code, lambda_name, lambda_cwd=None):
    """Install the code archive for `lambda_name` and return a Python handler callable.

    Saves/unzips the archive if needed and resolves the handler function. For
    runtimes that can only run in Docker, a placeholder handler is returned that
    raises a descriptive error when invoked.
    """
    def generic_handler(event, context):
        # fallback handler for runtimes that require the Docker executor
        raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
            'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)
    region = LambdaRegion.get()
    arn = func_arn(lambda_name)
    lambda_details = region.lambdas[arn]
    runtime = lambda_details.runtime
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = code
    code = code or lambda_details.code
    is_local_mount = code.get('S3Bucket') == config.BUCKET_MARKER_LOCAL
    zip_file_content = None
    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Save the zip file to a temporary file that the lambda executors can reference
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd
    if not lambda_cwd:
        return
    # get local lambda working directory
    tmp_file = os.path.join(lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode='rb')
    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    is_java = lambda_executors.is_java_lambda(runtime)
    if is_java:
        # The Lambda executors for Docker subclass LambdaExecutorContainers, which
        # runs Lambda in Docker by passing all *.jar files in the function working
        # directory as part of the classpath. Obtain a Java handler function below.
        lambda_handler = get_java_handler(zip_file_content, tmp_file, func_details=lambda_details)
    if not is_local_mount:
        # Lambda code must be uploaded in Zip format
        if not is_zip_file(zip_file_content):
            raise ClientError(
                'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
        # Unzip the Lambda archive contents
        unzip(tmp_file, lambda_cwd)
    # Obtain handler details for any non-Java Lambda function
    if not is_java:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if CHECK_HANDLER_ON_CREATION and not os.path.exists(main_file):
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
                    (is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
                LOG.debug('Lambda archive content:\n%s' % file_list)
                raise ClientError(error_response(
                    'Unable to find handler script (%s) in Lambda archive. %s' % (main_file, config_debug),
                    400, error_type='ValidationError'))
        if runtime.startswith('python') and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode='rb')
                # extract handler
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise ClientError('Unable to get handler function from lambda code.', e)
    return lambda_handler
def do_list_functions():
    """Return formatted details for all Lambda functions of the current region."""
    funcs = []
    region = LambdaRegion.get()
    this_region = aws_stack.get_region()
    for f_arn, func in region.lambdas.items():
        # idiomatic fix: use isinstance instead of an exact type() comparison
        # (also tolerant of LambdaFunction subclasses)
        if not isinstance(func, LambdaFunction):
            continue
        # skip functions that belong to other regions
        func_region = f_arn.split(':')[3]
        if func_region != this_region:
            continue
        func_name = f_arn.split(':function:')[-1]
        arn = func_arn(func_name)
        func_details = region.lambdas.get(arn)
        if not func_details:
            # this can happen if we're accessing Lambdas from a different region (ARN mismatch)
            continue
        details = format_func_details(func_details)
        details['Tags'] = func.tags
        funcs.append(details)
    return funcs
def format_func_details(func_details, version=None, always_add_version=False):
    """Render a LambdaFunction object as an AWS FunctionConfiguration dict.

    :param version: version to render (defaults to $LATEST)
    :param always_add_version: append ':<version>' to the ARN even for $LATEST
    """
    version = version or VERSION_LATEST
    func_version = func_details.get_version(version)
    result = {
        'CodeSha256': func_version.get('CodeSha256'),
        'Role': func_details.role,
        'KMSKeyArn': func_details.kms_key_arn,
        'Version': version,
        'VpcConfig': func_details.vpc_config,
        'FunctionArn': func_details.arn(),
        'FunctionName': func_details.name(),
        'CodeSize': func_version.get('CodeSize'),
        'Handler': func_details.handler,
        'Runtime': func_details.runtime,
        'Timeout': func_details.timeout,
        'Description': func_details.description,
        'MemorySize': func_details.memory_size,
        'LastModified': isoformat_milliseconds(func_details.last_modified) + '+0000',
        'TracingConfig': {'Mode': 'PassThrough'},
        'RevisionId': func_version.get('RevisionId'),
        'State': 'Active',
        'LastUpdateStatus': 'Successful',
        'PackageType': func_details.package_type,
        'ImageConfig': getattr(func_details, 'image_config', None)
    }
    if func_details.dead_letter_config:
        result['DeadLetterConfig'] = func_details.dead_letter_config
    if func_details.envvars:
        result['Environment'] = {
            'Variables': func_details.envvars
        }
    # only append the version qualifier if the ARN is not already qualified
    if (always_add_version or version != VERSION_LATEST) and len(result['FunctionArn'].split(':')) <= 7:
        result['FunctionArn'] += ':%s' % version
    return result
def forward_to_external_url(func_details, event, context, invocation_type):
    """ If LAMBDA_FORWARD_URL is configured, forward the invocation of this Lambda to the configured URL. """
    if not config.LAMBDA_FORWARD_URL:
        return
    url = '%s%s/functions/%s/invocations' % (config.LAMBDA_FORWARD_URL, PATH_ROOT, func_details.name())
    headers = aws_stack.mock_aws_request_headers('lambda')
    headers['X-Amz-Invocation-Type'] = invocation_type
    headers['X-Amz-Log-Type'] = 'Tail'
    client_context = context.get('client_context')
    if client_context:
        headers['X-Amz-Client-Context'] = client_context
    data = json.dumps(event) if isinstance(event, dict) else str(event)
    LOG.debug('Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s' % config.LAMBDA_FORWARD_URL)
    response = safe_requests.post(url, data, headers=headers)
    content = run_safe(lambda: to_str(response.content)) or response.content
    LOG.debug('Received result from external Lambda endpoint (status %s): %s' % (response.status_code, content))
    flask_response = aws_responses.requests_to_flask_response(response)
    return lambda_executors.InvocationResult(flask_response)
def forward_to_fallback_url(func_arn, data):
    """ If LAMBDA_FALLBACK_URL is configured, forward the invocation of this non-existing
    Lambda to the configured URL. """
    if not config.LAMBDA_FALLBACK_URL:
        return
    lambda_name = aws_stack.lambda_function_name(func_arn)
    if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'):
        # fallback target is a DynamoDB table: persist the payload as an item
        table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc
        dynamodb = aws_stack.connect_to_service('dynamodb')
        item = {
            'id': {'S': short_uid()},
            'timestamp': {'N': str(now_utc())},
            'payload': {'S': data},
            'function_name': {'S': lambda_name}
        }
        aws_stack.create_dynamodb_table(table_name, partition_key='id')
        dynamodb.put_item(TableName=table_name, Item=item)
        return ''
    if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL):
        # fallback target is an HTTP(S) endpoint: POST the payload there
        headers = {
            'lambda-function-name': lambda_name,
            'Content-Type': APPLICATION_JSON
        }
        response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data, headers=headers)
        content = response.content
        try:
            # parse the response into a dictionary to get details
            # like function error etc.
            content = json.loads(content)
        except Exception:
            pass
        return content
    raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL)
def get_lambda_policy(function, qualifier=None):
    """Return the IAM policy document attached to the given Lambda function, or None.

    Scans all local IAM policies and matches the one whose first statement's
    Resource equals the (optionally qualified) function ARN.
    """
    iam_client = aws_stack.connect_to_service('iam')
    policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']
    docs = []
    for p in policies:
        # !TODO: Cache policy documents instead of running N+1 API calls here!
        versions = iam_client.list_policy_versions(PolicyArn=p['Arn'])['Versions']
        default_version = [v for v in versions if v.get('IsDefaultVersion')]
        versions = default_version or versions
        doc = versions[0]['Document']
        doc = doc if isinstance(doc, dict) else json.loads(doc)
        # normalize 'Statement' to a list and ensure each statement has a Principal
        if not isinstance(doc['Statement'], list):
            doc['Statement'] = [doc['Statement']]
        for stmt in doc['Statement']:
            stmt['Principal'] = stmt.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID}
        doc['PolicyArn'] = p['Arn']
        doc['Id'] = 'default'
        docs.append(doc)
    res_qualifier = func_qualifier(function, qualifier)
    policy = [d for d in docs if d['Statement'][0]['Resource'] == res_qualifier]
    return (policy or [None])[0]
def lookup_function(function, region, request_url):
    """Return the GetFunction JSON response (configuration, code location, tags, concurrency)."""
    response = {
        'Configuration': function,
        'Code': {
            'Location': '%s/code' % request_url
        },
        'Tags': function['Tags']
    }
    lambda_details = region.lambdas.get(function['FunctionArn'])
    if lambda_details.concurrency is not None:
        response['Concurrency'] = lambda_details.concurrency
    return jsonify(response)
def not_found_error(ref=None, msg=None):
    """Return a 404 ResourceNotFoundException response, deriving the message from `ref` if given."""
    if ref:
        kind = 'Function' if ':function:' in ref else 'Resource'
        msg = '%s not found: %s' % (kind, ref)
    elif not msg:
        msg = 'The resource you requested does not exist.'
    return error_response(msg, 404, error_type='ResourceNotFoundException')
# ------------
# API METHODS
# ------------
@app.before_request
def before_request():
    # fix to enable chunked encoding, as this is used by some Lambda clients
    if request.headers.get('Transfer-Encoding', '').lower() == 'chunked':
        request.environ['wsgi.input_terminated'] = True
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
    """ Create new function
    ---
    operationId: 'createFunction'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    arn = 'n/a'
    try:
        # reject oversized payloads before attempting to parse them
        if len(request.data) > FUNCTION_MAX_SIZE:
            return error_response('Request size (%s) must be smaller than %s bytes for the CreateFunction operation' %
                (len(request.data), FUNCTION_MAX_SIZE), 413, error_type='RequestEntityTooLargeException')
        data = json.loads(to_str(request.data))
        lambda_name = data['FunctionName']
        event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={'n': event_publisher.get_hash(lambda_name)})
        arn = func_arn(lambda_name)
        if arn in region.lambdas:
            return error_response('Function already exist: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
        # register the new function and populate its metadata from the request
        region.lambdas[arn] = func_details = LambdaFunction(arn)
        func_details.versions = {VERSION_LATEST: {'RevisionId': str(uuid.uuid4())}}
        func_details.vpc_config = data.get('VpcConfig', {})
        func_details.last_modified = datetime.utcnow()
        func_details.description = data.get('Description', '')
        func_details.handler = data.get('Handler')
        func_details.runtime = data.get('Runtime')
        func_details.envvars = data.get('Environment', {}).get('Variables', {})
        func_details.tags = data.get('Tags', {})
        func_details.timeout = data.get('Timeout', LAMBDA_DEFAULT_TIMEOUT)
        func_details.role = data['Role']
        func_details.kms_key_arn = data.get('KMSKeyArn')
        func_details.memory_size = data.get('MemorySize')
        func_details.code_signing_config_arn = data.get('CodeSigningConfigArn')
        func_details.code = data['Code']
        func_details.package_type = data.get('PackageType') or 'Zip'
        func_details.image_config = data.get('ImageConfig', {})
        func_details.set_dead_letter_config(data)
        # materialize the function code; a Response here signals an error,
        # in which case the partially created function is rolled back
        result = set_function_code(func_details.code, lambda_name)
        if isinstance(result, Response):
            del region.lambdas[arn]
            return result
        # remove content from code attribute, if present
        func_details.code.pop('ZipFile', None)
        # prepare result
        result.update(format_func_details(func_details))
        if data.get('Publish'):
            result['Version'] = publish_new_function_version(arn)['Version']
        return jsonify(result or {})
    except Exception as e:
        # roll back the registration on any failure and surface the error
        region.lambdas.pop(arn, None)
        if isinstance(e, ClientError):
            return e.get_response()
        return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
    """ Get details for a single function
    ---
    operationId: 'getFunction'
    parameters:
        - name: 'request'
          in: body
        - name: 'function'
          in: path
    """
    region = LambdaRegion.get()
    # match either by exact function name or by (partial) function ARN
    for func in do_list_functions():
        if function == func['FunctionName'] or function in func['FunctionArn']:
            return lookup_function(func, region, request.url)
    return not_found_error(func_arn(function))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
    """ List functions
    ---
    operationId: 'listFunctions'
    parameters:
        - name: 'request'
          in: body
    """
    # the full function list is returned unpaginated
    return jsonify({'Functions': do_list_functions()})
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
    """ Delete an existing function
    ---
    operationId: 'deleteFunction'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)
    try:
        region.lambdas.pop(arn)
    except KeyError:
        return not_found_error(func_arn(function))
    event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={'n': event_publisher.get_hash(function)})
    # Drop all event source mappings that point at the deleted function.
    # Slice assignment filters in place (other references to the list stay
    # valid), replacing the previous error-prone delete-while-iterating
    # index-juggling loop.
    region.event_source_mappings[:] = [
        m for m in region.event_source_mappings if m['FunctionArn'] != arn
    ]
    return jsonify({})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
    """ Update the code of an existing function
    ---
    operationId: 'updateFunctionCode'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return error_response('Function not found: %s' %
            arn, 400, error_type='ResourceNotFoundException')
    data = json.loads(to_str(request.data))
    result = set_function_code(data, function)
    # bug fix: check for an error Response *before* treating the result as a
    # dict - calling .update() on a flask Response object would raise
    # AttributeError instead of returning the error to the client
    if isinstance(result, Response):
        return result
    func_details = region.lambdas.get(arn)
    result.update(format_func_details(func_details))
    if data.get('Publish'):
        result['Version'] = publish_new_function_version(arn)['Version']
    return jsonify(result or {})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
    """ Get the code of an existing function
    ---
    operationId: 'getFunctionCode'
    parameters:
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    func_details = region.lambdas.get(arn)
    if not func_details:
        return not_found_error(arn)
    # serve the deployed ZIP archive from the function's working directory
    lambda_cwd = func_details.cwd
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    return Response(load_file(tmp_file, mode='rb'),
        mimetype='application/zip',
        headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['GET'])
def get_function_configuration(function):
    """ Get the configuration of an existing function
    ---
    operationId: 'getFunctionConfiguration'
    parameters:
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    details = region.lambdas.get(arn)
    if not details:
        return not_found_error(arn)
    return jsonify(format_func_details(details))
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
    """ Update the configuration of an existing function
    ---
    operationId: 'updateFunctionConfiguration'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    arn = func_arn(function)
    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)
    lambda_details = region.lambdas.get(arn)
    if not lambda_details:
        return error_response('Unable to find Lambda function ARN "%s"' % arn,
            404, error_type='ResourceNotFoundException')
    # copy request fields onto the function details; only truthy values apply
    attribute_map = {
        'Handler': 'handler',
        'Runtime': 'runtime',
        'Timeout': 'timeout',
        'Role': 'role',
        'MemorySize': 'memory_size',
        'Description': 'description',
        'VpcConfig': 'vpc_config',
        'KMSKeyArn': 'kms_key_arn',
    }
    for request_key, attr_name in attribute_map.items():
        if data.get(request_key):
            setattr(lambda_details, attr_name, data[request_key])
    lambda_details.set_dead_letter_config(data)
    # environment variables may legitimately be set to an empty mapping,
    # so test explicitly against None rather than truthiness
    env_vars = data.get('Environment', {}).get('Variables')
    if env_vars is not None:
        lambda_details.envvars = env_vars
    result = data
    result.update(format_func_details(lambda_details))
    return jsonify(result)
def generate_policy_statement(sid, action, arn, sourcearn, principal):
    """Build a single Allow IAM policy statement granting *action* on *arn*.

    A SourceArn condition and a Service principal are attached only when the
    corresponding arguments are provided (truthy).
    """
    statement = {
        'Sid': sid,
        'Effect': 'Allow',
        'Action': action,
        'Resource': arn,
    }
    # Adds SourceArn only if SourceArn is present
    if sourcearn:
        statement['Condition'] = {'ArnLike': {'AWS:SourceArn': sourcearn}}
    # Adds Principal only if Principal is present
    if principal:
        statement['Principal'] = {'Service': principal}
    return statement
def generate_policy(sid, action, arn, sourcearn, principal):
    """Wrap a freshly generated statement into a complete IAM policy document."""
    statement = generate_policy_statement(sid, action, arn, sourcearn, principal)
    return {
        'Version': IAM_POLICY_VERSION,
        'Id': 'LambdaFuncAccess-%s' % sid,
        'Statement': [statement],
    }
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['POST'])
def add_permission(function):
    """Add a resource-based permission statement to a function's policy."""
    qualifier = request.args.get('Qualifier')
    arn = func_arn(function)
    qualified_arn = func_qualifier(function, qualifier)
    return add_permission_policy_statement(function, arn, qualified_arn)
def add_permission_policy_statement(resource_name, resource_arn, resource_arn_qualified):
    """Add a new statement to the resource-based policy of a Lambda resource.

    Validates the requested action, rejects duplicate statement IDs, merges
    the new statement with any pre-existing policy statements, and re-creates
    the backing IAM policy.
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    iam_client = aws_stack.connect_to_service('iam')
    sid = data.get('StatementId')
    action = data.get('Action')
    principal = data.get('Principal')
    sourcearn = data.get('SourceArn')
    previous_policy = get_lambda_policy(resource_name)
    if resource_arn not in region.lambdas:
        return not_found_error(resource_arn)
    # action must look like 'lambda:<Action>', 'lambda:*' or '*'
    if not re.match(r'lambda:[*]|lambda:[a-zA-Z]+|[*]', action):
        return error_response('1 validation error detected: Value "%s" at "action" failed to satisfy '
                              'constraint: Member must satisfy regular expression pattern: '
                              '(lambda:[*]|lambda:[a-zA-Z]+|[*])' % action,
                              400, error_type='ValidationException')
    new_policy = generate_policy(sid, action, resource_arn_qualified, sourcearn, principal)
    if previous_policy:
        # duplicate statement ids are rejected, mirroring AWS behavior
        statment_with_sid = next((statement for statement in previous_policy['Statement'] if statement['Sid'] == sid),
                                 None)
        if statment_with_sid:
            msg = ('The statement id (%s) provided already exists. Please provide a new statement id, '
                   'or remove the existing statement.') % sid
            return error_response(msg, 400, error_type='ResourceConflictException')
        # merge old statements into the new policy, then replace the old IAM policy
        new_policy['Statement'].extend(previous_policy['Statement'])
        iam_client.delete_policy(PolicyArn=previous_policy['PolicyArn'])
    policy_name = LAMBDA_POLICY_NAME_PATTERN % resource_name
    LOG.debug('Creating IAM policy "%s" for Lambda resource %s' % (policy_name, resource_arn))
    iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(new_policy),
                             Description='Policy for Lambda function "%s"' % resource_name)
    result = {'Statement': json.dumps(new_policy['Statement'][0])}
    return jsonify(result)
@app.route('%s/functions/<function>/policy/<statement>' % PATH_ROOT, methods=['DELETE'])
def remove_permission(function, statement):
    """Remove a permission statement from a function's resource policy.

    NOTE(review): the current implementation deletes the entire backing IAM
    policy rather than only the statement identified by *statement*.
    """
    qualifier = request.args.get('Qualifier')
    iam_client = aws_stack.connect_to_service('iam')
    policy = get_lambda_policy(function)
    if not policy:
        return error_response('Unable to find policy for Lambda function "%s"' % function,
                              404, error_type='ResourceNotFoundException')
    iam_client.delete_policy(PolicyArn=policy['PolicyArn'])
    result = {
        'FunctionName': function,
        'Qualifier': qualifier,
        'StatementId': policy['Statement'][0]['Sid'],
    }
    return jsonify(result)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['GET'])
def get_policy(function):
    """Return the resource-based policy of a function as a JSON string."""
    policy = get_lambda_policy(function, request.args.get('Qualifier'))
    if not policy:
        return error_response('The resource you requested does not exist.',
            404, error_type='ResourceNotFoundException')
    return jsonify({'Policy': json.dumps(policy), 'RevisionId': 'test1234'})
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
    """ Invoke an existing function
    ---
    operationId: 'invokeFunction'
    parameters:
        - name: 'request'
          in: body
    """
    # function here can either be an arn or a function name
    arn = func_arn(function)
    # arn can also contain a qualifier, extract it from there if so
    m = re.match('(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?', arn)
    if m and m.group(2):
        qualifier = m.group(2)[1:]
        arn = m.group(1)
    else:
        qualifier = request.args.get('Qualifier')
    data = request.get_data()
    if data:
        try:
            data = to_str(data)
            data = json.loads(data)
        except Exception:
            try:
                # try to read chunked content
                data = json.loads(parse_chunked_data(data))
            except Exception:
                return error_response('The payload is not JSON: %s' % data, 415,
                                      error_type='UnsupportedMediaTypeException')
    # Default invocation type is RequestResponse
    invocation_type = request.headers.get('X-Amz-Invocation-Type', 'RequestResponse')
    log_type = request.headers.get('X-Amz-Log-Type')
    def _create_response(invocation_result, status_code=200, headers={}):
        """ Create the final response for the given invocation result. """
        # NOTE(review): the mutable default for 'headers' is tolerable only
        # because this closure is re-defined (fresh default) on every request
        if not isinstance(invocation_result, lambda_executors.InvocationResult):
            invocation_result = lambda_executors.InvocationResult(invocation_result)
        result = invocation_result.result
        log_output = invocation_result.log_output
        details = {
            'StatusCode': status_code,
            'Payload': result,
            'Headers': headers
        }
        if isinstance(result, Response):
            details['Payload'] = to_str(result.data)
            if result.status_code >= 400:
                details['FunctionError'] = 'Unhandled'
        if isinstance(result, (str, bytes)):
            try:
                result = json.loads(to_str(result))
            except Exception:
                pass
        if isinstance(result, dict):
            # allow the invocation result itself to override response details
            for key in ('StatusCode', 'Payload', 'FunctionError'):
                if result.get(key):
                    details[key] = result[key]
        # Try to parse payload as JSON
        was_json = False
        payload = details['Payload']
        if payload and isinstance(payload, POSSIBLE_JSON_TYPES) and payload[0] in JSON_START_CHARS:
            try:
                details['Payload'] = json.loads(details['Payload'])
                was_json = True
            except Exception:
                pass
        # Set error headers
        if details.get('FunctionError'):
            details['Headers']['X-Amz-Function-Error'] = str(details['FunctionError'])
        # LogResult contains the last 4KB (~4k characters) of log outputs
        logs = log_output[-4000:] if log_type == 'Tail' else ''
        details['Headers']['X-Amz-Log-Result'] = base64.b64encode(to_bytes(logs))
        details['Headers']['X-Amz-Executed-Version'] = str(qualifier or VERSION_LATEST)
        # Construct response object
        response_obj = details['Payload']
        if was_json or isinstance(response_obj, JSON_START_TYPES):
            response_obj = json_safe(response_obj)
            # Content-type header is not required since jsonify automatically adds it
            response_obj = jsonify(response_obj)
        else:
            response_obj = str(response_obj)
            details['Headers']['Content-Type'] = 'text/plain'
        return response_obj, details['StatusCode'], details['Headers']
    # check if this lambda function exists
    not_found = None
    region = LambdaRegion.get()
    if arn not in region.lambdas:
        not_found = not_found_error(arn)
    elif qualifier and not region.lambdas.get(arn).qualifier_exists(qualifier):
        not_found = not_found_error('{0}:{1}'.format(arn, qualifier))
    if not_found:
        # unknown functions may still be forwarded to a fallback URL, if configured
        try:
            forward_result = forward_to_fallback_url(arn, json.dumps(data))
            if forward_result is not None:
                return _create_response(forward_result)
        except Exception as e:
            LOG.debug('Unable to forward Lambda invocation to fallback URL: "%s" - %s' % (data, e))
        return not_found
    # dispatch on the requested invocation type
    if invocation_type == 'RequestResponse':
        context = {'client_context': request.headers.get('X-Amz-Client-Context')}
        result = run_lambda(func_arn=arn, event=data, context=context, asynchronous=False, version=qualifier)
        return _create_response(result)
    elif invocation_type == 'Event':
        run_lambda(func_arn=arn, event=data, context={}, asynchronous=True, version=qualifier)
        return _create_response('', status_code=202)
    elif invocation_type == 'DryRun':
        # Assume the dry run always passes.
        return _create_response('', status_code=204)
    return error_response('Invocation type not one of: RequestResponse, Event or DryRun',
                          code=400, error_type='InvalidParameterValueException')
@app.route('%s/event-source-mappings' % PATH_ROOT, methods=['GET'], strict_slashes=False)
def get_event_source_mappings():
    """ List event source mappings
    ---
    operationId: 'listEventSourceMappings'
    """
    region = LambdaRegion.get()
    source_arn = request.args.get('EventSourceArn')
    function_name = request.args.get('FunctionName')
    mappings = region.event_source_mappings
    # optionally narrow the result by event source and/or target function
    if source_arn:
        mappings = [m for m in mappings if m.get('EventSourceArn') == source_arn]
    if function_name:
        target_arn = func_arn(function_name)
        mappings = [m for m in mappings if m.get('FunctionArn') == target_arn]
    return jsonify({'EventSourceMappings': mappings})
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['GET'])
def get_event_source_mapping(mapping_uuid):
    """ Get an existing event source mapping
    ---
    operationId: 'getEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    mapping = next((m for m in region.event_source_mappings
                    if m.get('UUID') == mapping_uuid), None)
    if mapping is None:
        return not_found_error()
    return jsonify(mapping)
@app.route('%s/event-source-mappings' % PATH_ROOT, methods=['POST'], strict_slashes=False)
def create_event_source_mapping():
    """ Create new event source mapping
    ---
    operationId: 'createEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    payload = json.loads(to_str(request.data))
    try:
        return jsonify(add_event_source(payload))
    except ValueError as error:
        # add_event_source signals validation problems as (error_type, message)
        error_type, message = error.args
        return error_response(message, code=400, error_type=error_type)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
    """ Update an existing event source mapping
    ---
    operationId: 'updateEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    payload = json.loads(to_str(request.data))
    if not mapping_uuid:
        # no UUID given - nothing to update
        return jsonify({})
    try:
        return jsonify(update_event_source(mapping_uuid, payload))
    except ValueError as error:
        # update_event_source signals validation problems as (error_type, message)
        error_type, message = error.args
        return error_response(message, code=400, error_type=error_type)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
    """ Delete an event source mapping
    ---
    operationId: 'deleteEventSourceMapping'
    """
    if not mapping_uuid:
        # no UUID given - nothing to delete
        return jsonify({})
    return jsonify(delete_event_source(mapping_uuid))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['POST'])
def publish_version(function):
    """Publish a new immutable version of the given Lambda function."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    if function_arn not in region.lambdas:
        return not_found_error(function_arn)
    return jsonify(publish_new_function_version(function_arn))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['GET'])
def list_versions(function):
    """List all published versions of the given Lambda function."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    if function_arn not in region.lambdas:
        return not_found_error(function_arn)
    return jsonify({'Versions': do_list_versions(function_arn)})
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['POST'])
def create_alias(function):
    """Create an alias for a Lambda function.

    Returns 404 when the function does not exist, and 409 with
    ResourceConflictException when the alias already exists.
    """
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return not_found_error(arn)
    data = json.loads(request.data)
    alias = data.get('Name')
    if alias in region.lambdas.get(arn).aliases:
        # bug fix: a conflict must be reported with HTTP 409 (matching the
        # ResourceConflictException type and the create_function handler),
        # not 404
        return error_response('Alias already exists: %s' % (arn + ':' + alias), 409,
                              error_type='ResourceConflictException')
    version = data.get('FunctionVersion')
    description = data.get('Description')
    return jsonify(do_update_alias(arn, alias, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['PUT'])
def update_alias(function, name):
    """Update an existing alias; absent fields keep their current values."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return not_found_error(arn)
    aliases = region.lambdas.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    current = aliases.get(name)
    payload = json.loads(request.data)
    # fall back to the currently stored values for omitted fields
    version = payload.get('FunctionVersion') or current.get('FunctionVersion')
    description = payload.get('Description') or current.get('Description')
    return jsonify(do_update_alias(arn, name, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['GET'])
def get_alias(function, name):
    """Return the named alias of a function, or a 404 error."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return not_found_error(arn)
    aliases = region.lambdas.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    return jsonify(aliases.get(name))
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['GET'])
def list_aliases(function):
    """List all aliases of a function, sorted by alias name."""
    region = LambdaRegion.get()
    arn = func_arn(function)
    if arn not in region.lambdas:
        return not_found_error(arn)
    aliases = sorted(region.lambdas.get(arn).aliases.values(),
                     key=lambda alias: alias['Name'])
    return jsonify({'Aliases': aliases})
@app.route('/<version>/functions/<function>/concurrency', methods=['GET', 'PUT', 'DELETE'])
def function_concurrency(version, function):
    """Get, set or delete the reserved concurrency of a function."""
    region = LambdaRegion.get()
    # the version for put_concurrency != PATH_ROOT, at the time of this
    # writing it's: /2017-10-31 for this endpoint
    # https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
    arn = func_arn(function)
    lambda_details = region.lambdas.get(arn)
    if not lambda_details:
        return not_found_error(arn)
    if request.method == 'GET':
        data = lambda_details.concurrency
    if request.method == 'PUT':
        data = json.loads(request.data)
        lambda_details.concurrency = data
    if request.method == 'DELETE':
        lambda_details.concurrency = None
        # DELETE returns an empty 204 and never reaches the jsonify below
        return Response('', status=204)
    return jsonify(data)
@app.route('/<version>/tags/<arn>', methods=['GET'])
def list_tags(version, arn):
    """Return the tags of the function identified by *arn*."""
    region = LambdaRegion.get()
    details = region.lambdas.get(arn)
    if not details:
        return not_found_error(arn)
    return jsonify({'Tags': details.tags})
@app.route('/<version>/tags/<arn>', methods=['POST'])
def tag_resource(version, arn):
    """Add or update tags on the function identified by *arn*.

    An empty or missing 'Tags' mapping is a no-op (no existence check is
    performed in that case, preserving the previous behavior).
    """
    region = LambdaRegion.get()
    data = json.loads(request.data)
    tags = data.get('Tags', {})
    if tags:
        func_details = region.lambdas.get(arn)
        if not func_details:
            return not_found_error(arn)
        # removed a redundant 'if func_details' re-check: the not-found case
        # has already returned above
        func_details.tags.update(tags)
    return jsonify({})
@app.route('/<version>/tags/<arn>', methods=['DELETE'])
def untag_resource(version, arn):
    """Remove the given tag keys from the function identified by *arn*."""
    region = LambdaRegion.get()
    keys_to_remove = request.args.getlist('tagKeys')
    details = region.lambdas.get(arn)
    if not details:
        return not_found_error(arn)
    for key in keys_to_remove:
        # missing keys are ignored silently
        details.tags.pop(key, None)
    return jsonify({})
@app.route('/2019-09-25/functions/<function>/event-invoke-config', methods=['PUT', 'POST'])
def put_function_event_invoke_config(function):
    # TODO: resouce validation required to check if resource exists
    """ Add/Updates the configuration for asynchronous invocation for a function
    ---
    operationId: PutFunctionEventInvokeConfig | UpdateFunctionEventInvokeConfig
    parameters:
        - name: 'function'
          in: path
        - name: 'qualifier'
          in: path
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    data = json.loads(to_str(request.data))
    function_arn = func_arn(function)
    lambda_obj = region.lambdas[function_arn]
    if request.method == 'PUT':
        # PUT semantics: replace the entire config instead of merging into it
        response = lambda_obj.clear_function_event_invoke_config()
    response = lambda_obj.put_function_event_invoke_config(data)
    return jsonify({
        'LastModified': response.last_modified.strftime(DATE_FORMAT),
        'FunctionArn': str(function_arn),
        'MaximumRetryAttempts': response.max_retry_attempts,
        'MaximumEventAgeInSeconds': response.max_event_age,
        'DestinationConfig': {
            'OnSuccess': {
                'Destination': str(response.on_successful_invocation)
            },
            'OnFailure': {
                'Destination': str(response.on_failed_invocation)
            }
        }
    })
@app.route('/2019-09-25/functions/<function>/event-invoke-config', methods=['GET'])
def get_function_event_invoke_config(function):
    """ Retrieves the configuration for asynchronous invocation for a function
    ---
    operationId: GetFunctionEventInvokeConfig
    parameters:
        - name: 'function'
          in: path
        - name: 'qualifier'
          in: path
        - name: 'request'
          in: body
    """
    region = LambdaRegion.get()
    try:
        function_arn = func_arn(function)
        lambda_obj = region.lambdas[function_arn]
    except Exception as e:
        # unknown function ARN (or ARN resolution failure) -> 400
        return error_response(str(e), 400)
    config = lambda_obj.get_function_event_invoke_config()
    if not config:
        msg = "The function %s doesn't have an EventInvokeConfig" % function_arn
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    return jsonify(config)
@app.route('/2019-09-25/functions/<function>/event-invoke-config', methods=['DELETE'])
def delete_function_event_invoke_config(function):
    """Delete the asynchronous-invocation configuration of a function."""
    region = LambdaRegion.get()
    try:
        lambda_obj = region.lambdas[func_arn(function)]
    except Exception as e:
        # unknown function ARN (or ARN resolution failure) -> 400
        return error_response(str(e), 400)
    lambda_obj.clear_function_event_invoke_config()
    return Response('', status=204)
@app.route('/2020-06-30/functions/<function>/code-signing-config', methods=['GET'])
def get_function_code_signing_config(function):
    """Return the code signing config attached to a function, if any."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    if function_arn not in region.lambdas:
        return error_response('Function not found: %s' % (function_arn),
                              404, error_type='ResourceNotFoundException')
    lambda_obj = region.lambdas[function_arn]
    csc_arn = lambda_obj.code_signing_config_arn
    # when no config is attached, both fields are reported as null
    result = {
        'CodeSigningConfigArn': csc_arn or None,
        'FunctionName': function if csc_arn else None
    }
    return Response(json.dumps(result), status=200)
@app.route('/2020-06-30/functions/<function>/code-signing-config', methods=['PUT'])
def put_function_code_signing_config(function):
    """Attach a code signing config to a function.

    Validates that both the code signing config and the function exist
    before linking them.
    """
    region = LambdaRegion.get()
    data = json.loads(request.data)
    arn = data.get('CodeSigningConfigArn')
    if arn not in region.code_signing_configs:
        msg = """The code signing configuration cannot be found.
Check that the provided configuration is not deleted: %s.""" % (arn)
        return error_response(msg, 404, error_type='CodeSigningConfigNotFoundException')
    function_arn = func_arn(function)
    if function_arn not in region.lambdas:
        msg = 'Function not found: %s' % (function_arn)
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    lambda_obj = region.lambdas[function_arn]
    # the arn was already validated as a key above (hence truthy), so the
    # previous redundant "if data.get('CodeSigningConfigArn')" guard is gone
    lambda_obj.code_signing_config_arn = arn
    result = {
        'CodeSigningConfigArn': arn,
        'FunctionName': function
    }
    return Response(json.dumps(result), status=200)
@app.route('/2020-06-30/functions/<function>/code-signing-config', methods=['DELETE'])
def delete_function_code_signing_config(function):
    """Detach any code signing config from the given function."""
    region = LambdaRegion.get()
    function_arn = func_arn(function)
    lambda_obj = region.lambdas.get(function_arn)
    if lambda_obj is None:
        msg = 'Function not found: %s' % (function_arn)
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    lambda_obj.code_signing_config_arn = None
    return Response('', status=204)
@app.route('/2020-04-22/code-signing-configs/', methods=['POST'])
def create_code_signing_config():
    """Create a new code signing configuration with a generated id and ARN."""
    region = LambdaRegion.get()
    data = json.loads(request.data)
    signing_profile_version_arns = data.get('AllowedPublishers').get('SigningProfileVersionArns')
    # generate a unique config id of the form 'csc-<17 hex chars>'
    code_signing_id = 'csc-%s' % long_uid().replace('-', '')[0:17]
    arn = aws_stack.code_signing_arn(code_signing_id)
    region.code_signing_configs[arn] = CodeSigningConfig(arn, code_signing_id, signing_profile_version_arns)
    code_signing_obj = region.code_signing_configs[arn]
    # optional fields are only applied when present in the request
    if data.get('Description'):
        code_signing_obj.description = data['Description']
    if data.get('CodeSigningPolicies', {}).get('UntrustedArtifactOnDeployment'):
        code_signing_obj.untrusted_artifact_on_deployment = data['CodeSigningPolicies']['UntrustedArtifactOnDeployment']
    code_signing_obj.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
    result = {
        'CodeSigningConfig': {
            'AllowedPublishers': {
                'SigningProfileVersionArns': code_signing_obj.signing_profile_version_arns
            },
            'CodeSigningConfigArn': code_signing_obj.arn,
            'CodeSigningConfigId': code_signing_obj.id,
            'CodeSigningPolicies': {
                'UntrustedArtifactOnDeployment': code_signing_obj.untrusted_artifact_on_deployment
            },
            'Description': code_signing_obj.description,
            'LastModified': code_signing_obj.last_modified
        }
    }
    return Response(json.dumps(result), status=201)
@app.route('/2020-04-22/code-signing-configs/<arn>', methods=['GET'])
def get_code_signing_config(arn):
    """Return the details of an existing code signing configuration."""
    region = LambdaRegion.get()
    csc = region.code_signing_configs.get(arn)
    if csc is None:
        msg = 'The Lambda code signing configuration %s can not be found.' % arn
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    result = {
        'CodeSigningConfig': {
            'AllowedPublishers': {
                'SigningProfileVersionArns': csc.signing_profile_version_arns
            },
            'CodeSigningConfigArn': csc.arn,
            'CodeSigningConfigId': csc.id,
            'CodeSigningPolicies': {
                'UntrustedArtifactOnDeployment': csc.untrusted_artifact_on_deployment
            },
            'Description': csc.description,
            'LastModified': csc.last_modified
        }
    }
    return Response(json.dumps(result), status=200)
@app.route('/2020-04-22/code-signing-configs/<arn>', methods=['DELETE'])
def delete_code_signing_config(arn):
    """Delete a code signing configuration; 404 if it does not exist."""
    region = LambdaRegion.get()
    if arn not in region.code_signing_configs:
        msg = 'The Lambda code signing configuration %s can not be found.' % (arn)
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    region.code_signing_configs.pop(arn)
    return Response('', status=204)
@app.route('/2020-04-22/code-signing-configs/<arn>', methods=['PUT'])
def update_code_signing_config(arn):
    """Update the mutable fields of an existing code signing configuration."""
    region = LambdaRegion.get()
    try:
        code_signing_obj = region.code_signing_configs[arn]
    except KeyError:
        msg = 'The Lambda code signing configuration %s can not be found.' % (arn)
        return error_response(msg, 404, error_type='ResourceNotFoundException')
    data = json.loads(request.data)
    # only bump LastModified when at least one field actually changed
    is_updated = False
    if data.get('Description'):
        code_signing_obj.description = data['Description']
        is_updated = True
    if data.get('AllowedPublishers', {}).get('SigningProfileVersionArns'):
        code_signing_obj.signing_profile_version_arns = data['AllowedPublishers']['SigningProfileVersionArns']
        is_updated = True
    if data.get('CodeSigningPolicies', {}).get('UntrustedArtifactOnDeployment'):
        code_signing_obj.untrusted_artifact_on_deployment = data['CodeSigningPolicies']['UntrustedArtifactOnDeployment']
        is_updated = True
    if is_updated:
        code_signing_obj.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
    result = {
        'CodeSigningConfig': {
            'AllowedPublishers': {
                'SigningProfileVersionArns': code_signing_obj.signing_profile_version_arns
            },
            'CodeSigningConfigArn': code_signing_obj.arn,
            'CodeSigningConfigId': code_signing_obj.id,
            'CodeSigningPolicies': {
                'UntrustedArtifactOnDeployment': code_signing_obj.untrusted_artifact_on_deployment
            },
            'Description': code_signing_obj.description,
            'LastModified': code_signing_obj.last_modified
        }
    }
    return Response(json.dumps(result), status=200)
def serve(port, quiet=True):
    """Start the Lambda API on the given port via the generic Flask proxy."""
    from localstack.services import generic_proxy  # moved here to fix circular import errors
    # initialize the Lambda executor
    LAMBDA_EXECUTOR.startup()
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
| 38.982613 | 120 | 0.655826 |
9077cc286634a7de9c3133d078cc5c6a5aa4cbaf | 404 | py | Python | oryxerp/utils/cron.py | oryxsolutions/oryx | 4a0a83e62d676cb24a102896b0e49ec4877b3712 | [
"MIT"
] | null | null | null | oryxerp/utils/cron.py | oryxsolutions/oryx | 4a0a83e62d676cb24a102896b0e49ec4877b3712 | [
"MIT"
] | null | null | null | oryxerp/utils/cron.py | oryxsolutions/oryx | 4a0a83e62d676cb24a102896b0e49ec4877b3712 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from datetime import datetime
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item
import frappe
from frappe.utils.data import add_to_date, cint, flt, fmt_money, now, now_datetime
from frappe.utils.logger import set_log_level
# Emit DEBUG-level records from Frappe's logging for this module's jobs.
set_log_level("DEBUG")
# Site-aware logger for the oryx cron tasks; file_count=50 is presumably the
# log-rotation backup count -- confirm against frappe.utils.logger.
logger = frappe.logger("oryx-cron", allow_site=True, file_count=50)
| 40.4 | 89 | 0.821782 |
cad69b8c64846bddf05c885fb3d0640b8202d126 | 5,059 | py | Python | kibble/api/pages/mail/top-authors.py | jbampton/kibble | 55fb362d684e24e4e2d737ca0507965d23904623 | [
"Apache-2.0"
] | 3 | 2020-10-07T10:36:20.000Z | 2020-10-24T20:43:02.000Z | kibble/api/pages/mail/top-authors.py | jbampton/kibble | 55fb362d684e24e4e2d737ca0507965d23904623 | [
"Apache-2.0"
] | null | null | null | kibble/api/pages/mail/top-authors.py | jbampton/kibble | 55fb362d684e24e4e2d737ca0507965d23904623 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
########################################################################
# OPENAPI-URI: /api/mail/top-authors
########################################################################
# get:
# responses:
# '200':
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/CommitterList'
# description: 200 Response
# default:
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/Error'
# description: unexpected error
# security:
# - cookieAuth: []
# summary: Shows the top N of email authors
# post:
# requestBody:
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/defaultWidgetArgs'
# responses:
# '200':
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/CommitterList'
# description: 200 Response
# default:
# content:
# application/json:
# schema:
# $ref: '#/components/schemas/Error'
# description: unexpected error
# security:
# - cookieAuth: []
# summary: Shows the top N of email authors
#
########################################################################
"""
This is the TopN committers list renderer for Kibble
"""
import hashlib
import json
import re
import time
# Sender addresses starting with these automation prefixes (git@, jira@,
# jenkins@, gerrit@) are treated as bots and excluded from the top list.
ROBITS = r"(git|jira|jenkins|gerrit)@"
def run(API, environ, indata, session):
    """Yield a JSON document with the top email authors for the period.

    Aggregates per-sender email counts from ElasticSearch over the requested
    time span (default: the last six months), drops senders matching ROBITS
    (automation accounts), enriches the remainder from the ``person`` index,
    and yields the result sorted by count, descending.

    Raises a 403 API exception when the session is not authenticated.
    """
    # We need to be logged in for this!
    if not session.user:
        # (fixed: the message previously ended with a stray, unfilled "%s")
        raise API.exception(403, "You must be logged in to use this API endpoint!")
    now = time.time()
    # First, fetch the view if we have such a thing enabled
    viewList = []
    if indata.get("view"):
        viewList = session.getView(indata.get("view"))
    if indata.get("subfilter"):
        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
    dateTo = indata.get("to", int(time.time()))
    dateFrom = indata.get(
        "from", dateTo - (86400 * 30 * 6)
    )  # Default to a 6 month span
    dOrg = session.user["defaultOrganisation"] or "apache"
    query = {
        "query": {
            "bool": {
                "must": [
                    {"range": {"ts": {"from": dateFrom, "to": dateTo}}},
                    {"term": {"organisation": dOrg}},
                ]
            }
        }
    }
    # Source-specific or view-specific??
    if indata.get("source"):
        query["query"]["bool"]["must"].append(
            {"term": {"sourceID": indata.get("source")}}
        )
    elif viewList:
        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
    # Top senders this period -- size 30 so that roughly 25 entries remain
    # after the bot filter below (presumably; confirm intent).
    query["aggs"] = {"authors": {"terms": {"field": "sender", "size": 30}}}
    res = session.DB.ES.search(
        index=session.DB.dbname, doc_type="email", size=0, body=query
    )
    people = {}
    for bucket in res["aggregations"]["authors"]["buckets"]:
        email = bucket["key"]
        # By default, we want to see humans, not bots on this list!
        if re.match(ROBITS, email):
            continue
        count = bucket["doc_count"]
        # Person documents are keyed by sha1(org + email).
        sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
            pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha)
            person = pres["_source"]
            person["name"] = person.get("name", "unknown")
            people[email] = person
            people[email]["gravatar"] = hashlib.md5(
                person.get("email", "unknown").encode("utf-8")
            ).hexdigest()
            people[email]["count"] = count
    # Sort the enriched person records by email count, most active first.
    topN = sorted(people.values(), key=lambda x: x["count"], reverse=True)
    JSON_OUT = {
        "topN": {"denoter": "emails", "items": topN},
        "sorted": people,
        "okay": True,
        "responseTime": time.time() - now,
    }
    yield json.dumps(JSON_OUT)
| 32.63871 | 88 | 0.546155 |
bce8991500f09e8c759f84c00643566e0a2857d5 | 3,574 | py | Python | ntp/setup.py | michael-go/integrations-core | b094befc63a479e6496ad0d0c7bb340be63699fc | [
"BSD-3-Clause"
] | null | null | null | ntp/setup.py | michael-go/integrations-core | b094befc63a479e6496ad0d0c7bb340be63699fc | [
"BSD-3-Clause"
] | null | null | null | ntp/setup.py | michael-go/integrations-core | b094befc63a479e6496ad0d0c7bb340be63699fc | [
"BSD-3-Clause"
] | null | null | null | # Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
import json
import re
here = path.abspath(path.dirname(__file__))
def parse_req_line(line):
    """Parse one line of a pip requirements file.

    Returns the bare requirement specifier (e.g. ``"foo==1.2"``) with any
    inline comment, ``--hash`` option, environment marker (``;``) or line
    continuation (``\\``) stripped.  Returns ``None`` for blank lines,
    comment lines, and ``--hash`` continuation lines.
    """
    line = line.strip()
    if not line or line.startswith('--hash') or line[0] == '#':
        return None
    # Strip an inline comment.  The requirement is the part BEFORE the first
    # '#'.  (The previous code used rpartition and kept req[1] -- the '#'
    # separator itself -- so any line with an inline comment parsed as '#'.)
    before, sep, _comment = line.partition('#')
    if sep:
        line = before.strip()
    if '--hash=' in line:
        line = line[:line.find('--hash=')].strip()
    if ';' in line:
        line = line[:line.find(';')].strip()
    if '\\' in line:
        line = line[:line.find('\\')].strip()
    return line
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Parse requirements: the base check package is always included, the rest
# come from requirements.txt.  parse_req_line returns None (falsy) for
# blank/comment/hash-only lines, which are skipped.
runtime_reqs = ['datadog-checks-base']
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    for line in f.readlines():
        req = parse_req_line(line)
        if req:
            runtime_reqs.append(req)
def read(*parts):
    """Return the text content of the file at ``here``/``parts``."""
    target = path.join(here, *parts)
    with open(target, 'r') as handle:
        return handle.read()
def find_version(*file_paths):
    """Extract the ``__version__`` assignment from the given source file.

    Raises RuntimeError when no such assignment is found.
    """
    source = read(*file_paths)
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# https://packaging.python.org/guides/single-sourcing-package-version/
version = find_version("datadog_checks", "ntp", "__init__.py")

# The version declared in manifest.json must agree with the module's
# __version__; refuse to build the wheel otherwise.
manifest_version = None
with open(path.join(here, 'manifest.json'), encoding='utf-8') as f:
    manifest = json.load(f)
    manifest_version = manifest.get('version')

if version != manifest_version:
    raise Exception("Inconsistent versioning in module and manifest - aborting wheel build")
setup(
    name='datadog-ntp',
    version=version,
    description='The NTP check',
    long_description=long_description,
    keywords='datadog agent ntp check',

    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',

    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',

    # License
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],

    # The package we're going to ship
    packages=['datadog_checks.ntp'],

    # Run-time dependencies
    install_requires=list(set(runtime_reqs)),

    # Development dependencies, run with:
    # $ pip install -e .[dev]
    extras_require={
        'dev': [
            'check-manifest',
            'datadog_agent_tk>=5.15',
        ],
    },

    # Testing setup and dependencies
    tests_require=[
        'nose',
        'coverage',
        'datadog_agent_tk>=5.15',
    ],
    test_suite='nose.collector',

    # Extra files to ship with the wheel package
    # NOTE(review): the b'' (bytes) key is a Python 2 relic; under Python 3
    # setuptools expects str keys in package_data -- confirm before porting.
    package_data={b'datadog_checks.ntp': ['conf.yaml.default']},
    include_package_data=True,

    # The entrypoint to run the check manually without an agent
    entry_points={
        'console_scripts': [
            'ntp=datadog_checks.ntp:main',
        ],
    },
)
| 27.921875 | 92 | 0.625909 |
a6bfb1d22545d84dbe8f1b04d9e50a1d2e2cefd1 | 401 | py | Python | build/crazyflie_controller/catkin_generated/pkg.develspace.context.pc.py | josephyaconelli/crazyflie_ros-pwm-control | 40a7370843557abc60f8d4c32163d9512d8277d2 | [
"MIT"
] | 1 | 2018-07-02T23:35:32.000Z | 2018-07-02T23:35:32.000Z | build/crazyflie_controller/catkin_generated/pkg.develspace.context.pc.py | josephyaconelli/crazyflie_ros-pwm-control | 40a7370843557abc60f8d4c32163d9512d8277d2 | [
"MIT"
] | null | null | null | build/crazyflie_controller/catkin_generated/pkg.develspace.context.pc.py | josephyaconelli/crazyflie_ros-pwm-control | 40a7370843557abc60f8d4c32163d9512d8277d2 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "crazyflie_controller"
PROJECT_SPACE_DIR = "/home/joseph/other_research/crazyflie_ros/devel"
PROJECT_VERSION = "0.0.1"
| 44.555556 | 69 | 0.723192 |
9ce5de08cb409fed6432eb1b9929b63e63da8ce2 | 12,534 | py | Python | src/dynamic/models/turbine_governor.py | yanzhaochang/PSATools-Python | 7524d7eeed26db9fba93c0ea03a7c8c0bfee7410 | [
"Apache-2.0"
] | 2 | 2021-03-29T10:36:57.000Z | 2021-04-01T06:54:10.000Z | src/dynamic/models/turbine_governor.py | yanzhaochang/PSATools-Python | 7524d7eeed26db9fba93c0ea03a7c8c0bfee7410 | [
"Apache-2.0"
] | null | null | null | src/dynamic/models/turbine_governor.py | yanzhaochang/PSATools-Python | 7524d7eeed26db9fba93c0ea03a7c8c0bfee7410 | [
"Apache-2.0"
] | 1 | 2021-04-01T06:54:24.000Z | 2021-04-01T06:54:24.000Z | import sys
sys.path.append('../..')
import apis
from apis import apis_dynamic
def solve_turbine_governor_model_state_variable(generator, par_type):
    '''
    Dispatch to the matching governor-model solver for this generator.
    Args:
        (1) generator, int, generator connected bus number.
        (2) par_type, bool, state variables type. True for actual value and False for estimated value.
    Rets: None
    '''
    model_name = apis.get_generator_related_model_data(generator, 'GOV', 'TGMN')
    # Map (model name, actual-value flag) to the appropriate solver.
    solvers = {
        ('IEEEG1', True): solve_IEEEG1_model_state_actual_value,
        ('IEEEG1', False): solve_IEEEG1_model_state_estimated_value,
        ('IEEEG3', True): solve_IEEEG3_model_state_actual_value,
        ('IEEEG3', False): solve_IEEEG3_model_state_estimated_value,
    }
    solver = solvers.get((model_name, par_type is True))
    if solver is not None:
        solver(generator)
    # Unknown governor models are silently ignored, as before.
    return
def solve_IEEEG1_model_state_estimated_value(generator):
    '''
    Solving the estimated value of state parameters of IEEEG1 governor model.

    Performs one explicit-Euler predictor step on the three governor states
    and stores the results, plus the resulting mechanical power, as the
    estimated (False) state set.
    Args:
        generator, IEEEG1 model connected bus number
    Rets: None
    '''
    # Current accepted (True) state values from the previous step.
    x1 = apis_dynamic.get_turbine_state_data(generator, True, 'x1')
    x2 = apis_dynamic.get_turbine_state_data(generator, True, 'x2')
    x3 = apis_dynamic.get_turbine_state_data(generator, True, 'x3')
    omega = apis_dynamic.get_generator_state_data(generator, True, 'omega')
    time_step = apis.get_simulator_parameter('dynamic', 'time_step')
    # IEEEG1 governor model parameters.
    P0 = apis.get_generator_related_model_data(generator, 'GOV', 'P0')
    K = apis.get_generator_related_model_data(generator, 'GOV', 'K')
    K1 = apis.get_generator_related_model_data(generator, 'GOV', 'K1')
    K3 = apis.get_generator_related_model_data(generator, 'GOV', 'K3')
    T3 = apis.get_generator_related_model_data(generator, 'GOV', 'T3')
    UO = apis.get_generator_related_model_data(generator, 'GOV', 'UO')
    UC = apis.get_generator_related_model_data(generator, 'GOV', 'UC')
    PMAX = apis.get_generator_related_model_data(generator, 'GOV', 'PMAX')
    PMIN = apis.get_generator_related_model_data(generator, 'GOV', 'PMIN')
    T4 = apis.get_generator_related_model_data(generator, 'GOV', 'T4')
    T5 = apis.get_generator_related_model_data(generator, 'GOV', 'T5')
    # Gate servo derivative: power reference minus speed-deviation feedback.
    # NOTE(review): abs(omega-1.0) makes the droop respond identically to
    # over- and under-frequency; a conventional droop uses the signed
    # deviation -- confirm modeling intent.
    dx1 = (P0 - abs(omega-1.0) * K - x1) / T3
    # Rate limits: gate velocity clamped to [UC, UO].
    if dx1 > UO:
        dx1 = UO
    if dx1 < UC:
        dx1 = UC
    # Anti-windup: freeze the gate when it sits at a position limit and the
    # derivative would push it further out.  NOTE(review): float equality
    # assumes the state is snapped exactly to PMAX/PMIN elsewhere -- confirm.
    if x1 == PMAX and dx1 > 0:
        dx1 = 0
    if x1 == PMIN and dx1 < 0:
        dx1 = 0
    # First-order turbine stages with time constants T4 and T5.
    dx2 = (x1 - x2) / T4
    dx3 = (x2 - x3) / T5
    # Explicit Euler predictor step.
    x10 = x1 + dx1 * time_step
    x20 = x2 + dx2 * time_step
    x30 = x3 + dx3 * time_step
    # Mechanical power: weighted sum of the turbine stage outputs.
    Pm0 = K1 * x20 + K3 * x30
    # Store as the estimated (False) state set.
    apis_dynamic.set_turbine_state_data(generator, False, 'x1', x10)
    apis_dynamic.set_turbine_state_data(generator, False, 'x2', x20)
    apis_dynamic.set_turbine_state_data(generator, False, 'x3', x30)
    apis_dynamic.set_generator_state_data(generator, False, 'Pm', Pm0)
    return
def solve_IEEEG1_model_state_actual_value(generator):
    '''
    Solving the actual value of state parameters of IEEEG1 governor model.

    Corrector step: averages the derivatives evaluated at the current (True)
    state and at the predictor (False) state -- Heun's / trapezoidal method --
    and commits the result as the actual (True) state set.
    Args:
        generator, IEEEG1 model connected bus number
    Rets:
        None
    '''
    # Current (True) state and the predictor (False) state from the
    # estimated-value step.
    x1 = apis_dynamic.get_turbine_state_data(generator, True, 'x1')
    x2 = apis_dynamic.get_turbine_state_data(generator, True, 'x2')
    x3 = apis_dynamic.get_turbine_state_data(generator, True, 'x3')
    x10 = apis_dynamic.get_turbine_state_data(generator, False, 'x1')
    x20 = apis_dynamic.get_turbine_state_data(generator, False, 'x2')
    x30 = apis_dynamic.get_turbine_state_data(generator, False, 'x3')
    omega = apis_dynamic.get_generator_state_data(generator, True, 'omega')
    omega0 = apis_dynamic.get_generator_state_data(generator, False, 'omega')
    time_step = apis.get_simulator_parameter('dynamic', 'time_step')
    # IEEEG1 governor model parameters.
    P0 = apis.get_generator_related_model_data(generator, 'GOV', 'P0')
    K = apis.get_generator_related_model_data(generator, 'GOV', 'K')
    K1 = apis.get_generator_related_model_data(generator, 'GOV', 'K1')
    K3 = apis.get_generator_related_model_data(generator, 'GOV', 'K3')
    T3 = apis.get_generator_related_model_data(generator, 'GOV', 'T3')
    UO = apis.get_generator_related_model_data(generator, 'GOV', 'UO')
    UC = apis.get_generator_related_model_data(generator, 'GOV', 'UC')
    PMAX = apis.get_generator_related_model_data(generator, 'GOV', 'PMAX')
    PMIN = apis.get_generator_related_model_data(generator, 'GOV', 'PMIN')
    T4 = apis.get_generator_related_model_data(generator, 'GOV', 'T4')
    T5 = apis.get_generator_related_model_data(generator, 'GOV', 'T5')
    # Derivatives at the current state (same rate-limit and anti-windup
    # logic as the predictor step; see the NOTE(review) remarks there).
    dx1 = (P0 - abs(omega-1) * K - x1) / T3
    if dx1 > UO:
        dx1 = UO
    if dx1 < UC:
        dx1 = UC
    if x1 == PMAX and dx1 > 0:
        dx1 = 0
    if x1 == PMIN and dx1 < 0:
        dx1 = 0
    dx2 = (x1 - x2) / T4
    dx3 = (x2 - x3) / T5
    # Derivatives at the predictor state.
    dx10 = (P0 - abs(omega0-1) * K - x10) / T3
    if dx10 > UO:
        dx10 = UO
    if dx10 < UC:
        dx10 = UC
    if x10 == PMAX and dx10 > 0:
        dx10 = 0
    if x10 == PMIN and dx10 < 0:
        dx10 = 0
    dx20 = (x10 - x20) / T4
    dx30 = (x20 - x30) / T5
    # Trapezoidal (Heun) corrector: average the two derivative estimates.
    x1 = x1 + (dx1 + dx10) * 0.5 * time_step
    x2 = x2 + (dx2 + dx20) * 0.5 * time_step
    x3 = x3 + (dx3 + dx30) * 0.5 * time_step
    Pm = K1 * x2 + K3 * x3
    # Commit as the actual (True) state set.
    apis_dynamic.set_turbine_state_data(generator, True, 'x1', x1)
    apis_dynamic.set_turbine_state_data(generator, True, 'x2', x2)
    apis_dynamic.set_turbine_state_data(generator, True, 'x3', x3)
    apis_dynamic.set_generator_state_data(generator, True, 'Pm', Pm)
    return
def solve_IEEEG3_model_state_estimated_value(generator):
    '''
    Solving the estimated value of state parameters of IEEEG3 governor model.

    Performs one explicit-Euler predictor step on the four governor states
    (gate servo, gate position, transient droop, water column) and stores the
    results, plus the resulting mechanical power, as the estimated (False)
    state set.
    Args:
        generator, IEEEG3 model connected bus number
    Rets:
        None
    '''
    # Current accepted (True) state values.
    x1 = apis_dynamic.get_turbine_state_data(generator, True, 'x1')
    x2 = apis_dynamic.get_turbine_state_data(generator, True, 'x2')
    x3 = apis_dynamic.get_turbine_state_data(generator, True, 'x3')
    x4 = apis_dynamic.get_turbine_state_data(generator, True, 'x4')
    time_step = apis.get_simulator_parameter('dynamic', 'time_step')
    omega = apis_dynamic.get_generator_state_data(generator, True, 'omega')
    # IEEEG3 governor model parameters.
    P0 = apis.get_generator_related_model_data(generator, 'GOV', 'P0')
    UO = apis.get_generator_related_model_data(generator, 'GOV', 'UO')
    UC = apis.get_generator_related_model_data(generator, 'GOV', 'UC')
    TP = apis.get_generator_related_model_data(generator, 'GOV', 'TP')
    TG = apis.get_generator_related_model_data(generator, 'GOV', 'TG')
    PMAX = apis.get_generator_related_model_data(generator, 'GOV', 'PMAX')
    PMIN = apis.get_generator_related_model_data(generator, 'GOV', 'PMIN')
    delta = apis.get_generator_related_model_data(generator, 'GOV', 'delta')
    sigma = apis.get_generator_related_model_data(generator, 'GOV', 'sigma')
    TR = apis.get_generator_related_model_data(generator, 'GOV', 'TR')
    a11 = apis.get_generator_related_model_data(generator, 'GOV', 'a11')
    a21 = apis.get_generator_related_model_data(generator, 'GOV', 'a21')
    a13 = apis.get_generator_related_model_data(generator, 'GOV', 'a13')
    a23 = apis.get_generator_related_model_data(generator, 'GOV', 'a23')
    TW = apis.get_generator_related_model_data(generator, 'GOV', 'TW')
    # Servo input: reference minus speed deviation, permanent droop (sigma)
    # and transient droop feedback.  NOTE(review): abs(omega - 1) responds
    # identically to over- and under-frequency -- confirm modeling intent.
    x1_input = P0 - abs(omega - 1) - sigma * x2 - TR * x2 - x3
    dx1 = (x1_input / TG - x1) / TP
    # Anti-windup at the gate-velocity limits.  NOTE(review): float equality
    # assumes x1 is snapped exactly to UO/UC (see the clamp below) -- confirm.
    if x1 == UO and dx1 > 0:
        dx1 = 0
    if x1 == UC and dx1 < 0:
        dx1 = 0
    # Gate position integrates the gate velocity.
    dx2 = x1
    if x2 == PMAX and dx2 > 0:
        dx2 = 0
    if x2 == PMIN and dx2 < 0:
        dx2 = 0
    # Transient droop washout and water-column dynamics.
    dx3 = (-delta * TR * x2 - TR * x3) / TR**2
    dx4 = (-a13*a21*TW * x2 - a11*TW * x4) / (a11*TW)**2
    # Explicit Euler predictor, with hard clamping of the limited states.
    x10 = x1 + dx1 * time_step
    if x10 > UO:
        x10 = UO
    if x10 < UC:
        x10 = UC
    x20 = x2 + dx2 * time_step
    if x20 > PMAX:
        x20 = PMAX
    if x20 < PMIN:
        x20 = PMIN
    x30 = x3 + dx3 * time_step
    x40 = x4 + dx4 * time_step
    # Mechanical power from gate position and water-column state.
    Pm0 = a23 * x20 + (a13*a21/a11) * x20 + x40
    # Store as the estimated (False) state set.
    apis_dynamic.set_turbine_state_data(generator, False, 'x1', x10)
    apis_dynamic.set_turbine_state_data(generator, False, 'x2', x20)
    apis_dynamic.set_turbine_state_data(generator, False, 'x3', x30)
    apis_dynamic.set_turbine_state_data(generator, False, 'x4', x40)
    apis_dynamic.set_generator_state_data(generator, False, 'Pm', Pm0)
    return
def solve_IEEEG3_model_state_actual_value(generator):
    '''
    Solving the actual value of state parameters of IEEEG3 governor model.
    (The previous docstring said "estimated"; this is the corrector step.)

    Averages the derivatives evaluated at the current (True) state and at the
    predictor (False) state -- Heun's / trapezoidal method -- and commits the
    result as the actual (True) state set.
    Args:
        generator, IEEEG3 model connected bus number
    Rets:
        None
    '''
    # Current (True) state and the predictor (False) state.
    x1 = apis_dynamic.get_turbine_state_data(generator, True, 'x1')
    x2 = apis_dynamic.get_turbine_state_data(generator, True, 'x2')
    x3 = apis_dynamic.get_turbine_state_data(generator, True, 'x3')
    x4 = apis_dynamic.get_turbine_state_data(generator, True, 'x4')
    x10 = apis_dynamic.get_turbine_state_data(generator, False, 'x1')
    x20 = apis_dynamic.get_turbine_state_data(generator, False, 'x2')
    x30 = apis_dynamic.get_turbine_state_data(generator, False, 'x3')
    x40 = apis_dynamic.get_turbine_state_data(generator, False, 'x4')
    time_step = apis.get_simulator_parameter('dynamic', 'time_step')
    omega = apis_dynamic.get_generator_state_data(generator, True, 'omega')
    omega0 = apis_dynamic.get_generator_state_data(generator, False, 'omega')
    # IEEEG3 governor model parameters.
    P0 = apis.get_generator_related_model_data(generator, 'GOV', 'P0')
    UO = apis.get_generator_related_model_data(generator, 'GOV', 'UO')
    UC = apis.get_generator_related_model_data(generator, 'GOV', 'UC')
    TP = apis.get_generator_related_model_data(generator, 'GOV', 'TP')
    TG = apis.get_generator_related_model_data(generator, 'GOV', 'TG')
    PMAX = apis.get_generator_related_model_data(generator, 'GOV', 'PMAX')
    PMIN = apis.get_generator_related_model_data(generator, 'GOV', 'PMIN')
    delta = apis.get_generator_related_model_data(generator, 'GOV', 'delta')
    sigma = apis.get_generator_related_model_data(generator, 'GOV', 'sigma')
    TR = apis.get_generator_related_model_data(generator, 'GOV', 'TR')
    a11 = apis.get_generator_related_model_data(generator, 'GOV', 'a11')
    a21 = apis.get_generator_related_model_data(generator, 'GOV', 'a21')
    a13 = apis.get_generator_related_model_data(generator, 'GOV', 'a13')
    a23 = apis.get_generator_related_model_data(generator, 'GOV', 'a23')
    TW = apis.get_generator_related_model_data(generator, 'GOV', 'TW')
    # Derivatives at the current state (same limiter logic as the predictor
    # step; see the NOTE(review) remarks there).
    x1_input = P0 - abs(omega - 1) - sigma * x2 - TR * x2 - x3
    dx1 = (x1_input / TG - x1) / TP
    if x1 == UO and dx1 > 0:
        dx1 = 0
    if x1 == UC and dx1 < 0:
        dx1 = 0
    dx2 = x1
    if x2 == PMAX and dx2 > 0:
        dx2 = 0
    if x2 == PMIN and dx2 < 0:
        dx2 = 0
    dx3 = (-delta * TR * x2 - TR * x3) / TR**2
    dx4 = (-a13*a21*TW * x2 - a11*TW * x4) / (a11*TW)**2
    # Derivatives at the predictor state.
    x1_input0 = P0 - abs(omega0 - 1) - sigma * x20 - TR * x20 - x30
    dx10 = (x1_input0 / TG - x10) / TP
    if x10 == UO and dx10 > 0:
        dx10 = 0
    if x10 == UC and dx10 < 0:
        dx10 = 0
    dx20 = x10
    if x20 == PMAX and dx20 > 0:
        dx20 = 0
    if x20 == PMIN and dx20 < 0:
        dx20 = 0
    dx30 = (-delta * TR * x20 - TR * x30) / TR**2
    dx40 = (-a13*a21*TW * x20 - a11*TW * x40) / (a11*TW)**2
    # Trapezoidal (Heun) corrector: average the two derivative estimates.
    x1 = x1 + (dx1 + dx10) * 0.5 * time_step
    x2 = x2 + (dx2 + dx20) * 0.5 * time_step
    x3 = x3 + (dx3 + dx30) * 0.5 * time_step
    x4 = x4 + (dx4 + dx40) * 0.5 * time_step
    Pm = a23 * x2 + (a13*a21/a11) * x2 + x4
    # Commit as the actual (True) state set.
    apis_dynamic.set_turbine_state_data(generator, True, 'x1', x1)
    apis_dynamic.set_turbine_state_data(generator, True, 'x2', x2)
    apis_dynamic.set_turbine_state_data(generator, True, 'x3', x3)
    apis_dynamic.set_turbine_state_data(generator, True, 'x4', x4)
    apis_dynamic.set_generator_state_data(generator, True, 'Pm', Pm)
    return
b35677b16d007df6bebe5600659ee22af25381d6 | 4,457 | py | Python | fsleyes/plugins/profiles/samplelineprofile.py | pauldmccarthy/fsleyes | 453a6b91ec7763c39195814d635257e3766acf83 | [
"Apache-2.0"
] | 12 | 2018-05-05T01:36:25.000Z | 2021-09-23T20:44:08.000Z | fsleyes/plugins/profiles/samplelineprofile.py | pauldmccarthy/fsleyes | 453a6b91ec7763c39195814d635257e3766acf83 | [
"Apache-2.0"
] | 97 | 2018-05-05T02:17:23.000Z | 2022-03-29T14:58:42.000Z | fsleyes/plugins/profiles/samplelineprofile.py | pauldmccarthy/fsleyes | 453a6b91ec7763c39195814d635257e3766acf83 | [
"Apache-2.0"
] | 6 | 2017-12-09T09:02:00.000Z | 2021-03-05T18:55:13.000Z | #!/usr/bin/env python
#
# samplelineprofile.py - The SampleLineProfile class.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides the :class:`SampleLineProfile` class, an interaction
:class:`.Profile` for :class:`.OrthoPanel` views, which is used by the
:class:`.SampleLinePanel`.
"""
import wx
import fsleyes.profiles.orthoviewprofile as orthoviewprofile
class SampleLineProfile(orthoviewprofile.OrthoViewProfile):
    """The ``SampleLineProfile`` class is a :class:`.Profile` for the
    :class:`.OrthoPanel` class, which allows the user to draw a line on
    a canvas. The :class:`.SampleLinePanel` will then sample values along
    that line from the currently selected overlay, and display them on a
    plot.
    """

    @staticmethod
    def tempModes():
        """Returns the temporary mode map for the ``SampleLineProfile``,
        which controls the use of modifier keys to temporarily enter other
        interaction modes.
        """
        return {
            ('sample', wx.WXK_SHIFT) : 'nav',
            ('sample', wx.WXK_CONTROL) : 'zoom',
            ('sample', wx.WXK_ALT) : 'pan',
            ('sample', (wx.WXK_CONTROL, wx.WXK_SHIFT)) : 'slice'}

    @staticmethod
    def altHandlers():
        """Returns the alternate handlers map, which allows event handlers
        defined in one mode to be re-used whilst in another mode.
        """
        # Middle-button drag pans, re-using the pan-mode left-drag handler.
        return {('sample', 'MiddleMouseDrag') : ('pan', 'LeftMouseDrag')}

    def __init__(self, viewPanel, overlayList, displayCtx):
        """Create a ``SampleLineProfile``.

        :arg viewPanel: An :class:`.OrthoPanel` instance.
        :arg overlayList: The :class:`.OverlayList` instance.
        :arg displayCtx: The :class:`.DisplayContext` instance.
        """
        orthoviewprofile.OrthoViewProfile.__init__(
            self,
            viewPanel,
            overlayList,
            displayCtx,
            ['sample'])
        self.mode = 'sample'
        # Most recently drawn line annotation, and its end points in
        # display coordinates.  All None until a line has been drawn.
        self.__sampleLine = None
        self.__sampleStart = None
        self.__sampleEnd = None

    def destroy(self):
        """Called when this ``SampleLineProfile`` is no longer used.
        Clears the current line annotation, if there is one, then calls
        the base class ``destroy`` method.
        """
        if self.__sampleLine is not None:
            line = self.__sampleLine
            # Remove the queued annotation from its canvas before dropping
            # our reference, and refresh so it disappears immediately.
            line.annot.dequeue(line, hold=True, fixed=False)
            self.__sampleLine = None
            line.annot.canvas.Refresh()
        super().destroy()

    @property
    def sampleLine(self):
        """Returns a reference to the :class:`.Line` annotation that has most
        recently been drawn, or ``None`` if no line has been drawn.
        """
        return self.__sampleLine

    @property
    def sampleStart(self):
        """Return the ``(x, y, z)`` display coordinates of the start of the
        most recently drawn line, or ``None`` if no line has been drawn.
        """
        return self.__sampleStart

    @property
    def sampleEnd(self):
        """Return the ``(x, y, z)`` display coordinates of the end of the
        most recently drawn line, or ``None`` if no line has been drawn.
        """
        return self.__sampleEnd

    def _sampleModeLeftMouseDown(self, ev, canvas, mousePos, canvasPos):
        """Adds a new line annotation."""
        # Discard any previously drawn sample line before starting a new one.
        if self.__sampleLine is not None:
            line = self.__sampleLine
            line.annot.dequeue(line, hold=True, fixed=False)
            line.annot.canvas.Refresh()
            self.__sampleLine = None
            self.__sampleStart = None
            self.__sampleEnd = None
        opts = canvas.opts
        annot = canvas.getAnnotations()
        # Project the 3D display location onto this canvas' two axes; the
        # line starts as a zero-length segment at the click position.
        x, y = (canvasPos[opts.xax], canvasPos[opts.yax])
        self.__sampleStart = canvasPos
        self.__sampleLine = annot.line(x, y, x, y,
                                       lineWidth=3, colour='#ff5050',
                                       hold=True, fixed=False)

    def _sampleModeLeftMouseDrag(self, ev, canvas, mousePos, canvasPos):
        """Adjust the line end point so it tracks the mouse location."""
        opts = canvas.opts
        line = self.__sampleLine
        line.x2 = canvasPos[opts.xax]
        line.y2 = canvasPos[opts.yax]
        self.__sampleEnd = canvasPos
        canvas.Refresh()
| 33.014815 | 77 | 0.592551 |
a73c72402acd699adc2e5608d4575ca4d8840e9c | 652 | py | Python | sources/openembedded-core/meta/lib/oeqa/runtime/ping.py | wwright2/dcim3-angstrom1 | d358c7b2d446c0f330ed76b20f41e42f6f4f045c | [
"MIT"
] | null | null | null | sources/openembedded-core/meta/lib/oeqa/runtime/ping.py | wwright2/dcim3-angstrom1 | d358c7b2d446c0f330ed76b20f41e42f6f4f045c | [
"MIT"
] | null | null | null | sources/openembedded-core/meta/lib/oeqa/runtime/ping.py | wwright2/dcim3-angstrom1 | d358c7b2d446c0f330ed76b20f41e42f6f4f045c | [
"MIT"
] | null | null | null | import subprocess
import unittest
import sys
import time
from oeqa.oetest import oeRuntimeTest
class PingTest(oeRuntimeTest):
def test_ping(self):
output = ''
count = 0
endtime = time.time() + 60
while count < 5 and time.time() < endtime:
proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
output += proc.communicate()[0]
if proc.poll() == 0:
count += 1
else:
count = 0
self.assertEqual(count, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (count,output))
| 31.047619 | 121 | 0.581288 |
92c95a41d793d591a0fcb00c9e68e0cf27a2d8e9 | 16,015 | py | Python | synapse/storage/databases/main/appservice.py | dsonck92/synapse | 2560b1b6b2f74b5724253396c0e3665fa1f7968c | [
"Apache-2.0"
] | 9,945 | 2015-01-02T07:41:06.000Z | 2022-03-31T23:22:42.000Z | synapse/storage/databases/main/appservice.py | dsonck92/synapse | 2560b1b6b2f74b5724253396c0e3665fa1f7968c | [
"Apache-2.0"
] | 9,320 | 2015-01-08T14:09:03.000Z | 2022-03-31T21:11:24.000Z | synapse/storage/databases/main/appservice.py | dsonck92/synapse | 2560b1b6b2f74b5724253396c0e3665fa1f7968c | [
"Apache-2.0"
] | 2,299 | 2015-01-31T22:16:29.000Z | 2022-03-31T06:08:26.000Z | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import TYPE_CHECKING, List, Optional, Pattern, Tuple
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
AppServiceTransaction,
)
from synapse.config.appservice import load_appservices
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.types import JsonDict
from synapse.util import json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
def _make_exclusive_regex(
    services_cache: List[ApplicationService],
) -> Optional[Pattern]:
    """Precompile a single regex matching any user ID that falls in some
    appservice's exclusive user namespace.

    Returns None when no service registers exclusive user regexes, so that
    callers can distinguish "no exclusive namespaces" from a pattern that
    would otherwise match everything.
    """
    patterns = []
    for service in services_cache:
        for regex in service.get_exclusive_user_regexes():
            patterns.append(regex.pattern)
    if not patterns:
        # We handle this case specially otherwise the constructed regex
        # would always match.
        return None
    combined = "|".join("(%s)" % pattern for pattern in patterns)
    return re.compile(combined)
class ApplicationServiceWorkerStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        # Load the configured appservices and precompile the combined
        # exclusive-user regex before the base store initialises.
        self.services_cache = load_appservices(
            hs.hostname, hs.config.appservice.app_service_config_files
        )
        self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)

        super().__init__(database, db_conn, hs)

    def get_app_services(self):
        """Return the cached list of configured application services."""
        return self.services_cache

    def get_if_app_services_interested_in_user(self, user_id: str) -> bool:
        """Check if the user is one associated with an app service (exclusively)"""
        regex = self.exclusive_user_regex
        return regex is not None and regex.match(user_id) is not None

    def get_app_service_by_user_id(self, user_id: str) -> Optional[ApplicationService]:
        """Map a user ID to the application service registered with that
        sender user ID, if any.

        All application services have an associated sender user ID; nothing
        about the ID itself marks it as belonging to an appservice, hence
        this lookup.

        Args:
            user_id: The user ID to look up.

        Returns:
            The matching application service, or None.
        """
        return next(
            (svc for svc in self.services_cache if svc.sender == user_id), None
        )

    def get_app_service_by_token(self, token: str) -> Optional[ApplicationService]:
        """Look up an application service by its appservice token.

        Args:
            token: The application service token.

        Returns:
            The matching application service, or None.
        """
        return next(
            (svc for svc in self.services_cache if svc.token == token), None
        )

    def get_app_service_by_id(self, as_id: str) -> Optional[ApplicationService]:
        """Look up an application service by its appservice ID.

        Args:
            as_id: The application service ID.

        Returns:
            The matching application service, or None.
        """
        return next(
            (svc for svc in self.services_cache if svc.id == as_id), None
        )
class ApplicationServiceStore(ApplicationServiceWorkerStore):
    """Placeholder subclass, kept for consistency with the other stores."""

    # This is currently empty due to there not being any AS storage functions
    # that can't be run on the workers. Since this may change in future, and
    # to keep consistency with the other stores, we keep this empty class for
    # now.
    pass
class ApplicationServiceTransactionWorkerStore(
    ApplicationServiceWorkerStore, EventsWorkerStore
):
    """Storage for application service transactions.
    A transaction is a batch of events queued for delivery to an
    application service. Pending transactions live in
    ``application_services_txns``; when a transaction completes it is
    deleted and the id of the last completed transaction is recorded per
    service in ``application_services_state``.
    """
    async def get_appservices_by_state(
        self, state: ApplicationServiceState
    ) -> List[ApplicationService]:
        """Get a list of application services based on their state.
        Args:
            state: The state to filter on.
        Returns:
            A list of ApplicationServices, which may be empty.
        """
        results = await self.db_pool.simple_select_list(
            "application_services_state", {"state": state.value}, ["as_id"]
        )
        # NB: This assumes this class is linked with ApplicationServiceStore
        as_list = self.get_app_services()
        services = []
        # Resolve each stored as_id against the in-memory service list.
        for res in results:
            for service in as_list:
                if service.id == res["as_id"]:
                    services.append(service)
        return services
    async def get_appservice_state(
        self, service: ApplicationService
    ) -> Optional[ApplicationServiceState]:
        """Get the application service state.
        Args:
            service: The service whose state to fetch.
        Returns:
            An ApplicationServiceState, or None if no state has been
            recorded for this service yet.
        """
        result = await self.db_pool.simple_select_one(
            "application_services_state",
            {"as_id": service.id},
            ["state"],
            allow_none=True,
            desc="get_appservice_state",
        )
        if result:
            return ApplicationServiceState(result.get("state"))
        return None
    async def set_appservice_state(
        self, service: ApplicationService, state: ApplicationServiceState
    ) -> None:
        """Set the application service state.
        Args:
            service: The service whose state to set.
            state: The connectivity state to apply.
        """
        await self.db_pool.simple_upsert(
            "application_services_state", {"as_id": service.id}, {"state": state.value}
        )
    async def create_appservice_txn(
        self,
        service: ApplicationService,
        events: List[EventBase],
        ephemeral: List[JsonDict],
    ) -> AppServiceTransaction:
        """Atomically creates a new transaction for this application service
        with the given list of events. Ephemeral events are NOT persisted to the
        database and are not resent if a transaction is retried.
        Args:
            service: The service who the transaction is for.
            events: A list of persistent events to put in the transaction.
            ephemeral: A list of ephemeral events to put in the transaction.
        Returns:
            A new transaction.
        """
        def _create_appservice_txn(txn):
            # work out new txn id (highest txn id for this service += 1)
            # The highest id may be the last one sent (in which case it is last_txn)
            # or it may be the highest in the txns list (which are waiting to be/are
            # being sent)
            last_txn_id = self._get_last_txn(txn, service.id)
            txn.execute(
                "SELECT MAX(txn_id) FROM application_services_txns WHERE as_id=?",
                (service.id,),
            )
            highest_txn_id = txn.fetchone()[0]
            if highest_txn_id is None:
                highest_txn_id = 0
            new_txn_id = max(highest_txn_id, last_txn_id) + 1
            # Insert new txn into txn table
            # (event ids are stored as a single JSON-encoded list column)
            event_ids = json_encoder.encode([e.event_id for e in events])
            txn.execute(
                "INSERT INTO application_services_txns(as_id, txn_id, event_ids) "
                "VALUES(?,?,?)",
                (service.id, new_txn_id, event_ids),
            )
            return AppServiceTransaction(
                service=service, id=new_txn_id, events=events, ephemeral=ephemeral
            )
        return await self.db_pool.runInteraction(
            "create_appservice_txn", _create_appservice_txn
        )
    async def complete_appservice_txn(
        self, txn_id: int, service: ApplicationService
    ) -> None:
        """Completes an application service transaction.
        Args:
            txn_id: The transaction ID being completed.
            service: The application service which was sent this transaction.
        """
        txn_id = int(txn_id)
        def _complete_appservice_txn(txn):
            # Debugging query: Make sure the txn being completed is EXACTLY +1 from
            # what was there before. If it isn't, we've got problems (e.g. the AS
            # has probably missed some events), so whine loudly but still continue,
            # since it shouldn't fail completion of the transaction.
            last_txn_id = self._get_last_txn(txn, service.id)
            if (last_txn_id + 1) != txn_id:
                logger.error(
                    "appservice: Completing a transaction which has an ID > 1 from "
                    "the last ID sent to this AS. We've either dropped events or "
                    "sent it to the AS out of order. FIX ME. last_txn=%s "
                    "completing_txn=%s service_id=%s",
                    last_txn_id,
                    txn_id,
                    service.id,
                )
            # Set current txn_id for AS to 'txn_id'
            self.db_pool.simple_upsert_txn(
                txn,
                "application_services_state",
                {"as_id": service.id},
                {"last_txn": txn_id},
            )
            # Delete txn
            self.db_pool.simple_delete_txn(
                txn,
                "application_services_txns",
                {"txn_id": txn_id, "as_id": service.id},
            )
        await self.db_pool.runInteraction(
            "complete_appservice_txn", _complete_appservice_txn
        )
    async def get_oldest_unsent_txn(
        self, service: ApplicationService
    ) -> Optional[AppServiceTransaction]:
        """Get the oldest transaction which has not been sent for this service.
        Args:
            service: The app service to get the oldest txn.
        Returns:
            An AppServiceTransaction or None.
        """
        def _get_oldest_unsent_txn(txn):
            # Monotonically increasing txn ids, so just select the smallest
            # one in the txns table (we delete them when they are sent)
            txn.execute(
                "SELECT * FROM application_services_txns WHERE as_id=?"
                " ORDER BY txn_id ASC LIMIT 1",
                (service.id,),
            )
            rows = self.db_pool.cursor_to_dict(txn)
            if not rows:
                return None
            entry = rows[0]
            return entry
        entry = await self.db_pool.runInteraction(
            "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn
        )
        if not entry:
            return None
        # Rehydrate the stored JSON list of event ids into full events.
        event_ids = db_to_json(entry["event_ids"])
        events = await self.get_events_as_list(event_ids)
        return AppServiceTransaction(
            service=service, id=entry["txn_id"], events=events, ephemeral=[]
        )
    def _get_last_txn(self, txn, service_id: Optional[str]) -> int:
        """Return the id of the last completed txn for `service_id`,
        or 0 if nothing has been recorded for it yet.
        """
        txn.execute(
            "SELECT last_txn FROM application_services_state WHERE as_id=?",
            (service_id,),
        )
        last_txn_id = txn.fetchone()
        if last_txn_id is None or last_txn_id[0] is None:  # no row exists
            return 0
        else:
            return int(last_txn_id[0])  # select 'last_txn' col
    async def set_appservice_last_pos(self, pos: int) -> None:
        """Record `pos` as the stream ordering that appservices have been
        processed up to (stored in ``appservice_stream_position``).
        """
        def set_appservice_last_pos_txn(txn):
            txn.execute(
                "UPDATE appservice_stream_position SET stream_ordering = ?", (pos,)
            )
        await self.db_pool.runInteraction(
            "set_appservice_last_pos", set_appservice_last_pos_txn
        )
    async def get_new_events_for_appservice(
        self, current_id: int, limit: int
    ) -> Tuple[int, List[EventBase]]:
        """Get all new events for an appservice"""
        def get_new_events_for_appservice_txn(txn):
            # Select events strictly after the stored appservice stream
            # position, up to (and including) current_id.
            sql = (
                "SELECT e.stream_ordering, e.event_id"
                " FROM events AS e"
                " WHERE"
                " (SELECT stream_ordering FROM appservice_stream_position)"
                " < e.stream_ordering"
                " AND e.stream_ordering <= ?"
                " ORDER BY e.stream_ordering ASC"
                " LIMIT ?"
            )
            txn.execute(sql, (current_id, limit))
            rows = txn.fetchall()
            upper_bound = current_id
            # If we hit the limit, the true upper bound is the ordering of
            # the last row actually returned.
            if len(rows) == limit:
                upper_bound = rows[-1][0]
            return upper_bound, [row[1] for row in rows]
        upper_bound, event_ids = await self.db_pool.runInteraction(
            "get_new_events_for_appservice", get_new_events_for_appservice_txn
        )
        events = await self.get_events_as_list(event_ids)
        return upper_bound, events
    async def get_type_stream_id_for_appservice(
        self, service: ApplicationService, type: str
    ) -> int:
        """Return the stored `<type>_stream_id` for this service (0 if unset).
        `type` must be "read_receipt" or "presence"; anything else raises
        ValueError.
        """
        if type not in ("read_receipt", "presence"):
            raise ValueError(
                "Expected type to be a valid application stream id type, got %s"
                % (type,)
            )
        def get_type_stream_id_for_appservice_txn(txn):
            stream_id_type = "%s_stream_id" % type
            txn.execute(
                # We do NOT want to escape `stream_id_type`.
                # (Safe: `type` was validated against a fixed allowlist above.)
                "SELECT %s FROM application_services_state WHERE as_id=?"
                % stream_id_type,
                (service.id,),
            )
            last_stream_id = txn.fetchone()
            if last_stream_id is None or last_stream_id[0] is None:  # no row exists
                return 0
            else:
                return int(last_stream_id[0])
        return await self.db_pool.runInteraction(
            "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn
        )
    async def set_type_stream_id_for_appservice(
        self, service: ApplicationService, stream_type: str, pos: Optional[int]
    ) -> None:
        """Persist `pos` as the service's `<stream_type>_stream_id`.
        `stream_type` must be "read_receipt" or "presence"; anything else
        raises ValueError.
        """
        if stream_type not in ("read_receipt", "presence"):
            raise ValueError(
                "Expected type to be a valid application stream id type, got %s"
                % (stream_type,)
            )
        def set_type_stream_id_for_appservice_txn(txn):
            stream_id_type = "%s_stream_id" % stream_type
            txn.execute(
                # Column name interpolation is safe: validated allowlist above.
                "UPDATE application_services_state SET %s = ? WHERE as_id=?"
                % stream_id_type,
                (pos, service.id),
            )
        await self.db_pool.runInteraction(
            "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn
        )
class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
    """Application service transaction storage for the non-worker process.
    Currently identical to the worker store; kept for consistency.
    """
    # This is currently empty due to there not being any AS storage functions
    # that can't be run on the workers. Since this may change in future, and
    # to keep consistency with the other stores, we keep this empty class for
    # now.
    pass
| 35.908072 | 87 | 0.618295 |
428d2b25acdebb10d9467776a8ba77982b6c0f8c | 1,300 | py | Python | locations/models.py | vishnusayanth/django-app | 6f95f3140188d5cdeb260b66b2b8fdfffc8cf52b | [
"MIT"
] | null | null | null | locations/models.py | vishnusayanth/django-app | 6f95f3140188d5cdeb260b66b2b8fdfffc8cf52b | [
"MIT"
] | null | null | null | locations/models.py | vishnusayanth/django-app | 6f95f3140188d5cdeb260b66b2b8fdfffc8cf52b | [
"MIT"
] | null | null | null | from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
class Continent(models.Model):
    """A continent, identified purely by its name."""
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class Country(models.Model):
    """A country belonging to a continent."""
    name = models.CharField(max_length=50)
    official_language = models.CharField(max_length=50)
    country_code = models.IntegerField(default=None, null=True, blank=True)
    iso_code = models.CharField(max_length=10)
    continent = models.ForeignKey(Continent, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def capital(self):
        # Name of the state flagged as this country's capital. Note: if no
        # state is flagged, .first() returns None and this yields "None".
        return str(State.objects.filter(Q(country=self) & Q(capital_state=True)).first())
class State(models.Model):
    """A state of a country; at most one state per country may be flagged
    as the capital (enforced in save())."""
    name = models.CharField(max_length=50)
    country = models.ForeignKey(Country, on_delete=models.CASCADE)
    capital_state = models.BooleanField(default=False)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        """Save the state, refusing to create a second capital.
        Bug fix: the previous check counted existing capitals and only
        raised when more than one was found, so saving a *second* capital
        (existing count == 1) slipped through, and updating the current
        capital counted itself. We now raise as soon as any *other* state
        of the same country is already marked as capital.
        """
        if self.capital_state:
            other_capitals = State.objects.filter(
                Q(country_id=self.country.id) & Q(capital_state=True)
            ).exclude(pk=self.pk)
            if other_capitals.exists():
                raise ValidationError('Capital already set for this country!')
        super(State, self).save(*args, **kwargs)
| 31.707317 | 103 | 0.696154 |
32faefad947a53ef9777603c16318b24c7c5d818 | 353 | py | Python | github_status_embed/__init__.py | 893091483/Discord-embed | 92f6ecb120ca27f556bc105cde4be3bfcf8808be | [
"MIT"
] | 10 | 2020-12-05T09:53:36.000Z | 2022-03-15T20:53:11.000Z | github_status_embed/__init__.py | 893091483/Discord-embed | 92f6ecb120ca27f556bc105cde4be3bfcf8808be | [
"MIT"
] | 2 | 2022-03-01T03:09:15.000Z | 2022-03-10T17:40:20.000Z | github_status_embed/__init__.py | 893091483/Discord-embed | 92f6ecb120ca27f556bc105cde4be3bfcf8808be | [
"MIT"
] | 6 | 2021-04-20T09:38:21.000Z | 2022-03-20T03:15:16.000Z | """
Github Status Embed for Discord.
This application sends an enhanced GitHub Actions Status Embed to a
Discord webhook. The default embeds sent by GitHub don't contain a lot
of information and are sent for every workflow run. This application,
part of a Docker-based action, allows you to send more meaningful embeds
when you want to send them.
"""
| 35.3 | 72 | 0.790368 |
eea1fab11898866732ede15b1b5429f81332bded | 190 | py | Python | latihan1.py | MelindaIndriani17/praktikum3 | 25bdbf3c738f0bdb4f40bc2123d4d2f65adc2e3a | [
"Xnet",
"X11"
] | null | null | null | latihan1.py | MelindaIndriani17/praktikum3 | 25bdbf3c738f0bdb4f40bc2123d4d2f65adc2e3a | [
"Xnet",
"X11"
] | null | null | null | latihan1.py | MelindaIndriani17/praktikum3 | 25bdbf3c738f0bdb4f40bc2123d4d2f65adc2e3a | [
"Xnet",
"X11"
] | null | null | null | import random
# Read a count N from the user, then print N random floats drawn
# uniformly from [0.0, 0.5), numbered starting at 1.
a = 0
jumlah = int(input("Masukkan jumlah N : "))
for a in range(1, jumlah + 1):
    i = random.uniform(0.0, 0.5)
    print(" data ke-", a, "=>", i)
print("Selesai")
| 21.111111 | 44 | 0.557895 |
be02d310a52cd7ebfc9cb52a9d77231014b934a6 | 1,504 | py | Python | python-sandbox/entrypoint.py | leoniefrijters/onebot | e4bd43b0d18225f06b5281ce5c2665dd8a0dc188 | [
"BSD-3-Clause"
] | 13 | 2015-09-06T21:20:54.000Z | 2021-11-21T20:31:57.000Z | python-sandbox/entrypoint.py | leoniefrijters/onebot | e4bd43b0d18225f06b5281ce5c2665dd8a0dc188 | [
"BSD-3-Clause"
] | 47 | 2015-08-14T08:59:24.000Z | 2021-08-25T05:08:56.000Z | python-sandbox/entrypoint.py | leoniefrijters/onebot | e4bd43b0d18225f06b5281ce5c2665dd8a0dc188 | [
"BSD-3-Clause"
] | 7 | 2016-03-29T12:05:24.000Z | 2019-06-18T15:03:50.000Z | #!/usr/bin/env python
import io
import multiprocessing
import sys
from contextlib import redirect_stdout, redirect_stderr
TIMEOUT = 5
class UserProcess(multiprocessing.Process):
    """A process that compiles and executes a user-supplied command string,
    echoing the captured stdout/stderr on its own stdout."""
    def __init__(self, cmd, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cmd = cmd
    def run(self):
        """Compile the command, execute it with stdout/stderr captured,
        then report what (if anything) it produced."""
        captured_out = io.StringIO()
        captured_err = io.StringIO()
        code = None
        with redirect_stdout(captured_out), redirect_stderr(captured_err):
            try:
                # "single" mode echoes bare-expression results, like the REPL.
                code = compile(self.cmd, "<PEBKAC>", "single")
            except (SyntaxError, OverflowError, ValueError) as exc:
                print(exc, file=sys.stderr)
            if code is not None:
                try:
                    exec(code)
                except Exception as exc:
                    print(repr(exc), file=sys.stderr)
        out_text = captured_out.getvalue().strip()
        if out_text:
            print(f"Stdout: {out_text!r}")
        err_text = captured_err.getvalue().strip()
        if err_text:
            print(f"Stderr: {err_text!r}")
        if not (out_text or err_text):
            print("No output.")
if __name__ == "__main__":
    import sys
    # Require exactly one argument: the command line to run in the sandbox.
    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <cmdline>", file=sys.stderr)
        sys.exit(1)
    process = UserProcess(sys.argv[1])
    process.start()
    # Give the user code a bounded amount of wall-clock time.
    process.join(TIMEOUT)
    if process.is_alive():
        process.terminate()
        # Reap the terminated child so it does not linger as a zombie.
        process.join()
        print(f"Terminated after {TIMEOUT} seconds.")
| 26.385965 | 65 | 0.545878 |
b47abfc0151e75c15df8e97c28434388d746ba18 | 9,923 | py | Python | Configs/Old_configs/Medical.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | 1 | 2022-01-17T13:13:02.000Z | 2022-01-17T13:13:02.000Z | Configs/Old_configs/Medical.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | null | null | null | Configs/Old_configs/Medical.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | null | null | null | import collections # Used for ordered dictionary
from PRS import PRS_sumstats
from UKBB_Func import PROBA_FOLDER
import sys
Top_Gen_Dict = PRS_sumstats.Get_Top_Gen_Dict()
# Hyper-parameter grids are filled in further down the file: *_A for the
# base ("all participants") models, *_R for the returning-participants models.
Hyp_Param_Dict_A = collections.OrderedDict()
Hyp_Param_Dict_R = collections.OrderedDict()
# --- Input data locations ---
TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_train_val.csv'
TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_test.csv'
ALL_TEST_AS_VAL = True
# --- Job definitions (these three lists must stay the same length; see the
# sanity check at the bottom of this file) ---
BASIC_JOB_NAME = ["Metabolic_Syndrome","Metabolic_Syndrome_Anthropometry","Metabolic_Syndrome_a1c_Anthropometry"]
BASIC_PROB_BASED_JOB_NAME = ["RE_"+x for x in BASIC_JOB_NAME]
Sub_Class_array = ["All","All","All"] # "All",, "All"
Job_ID = ["2443-0.0","2443-0.0","2443-0.0"]
RET_FEAT_file_names=BASIC_JOB_NAME
feat_list_folder="Diabetes_Features_lists/" #Folder where the features lists are located
FEAT_file_names = ["Diabetes_Features_0705"] #Diabetes_Features.csv,Diabetes_Features_No_Baseline.csv,Baseline_Features.csv,Diabetes_Features_Lifestyle.csv,Diabetes_Features_No_Baseline.csv, Full_Diabetes_Features # "Diabetes_Features.csv","Diabetes_Features.csv","Diabetes_Features.csv",BMI_Features_Lifestyle.csv
#Features File name without ending
#Features File name without ending
FEAT_PATH = [feat_list_folder+x+".csv" for x in FEAT_file_names]
RET_FEAT_PATH=[feat_list_folder+x+".csv" for x in RET_FEAT_file_names]
#
# Data_Job_Names = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer", "4041-0.0": "Gestational diabetes","21001-0.0":'BMI'}
CHARAC_SELECTED = {"Age at recruitment": "All", "Sex": "All", "Ethnic background": "All",
                   "Type of special diet followed": "All"}
DISEASE_PROBA_DICT = {"Diabetes Probabilities": PROBA_FOLDER+"Diabetes_OnlyPROB.csv",
                      "CVD Probabilities": PROBA_FOLDER+"Vascular_OnlyPROB.csv",
                      "Cancer Probabilities": PROBA_FOLDER+"Cancer_OnlyPROB.csv"}
#PRS_COLS -Adding PRS -Only final score for each phenotype for each user
PRS_COLS = ['PRS_MAGIC_HbA1C', 'PRS_cigs_per_day', 'PRS_MAGIC_Scott_FG', 'PRS_ln_HOMA-IR', 'PRS_MAGIC_Scott_FI', 'PRS_height', 'PRS_Manning_FI', 'PRS_Leptin_BMI', 'PRS_cardio', 'PRS_triglycerides',
            'PRS_Manning_FG', 'PRS_anorexia', 'PRS_Magic_2hrGlucose', 'PRS_Non_Diabetic_glucose2', 'PRS_ever_smoked', 'PRS_age_smoke', 'PRS_MAGIC_fastingProinsulin', 'PRS_Leptin_Unadjusted_BMI',
            'PRS_MAGIC_Scott_FI_adjBMI', 'PRS_MAGIC_Scott_2hGlu', 'PRS_glucose_iris', 'PRS_ln_FastingInsulin', 'PRS_bmi', 'PRS_overweight', 'PRS_hba1c', 'PRS_alzheimer', 'PRS_whr', 'PRS_ln_HOMA-B',
            'PRS_ldl', 'PRS_obesity_class2', 'PRS_obesity_class1', 'PRS_diabetes_BMI_Unadjusted', 'PRS_Manning_BMI_ADJ_FG', 'PRS_waist', 'PRS_ashtma', 'PRS_HBA1C_ISI', 'PRS_HbA1c_MANTRA',
            'PRS_diabetes_BMI_Adjusted', 'PRS_Heart_Rate', 'PRS_Manning_BMI_ADJ_FI', 'PRS_cholesterol', 'PRS_hdl', 'PRS_FastingGlucose', 'PRS_hips']
# Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA','t2d_mega_meta',"MAGIC_Scott_FG","triglycerides",'Magic_2hrGlucose','Manning_Fasting_Insulin'] #Keep empty if None
Select_Top_Traits_Gen_arr_names =['HbA1c_MANTRA','t2d_mega_meta',"MAGIC_Scott_FG",'Magic_2hrGlucose',
                                  'bmi','anorexia','cardio','hips','waist',"overweight",'obesity_class1','obesity_class2',
                                  "ever_smoked","hdl","ldl", 'triglycerides','cholesterol','diabetes_BMI_Unadjusted',
                                  'diabetes_BMI_Adjusted','FastingGlucose','ln_HOMA-B','ln_HOMA-IR','ln_FastingInsulin',
                                  'Leptin_BMI','Leptin_Unadjusted_BMI','Heart_Rate','MAGIC_fastingProinsulin',
                                  'MAGIC_Scott_FI_adjBMI','MAGIC_Scott_FI','MAGIC_HbA1C','Manning_FG','Manning_BMI_ADJ_FG',
                                  'Manning_Fasting_Insulin','Manning_BMI_ADJ_FI','HBA1C_ISI']#
# --- Run-control flags ---
USE_FAKE_QUE = False
NROWS = None # 1-500000 or None
NROWS_RETURN = None # How many returning participants to load
Split = True #Whether or not to split data to train and test, should be false only for final testing
DEBUG = False
USE_PROBA = True # Whether or not to either calculate probability if working on all participants or to use probabilities
# calculated if working with returning participants
USE_PRS = False #whether to use PRS results
Use_SNPs=False
NFOLD = 5
Choose_N_Fold = 3 #How many CV to make for the initial Cross validation when choosing the hyperparameters
Basic_HYP_PAR_ITER = 20
Prob_HYP_PAR_ITER=200
MEM = '30G'
N_THREADS = 10
P_THREADS = 2
Calc_Base_Prob = False
CALC_SHAP = True # Whether or not to calculate the SHAP values for the basic probabilities
SORT = True #Used mostly for debugging to activate the SORT_AUC_APS function
#Refit_model - path to model to be refitted in the first visit
Refit_Model =None#'/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/Diabetes_shap_model.txt'#None##Name of the model to be refitted or None
#/net/mraid08/export/jafar/Yochai/UKBB_Runs/AF_To_refit2_Diabetes/Diabetes_Results
Finalize_Only = False
Calc_Prob_Based_Prob = True
RE_USE_PROBA = False
Calc_Transfer_Learning=False #Used when we would like to refit several base models and not a specific model
REFIT_SERIAL_MODELS=False# #Whether to refit a model folder just made in previous step, or use a predefined folder
#Refit_Return_Model_Path - path to model to be refitted in the first visit
Refit_Return_Model_Path=None#'/net/mraid08/export/jafar/Yochai/UKBB_Runs/mock_refit/Diabetes_Results/'#'/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/'#None#
HowHow = "left" #"inner" - take only participants who have probabilities for other disease as well, "left" - take all
CALC_P_SHAP = True # Whether or not to calculate the SHAP values for the prob based predictions
SORT_Prob=True
Finalize_Prob_Based_Only = False
# Refitting a returned model is implied by either refit option above.
if REFIT_SERIAL_MODELS or Refit_Return_Model_Path:
    Refit_Returned=True
else:
    Refit_Returned=False
VISITS = [0,1,2]#[0,1,2]
NUM_OF_DEP_PLOT = 40
Lite = False #Used for debug
Thresh_in_Column = 0.7
Thresh_in_Row = 0.7
# --- UK Biobank field-code lookup tables ---
# CHARAC_SELECTED = {"Age at recruitment": "All", "Sex": "All", "Ethnic background": "All",
#                    "Type of special diet followed": "All"}
CHARAC_ID = {"Age at recruitment": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
             "Type of special diet followed": "20086-0.0"}
ETHNIC_CODE = {-3: "Prefer not to answer", -1: "Do not know", 1: "White", 2: "Mixed", 3: "Asian",
               4: "Black or Black British", 5: "Chinese", 6: "Other ethnic group", 1001: "British", 1002: "Irish",
               1003: "Any other white background", 2001: "White and Black Caribbean",
               2002: "White and Black African", 2003: "White and Asian", 2004: "Any other mixed background",
               3001: "Indian", 3002: "Pakistani", 3003: "Bangladeshi", 3004: "Any other Asian background",
               4001: "Caribbean", 4002: "African", 4003: "Any other Black background"}
SEX_CODE = {"Female": 0, "Male": 1}
DIET_CODE = {"Gluten-free": 8, "Lactose-free": 9, "Low calorie": 10, "Vegetarian": 11, "Vegan": 12, "Other": 13}
Job_name_dict = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer", "4041-0.0": "Gestational diabetes","21001-0.0":'BMI'} #,"Diabetes", "Cancer", "Gestational diabetes","Vascular"
No_symp_dict = {"6150-0.0": -7, "2443-0.0": 0, '2453-0.0': 0, '21001-0.0': "nan"}
# --- LightGBM hyper-parameter grid for the base ("A") models ---
# Hyp_Param_Dict_A['max_depth']=[2,4,8,16]
Hyp_Param_Dict_A['num_leaves']=[4,8,16,32,64,128,256]
Hyp_Param_Dict_A['is_unbalance'] = [True]
Hyp_Param_Dict_A['objective'] = ['binary']
Hyp_Param_Dict_A['boosting_type'] = ['gbdt']#,'rf','dart','goss'
Hyp_Param_Dict_A['metric'] = ["auc"] #MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_A['num_boost_round'] = [10,50,100,250,500,1000, 2000,4000,8000]#,1000, 2000, 4000, 8000
Hyp_Param_Dict_A['learning_rate'] = [0.005, 0.01, 0.05,0.1]
Hyp_Param_Dict_A["min_child_samples"] = [10,25,50,250,500]
Hyp_Param_Dict_A["subsample"] = [0.1,0.25,0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_A["colsample_bytree"] = [0.03,0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_A["boost_from_average"] = [True]
Hyp_Param_Dict_A['num_threads'] = [N_THREADS]
Hyp_Param_Dict_A['lambda_l1'] = [0,0.5,0.9,0.99,0.999]
Hyp_Param_Dict_A['lambda_l2'] = [0,0.5,0.9,0.99,0.999]
Hyp_Param_Dict_A['bagging_freq']=[0,1,5]
Hyp_Param_Dict_A['bagging_fraction']=[0.25,0.5,0.75,1]
# --- LightGBM hyper-parameter grid for the returning-participants ("R") models ---
# Hyp_Param_Dict_R['max_depth']=[2,4,8,16]
# Bug fix: num_leaves / bagging_freq / bagging_fraction in this section were
# assigned to Hyp_Param_Dict_A, silently overwriting the A-grid values above
# and leaving the R grid without those settings.
Hyp_Param_Dict_R['num_leaves']=[2,4,8,16,32,64,128]
Hyp_Param_Dict_R['is_unbalance'] = [True]
Hyp_Param_Dict_R['objective'] = ['binary']
Hyp_Param_Dict_R['boosting_type'] = ['gbdt']
Hyp_Param_Dict_R['metric'] = ["auc"] #MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_R['num_boost_round'] = [50,100,250,500,1000,2000,4000]#,,1000, 2000, 4000, 8000
Hyp_Param_Dict_R['verbose'] = [-1]
Hyp_Param_Dict_R['learning_rate'] = [0.005, 0.01, 0.05]
Hyp_Param_Dict_R["min_child_samples"] = [5,10,25,50]
Hyp_Param_Dict_R["subsample"] = [0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_R["colsample_bytree"] = [0.01,0.05, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_R["boost_from_average"] = [True]
Hyp_Param_Dict_R['num_threads'] = [P_THREADS]
Hyp_Param_Dict_R['lambda_l1'] = [0,0.25,0.5, 0.9, 0.99,0.999]
Hyp_Param_Dict_R['lambda_l2'] = [0,0.25,0.5, 0.9, 0.99,0.999]
Hyp_Param_Dict_R['bagging_freq']=[0,1,5]
Hyp_Param_Dict_R['bagging_fraction']=[0.5,0.75,1]
# Map each selected trait name to its entry in the top-genetics table.
Select_Traits_Gen = {
    trait_name: Top_Gen_Dict[trait_name]
    for trait_name in Select_Top_Traits_Gen_arr_names
}
# Sanity check: the per-job lists must line up element-for-element. The
# original condition compared BASIC_JOB_NAME against Sub_Class_array twice
# (a copy-paste slip); the chained comparison checks each list exactly once.
if not (len(BASIC_JOB_NAME) == len(Sub_Class_array) == len(Job_ID)):
    sys.exit("BASIC_JOB_NAME,Sub_Class_array and Job_ID should be same size")
2c8fefcc1cd7b12e6c4a468980052f0c5a0a2783 | 1,469 | py | Python | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | from sklearn import metrics
from prettytable import PrettyTable
def evaluate_clustering(name, X, true_labels, pred_labels):
    """Score a clustering against ground truth and return a result row.
    Returns [name] followed by six metrics, each formatted to two
    decimals: homogeneity, completeness, V-measure, adjusted Rand,
    normalized mutual information, and Fowlkes-Mallows. `X` is currently
    unused (it would only be needed by the commented-out silhouette).
    """
    scorers = [
        metrics.homogeneity_score,
        metrics.completeness_score,
        metrics.v_measure_score,
        metrics.adjusted_rand_score,
        metrics.normalized_mutual_info_score,
        metrics.fowlkes_mallows_score,
    ]
    # silhouette would additionally need the raw data:
    # metrics.silhouette_score(X, pred_labels, metric='euclidean')
    row = [name]
    for scorer in scorers:
        row.append("{:.2f}".format(scorer(true_labels, pred_labels)))
    return row
def tabulate_results(results):
    """Print `results` (rows from evaluate_clustering) as an ASCII table."""
    header = ['Name', 'Homogeneity', 'Completeness', 'V Measure',
              'Adj Rand Score', 'Norm Mutual Score', 'Fowlkes Mallows']
    table = PrettyTable(header)
    for row in results:
        table.add_row(row)
    print(table)
def tab_results(header, results):
    """Like tabulate_results, but with a caller-supplied header row."""
    table = PrettyTable(header)
    for row in results:
        table.add_row(row)
    print(table)
def evaluate_vmeasure(true_labels, pred_labels):
    """Return the raw (unformatted) V-measure score."""
    return metrics.v_measure_score(true_labels, pred_labels)
def evaluate_fm(true_labels, pred_labels):
    """Return the raw (unformatted) Fowlkes-Mallows score."""
    return metrics.fowlkes_mallows_score(true_labels, pred_labels)
af4d240c6bd62e60d4aadd5036d10e043b50db0d | 867 | py | Python | tests.py | mosquito/cyleb128 | 84d0caed9fa9bd2f7801ef409f8679ea28cd5f4c | [
"Apache-2.0"
] | 1 | 2021-08-09T18:59:46.000Z | 2021-08-09T18:59:46.000Z | tests.py | mosquito/cyleb128 | 84d0caed9fa9bd2f7801ef409f8679ea28cd5f4c | [
"Apache-2.0"
] | null | null | null | tests.py | mosquito/cyleb128 | 84d0caed9fa9bd2f7801ef409f8679ea28cd5f4c | [
"Apache-2.0"
] | null | null | null | import pytest
from leb128 import LEB128S, LEB128U
ENCODE_CASES = [
(LEB128U.encode, 624485, b'\xe5\x8e&'),
(LEB128S.encode, -123456, b'\xc0\xbbx'),
]
@pytest.mark.parametrize("func, value, expected", ENCODE_CASES)
def test_encode(func, value, expected):
assert func(value) == expected
DECODE_CASES = [
(LEB128U.decode, b"\xe5\x8e\x26", 624485),
(LEB128S.decode, b"\xc0\xbb\x78", -123456),
]
@pytest.mark.parametrize("func, value, expected", DECODE_CASES)
def test_decode(func, value, expected):
assert func(value) == expected
TWO_WAY_CASES = [
(LEB128U, 624485),
(LEB128S, -123456),
]
@pytest.mark.parametrize("klass, value", TWO_WAY_CASES)
def test_two_way(klass, value):
assert value == klass.decode(klass.encode(value))
assert klass.encode(value) == klass.encode(
klass.decode(klass.encode(value))
)
| 22.230769 | 63 | 0.681661 |
a3d4eae545bd770014f7a2896921613fc07a0be7 | 6,636 | py | Python | pahelix/utils/metrics/molecular_generation/SA_Score/sascorer.py | agave233/PaddleHelix | e5578f72c2a203a27d9df7da111f1ced826c1429 | [
"Apache-2.0"
] | 454 | 2020-11-21T01:02:45.000Z | 2022-03-29T12:53:40.000Z | pahelix/utils/metrics/molecular_generation/SA_Score/sascorer.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 161 | 2020-12-12T06:35:54.000Z | 2022-03-27T11:31:13.000Z | pahelix/utils/metrics/molecular_generation/SA_Score/sascorer.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 108 | 2020-12-07T09:01:10.000Z | 2022-03-31T14:42:29.000Z | #!/usr/bin/python3
#-*-coding:utf-8-*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# calculation of synthetic accessibility score as described in:
"""
Estimation of Synthetic Accessibility Score of Drug-like Molecules based on
Molecular Complexity and Fragment Contributions
Peter Ertl and Ansgar Schuffenhauer
Journal of Cheminformatics 1:8 (2009)
http://www.jcheminf.com/content/1/1/8
several small modifications to the original paper are included
particularly slightly different formula for marocyclic penalty
and taking into account also molecule symmetry (fingerprint density)
for a set of 10k diverse molecules the agreement between the original method
as implemented in PipelinePilot and this implementation is r2 = 0.97
peter ertl & greg landrum, september 2013
"""
from __future__ import print_function
import math
import os.path as op
import pickle
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.six import iteritems
_fscores = None
def readFragmentScores(name='fpscores'):
    """Load the pickled fragment-score table into the module-global
    `_fscores`, remapped as {fragment_bit_id: score}.
    Args:
        name: basename of the gzipped pickle; the default 'fpscores' is
            resolved relative to this module's directory.
    """
    import gzip
    global _fscores
    # generate the full path filename:
    if name == "fpscores":
        name = op.join(op.dirname(__file__), name)
    _fscores = pickle.load(gzip.open('%s.pkl.gz' % name))
    outDict = {}
    # Each pickled entry is [score, bit_id, bit_id, ...]; flatten it so
    # every bit id maps directly to its (float) score.
    for i in _fscores:
        for j in range(1, len(i)):
            outDict[i[j]] = float(i[0])
    _fscores = outDict
def numBridgeheadsAndSpiro(mol, ri=None):
    """Return (n_bridgehead_atoms, n_spiro_atoms) for `mol`.
    The `ri` (ring info) argument is unused; it is kept only for
    backwards compatibility with existing callers.
    """
    nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol)
    nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol)
    return nBridgehead, nSpiro
def calculateScore(m):
    """Return the synthetic accessibility (SA) score of molecule `m`.
    Combines a fragment-contribution term (looked up in the precomputed
    `_fscores` table, loaded on first use) with complexity penalties and a
    symmetry correction, mapped onto a 1 (easy) .. 10 (hard) scale.
    """
    if _fscores is None:
        readFragmentScores()
    # fragment score
    fp = rdMolDescriptors.GetMorganFingerprint(
        m, 2  # <- 2 is the *radius* of the circular fingerprint
    )
    fps = fp.GetNonzeroElements()
    score1 = 0.
    nf = 0
    # Count-weighted average of per-fragment contributions; unknown
    # fragments get the -4 default. (Plain .items() replaces the py2-era
    # rdkit.six.iteritems with identical behavior.)
    for bitId, v in fps.items():
        nf += v
        sfp = bitId
        score1 += _fscores.get(sfp, -4) * v
    score1 /= nf
    # features score
    nAtoms = m.GetNumAtoms()
    nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True))
    ri = m.GetRingInfo()
    nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri)
    nMacrocycles = 0
    for x in ri.AtomRings():
        if len(x) > 8:
            nMacrocycles += 1
    sizePenalty = nAtoms ** 1.005 - nAtoms
    stereoPenalty = math.log10(nChiralCenters + 1)
    spiroPenalty = math.log10(nSpiro + 1)
    bridgePenalty = math.log10(nBridgeheads + 1)
    macrocyclePenalty = 0.
    # ---------------------------------------
    # This differs from the paper, which defines:
    # macrocyclePenalty = math.log10(nMacrocycles+1)
    # This form generates better results when 2 or more macrocycles are present
    if nMacrocycles > 0:
        macrocyclePenalty = math.log10(2)
    score2 = (0. - sizePenalty - stereoPenalty -
              spiroPenalty - bridgePenalty - macrocyclePenalty)
    # correction for the fingerprint density
    # not in the original publication, added in version 1.1
    # to make highly symmetrical molecules easier to synthetise
    score3 = 0.
    if nAtoms > len(fps):
        score3 = math.log(float(nAtoms) / len(fps)) * .5
    sascore = score1 + score2 + score3
    # need to transform "raw" value into scale between 1 and 10
    # (renamed from `min`/`max`, which shadowed the builtins)
    raw_min = -4.0
    raw_max = 2.5
    sascore = 11. - (sascore - raw_min + 1) / (raw_max - raw_min) * 9.
    # smooth the 10-end
    if sascore > 8.:
        sascore = 8. + math.log(sascore + 1. - 9.)
    if sascore > 10.:
        sascore = 10.0
    elif sascore < 1.:
        sascore = 1.0
    return sascore
def processMols(mols):
    """Print a tab-separated line (SMILES, name, SA score) for each
    non-None molecule in `mols`, preceded by a header row."""
    print('smiles\tName\tsa_score')
    for mol in mols:
        if mol is None:
            continue
        score = calculateScore(mol)
        # Note: '%3f' (no dot) is plain '%f' with a minimum width of 3.
        print(Chem.MolToSmiles(mol) + "\t" + mol.GetProp('_Name') + "\t%3f" % score)
if __name__ == '__main__':
    import sys
    import time
    # Time the two phases (loading the fragment-score table, scoring the
    # input molecules) separately and report both on stderr, so the TSV
    # output on stdout stays clean.
    t1 = time.time()
    readFragmentScores("fpscores")
    t2 = time.time()
    suppl = Chem.SmilesMolSupplier(sys.argv[1])
    t3 = time.time()
    processMols(suppl)
    t4 = time.time()
    print('Reading took %.2f seconds. Calculating took %.2f seconds' % (
        (t2 - t1), (t4 - t3)),
        file=sys.stderr)
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
| 32.529412 | 114 | 0.673297 |
625933305c35003ef9ab90cfbca7b62852b671ff | 4,163 | py | Python | plugins/Craw_wanfang/Craw_wanfang.py | ljh2057/Craw | b9524c3e795ef4addc6177e414a73dd8231ac788 | [
"Apache-2.0"
] | null | null | null | plugins/Craw_wanfang/Craw_wanfang.py | ljh2057/Craw | b9524c3e795ef4addc6177e414a73dd8231ac788 | [
"Apache-2.0"
] | 4 | 2021-03-31T20:01:11.000Z | 2022-03-12T00:47:49.000Z | plugins/Craw_wanfang/Craw_wanfang.py | ljh2057/Craw | b9524c3e795ef4addc6177e414a73dd8231ac788 | [
"Apache-2.0"
] | 1 | 2020-09-28T07:19:00.000Z | 2020-09-28T07:19:00.000Z | from plugins.BasePlugin.BasePlugin import BasePlugin
from plugins.Craw_wanfang import Getxml
from plugins.Craw_wanfang import main
import os
from PyQt5.QtCore import pyqtSignal
import shutil
class Craw_wanfang(BasePlugin):
    """Crawler plugin for the Wanfang literature database.

    Loads its name/description and default save paths from the plugin's XML
    configuration file, runs the search defined there via ``main.SearchTools``,
    and moves downloaded files into a ``Craw_wanfang_ori`` directory.
    """

    # Qt signal emitted when a crawl run finishes or is stopped.
    trigger = pyqtSignal()
    # Qt signal carrying progress messages while crawling.
    CrawProcess=pyqtSignal(str)
    # NOTE(review): `args={}` is a mutable default argument shared across all
    # instances constructed without an explicit `args`; confirm this sharing
    # is intended before relying on it.
    def __init__(self, state=None, text=None,args={},filepath=None,propath=None):
        """Initialize the plugin and immediately load its XML configuration."""
        super().__init__(state)
        self.text=text
        self.name=None
        self.args =args
        self.describe=None
        self.configPath=None
        self.filepath=filepath
        self.propath=propath
        # keys exposed through getParameters()
        self.p_keys=['name','describe','configPath','state','text','filepath','propath']
        self.parameters={}.fromkeys(self.p_keys)
        self.loadFromConfig()
    def loadFromConfig(self):
        '''Locate this plugin's XML configuration file (plugins/<ClassName>/<ClassName>.xml) and load it.'''
        configfilePath=os.getcwd()+'/'+'plugins/'+self.__class__.__name__+'/'+self.__class__.__name__+'.xml'
        # configfilePath=os.getcwd()+'/'+self.__class__.__name__+'/'+'.xml'
        # print(configfilePath)
        self.getxml =Getxml.getXml(configfilePath)
        configDate = self.getxml.getfull()
        self.configPath=configfilePath
        '''
        Load configuration values: crawler name, crawler description,
        save-file path and property-file path.
        '''
        self.name=configDate['name']
        ##self.type=configDate['type']
        self.describe=configDate['describe']
        # constructor arguments take precedence; fall back to configured paths
        if self.filepath==None:
            self.filepath = configDate['filepath']
        if self.propath==None:
            self.propath = configDate['filepath']
        # if self.propath==None:
        #     self.propath = configDate['propertypath']
    def run(self):
        """Run the crawl: set up shared state flags, perform the search, then signal completion."""
        self.args["flag"] = True
        self.args["count"] = 0
        self.args["state"] = '正在爬取'
        self.args["text"] = self.text
        self.args["CrawProcess"]=self.CrawProcess
        ##self.args["type"]=self.type
        getxml = Getxml.getXml(self.configPath)
        user_input = getxml.getData()
        # the configured count is in pages of 20 results each
        count = int(getxml.getCount()) * 20
        search = main.SearchTools(count)
        try:
            search.search_reference(user_input, self.args)
        except Exception as e:
            print(e)
        self.args["flag"] = False
        self.args["count"] = 0
        self.trigger.emit()
    def stop(self):
        """Request the crawl to stop by clearing the shared flag, then signal."""
        self.args["flag"] = False
        # self.args["state"] = '爬取结束'
        self.trigger.emit()
    '''Save data: create a Craw_wanfang_ori directory under the target path if it does not exist yet.'''
    def saveData(self):
        """Move the crawled PDFs and attribute spreadsheet into the save directory."""
        if os.path.exists(self.filepath):
            # reuse the path directly if it already points into Craw_wanfang_ori
            if self.filepath.find('Craw_wanfang_ori') > 0:
                savepath = self.filepath
            else:
                if 'Craw_wanfang_ori' in os.listdir(self.filepath):
                    savepath=self.filepath+'Craw_wanfang_ori' if self.filepath[-1] == '/' else self.filepath + '/Craw_wanfang_ori'
                else:
                    os.makedirs(self.filepath+'Craw_wanfang_ori' if self.filepath[-1] == '/' else self.filepath + '/Craw_wanfang_ori')
                    savepath=self.filepath+'Craw_wanfang_ori' if self.filepath[-1] == '/' else self.filepath + '/Craw_wanfang_ori'
            count = self.getxml.getCount()
            search = main.SearchTools(count)
            # parent directory of the save path holds the attribute spreadsheet
            propath=os.path.abspath(os.path.join(savepath, ".."))
            # print(propath+'/Craw_cnki文献属性.xls')
            print(propath)
            # replace any stale spreadsheet from a previous run
            if os.path.exists(propath+'/Craw_wanfang文献属性.xls'):
                os.remove(propath+'/Craw_wanfang文献属性.xls')
            # if os.path.exists(propath+'/data'):
            # NOTE(review): bare except silently ignores any failure while
            # moving files; consider logging the exception.
            try:
                shutil.move('data/PDFs/Craw_wanfang文献属性.xls',propath)
                search.move_file('data/PDFs',savepath,self.args)
                print("文件已存到%s目录下"%savepath)
            except:
                pass
        else:
            print("文件目录不存在")
    def getParameters(self):
        """Return the plugin's current state as a dict keyed by p_keys."""
        self.parameters['name']=self.name
        self.parameters['describe']=self.describe
        self.parameters['configPath']=self.configPath
        self.parameters['state']=self.state
        self.parameters['text']=self.text
        self.parameters['filepath']=self.filepath
        self.parameters['propath']=self.propath
        return self.parameters
| 38.906542 | 134 | 0.600048 |
cdd64600c8715567baad21459dfd8aa4f34cf4b7 | 1,662 | py | Python | setup.py | aescwork/waxtablet | 60165d42fcd3192ec1cf26bff14f99e8920c96ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | aescwork/waxtablet | 60165d42fcd3192ec1cf26bff14f99e8920c96ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | aescwork/waxtablet | 60165d42fcd3192ec1cf26bff14f99e8920c96ba | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Always prefer setuptools over distutils
import os
import sys
import errno
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
# To use a consistent encoding
from codecs import open
from os import path
# Resolve paths relative to this setup.py so it works from any working directory.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the waxtablet command-line journaling application.
setup(
    name='waxtablet',
    version='1.0',
    description='A command-line journaling application which stores text entries in a database and displays them in html.',
    long_description=long_description,
    url='https://github.com/aescwork/waxtablet',
    author='aescwork',
    author_email='aescwork@protonmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='journal diary database text log organizer organization',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    # console entry point: `waxtablet` runs waxtablet.main:main
    entry_points={
        'console_scripts': [
            'waxtablet=waxtablet.main:main',
        ],
    },
)
| 27.7 | 123 | 0.648014 |
cf7e800c7c1a59352899d5b0f4d9c283b3e91edb | 1,010 | py | Python | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | 5 | 2017-12-05T04:00:22.000Z | 2020-12-16T20:44:46.000Z | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | null | null | null | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#Hmmm... http://www.endclothing.com/media/us_sitemap.xml
# NOTE(review): this is Python 2 code (urllib2, print statements); it will not
# run under Python 3 without porting.
import urllib2, zlib, json
# END. launches API endpoint listing upcoming product releases.
url='https://launches.endclothing.com/api/products'
req = urllib2.Request(url)
# Mimic a desktop Chrome browser so the request is served like a normal page load.
req.add_header(':host','launches.endclothing.com');req.add_header(':method','GET');req.add_header(':path','/api/products');req.add_header(':scheme','https');req.add_header(':version','HTTP/1.1');req.add_header('accept','application/json, text/plain, */*');req.add_header('accept-encoding','gzip,deflate');req.add_header('accept-language','en-US,en;q=0.8');req.add_header('cache-control','max-age=0');req.add_header('cookie','__/');req.add_header('user-agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.120 Chrome/37.0.2062.120 Safari/537.36');
resp = urllib2.urlopen(req).read()
# Response is gzip-compressed; the 15+32 window hint tells zlib to auto-detect the gzip header.
resp = zlib.decompress(bytes(bytearray(resp)),15+32)
data = json.loads(resp)
# Dump every attribute of every product in the launch list.
for product in data:
    for attrib in product.keys():
        print str(attrib)+' :: '+ str(product[attrib])
    print '\n'
| 67.333333 | 598 | 0.723762 |
01db284db81075adc6de17bebd68fd06f77b7177 | 2,927 | py | Python | setup.py | LLAW3301/docassemble-LLAW33012021S1WWC1 | e7bf8de7daebbe498cd2256cd9ae1c56bff1cec1 | [
"MIT"
] | 2 | 2021-04-09T02:35:37.000Z | 2021-04-22T06:16:04.000Z | setup.py | LLAW3301/docassemble-LLAW33012021S1WWC1 | e7bf8de7daebbe498cd2256cd9ae1c56bff1cec1 | [
"MIT"
] | null | null | null | setup.py | LLAW3301/docassemble-LLAW33012021S1WWC1 | e7bf8de7daebbe498cd2256cd9ae1c56bff1cec1 | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
# File and directory patterns excluded from package data by default.
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
    """Walk `where` and collect non-Python data files per package.

    Directories containing an ``__init__.py`` are treated as (sub)packages;
    everything else is collected as data files under the nearest enclosing
    package, keyed by dotted package name.  Returns a dict suitable for the
    ``package_data`` argument of ``setup()``.
    """
    out = {}
    # stack of (directory, path prefix inside package, dotted package name)
    stack = [(convert_path(where), '', package)]
    while stack:
        where, prefix, package = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    # directory is a package: recurse with a new dotted name
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package))
                else:
                    # plain directory: keep collecting under the same package
                    stack.append((fn, prefix + name + '/', package))
            else:
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix+name)
    return out
# Package metadata for the docassemble extension; package_data is collected
# from the extension directory via find_package_data above.
setup(name='docassemble.LLAW33012021S1WWC1',
      version='0.0.1',
      description=('A docassemble extension.'),
      long_description='# docassemble.LLAW33012021S1WWC1\r\n\r\n---\r\n# WWC Money Claim Form Guide\r\nThis application will assist self-represented workers to be able to calculate the difference between the amount they have been paid and the amount they are entitled to according to their award. The application will consolidate the relevant information needed to complete the SAET A38 Money Claim Form.\r\n\r\n---\r\n## Author\r\nTina Fotopoulos, Thao Luu, Sophie Potts, Tahlia Forster, Angus Lau, and\r\nMark Ferraretto, mark.ferraretto@flinders.edu.au\r\n\r\n',
      long_description_content_type='text/markdown',
      author='Mark Ferraretto',
      author_email='mark.ferraretto@flinders.edu.au',
      license='The MIT License (MIT)',
      url='https://docassemble.org',
      packages=find_packages(),
      namespace_packages=['docassemble'],
      install_requires=[],
      zip_safe=False,
      package_data=find_package_data(where='docassemble/LLAW33012021S1WWC1/', package='docassemble.LLAW33012021S1WWC1'),
     )
9c0d558df4ede05d5e17c528e4583565d938b1e1 | 2,225 | py | Python | cerebralcortex/data_migrator/schema_builder.py | MD2Korg/CerebralCortex-2.0-legacy | 5a07d39d51f843d8f19d058dfbb8eb83609f1f90 | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/data_migrator/schema_builder.py | MD2Korg/CerebralCortex-2.0-legacy | 5a07d39d51f843d8f19d058dfbb8eb83609f1f90 | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/data_migrator/schema_builder.py | MD2Korg/CerebralCortex-2.0-legacy | 5a07d39d51f843d8f19d058dfbb8eb83609f1f90 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.kernel.schema_builder.execution_context import execution_context
def get_execution_context(pm_algo_name: str, old_schema: dict) -> dict:
    """Build an execution-context schema embedding the legacy schema.

    :param pm_algo_name: name used for both the processing-module and
        algorithm schemas (all other schema fields are set to "blank").
    :param old_schema: legacy schema, wrapped under the "old_schema" key.
    :return: combined execution-context dictionary.
    """
    wrapped_schema = {"old_schema": old_schema}
    processing_module = execution_context().processing_module_schema(
        pm_algo_name, "blank", "blank", "blank"
    )
    algorithm = execution_context().algorithm_schema(
        pm_algo_name, "blank", "blank", "blank", "blank"
    )
    merged = {**algorithm, **wrapped_schema}
    return execution_context().get_execution_context(processing_module, merged)
def get_data_descriptor(old_schema: dict) -> dict:
    """Extract the data descriptor section from a legacy schema.

    :param old_schema: legacy schema dictionary.
    :return: the value stored under the "datadescriptor" key.
    """
    data_descriptor = old_schema["datadescriptor"]
    return data_descriptor
def get_annotations() -> list:
    """Return annotations for a migrated stream.

    Data migration does not carry annotations over, so the result is always
    an empty list.  (Fixed: the return annotation previously claimed ``dict``
    although the function returns a list.)

    :return: empty list of annotations.
    """
    return []
| 35.887097 | 109 | 0.751461 |
f9cc8eef2497793ef67bed02d4962f00de56e598 | 30,217 | py | Python | rasa/utils/train_utils.py | deepmipt/rasa | f0cc0ff6e515df2249998ff4e788009cccaecc02 | [
"Apache-2.0"
] | null | null | null | rasa/utils/train_utils.py | deepmipt/rasa | f0cc0ff6e515df2249998ff4e788009cccaecc02 | [
"Apache-2.0"
] | null | null | null | rasa/utils/train_utils.py | deepmipt/rasa | f0cc0ff6e515df2249998ff4e788009cccaecc02 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
import logging
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensor2tensor.models.transformer import (
transformer_base,
transformer_prepare_encoder,
transformer_encoder,
)
from tensor2tensor.layers.common_attention import large_compatible_negative
from rasa.utils.common import is_logging_disabled
import typing
from typing import List, Optional, Text, Dict, Tuple, Union, Generator, Callable, Any
if typing.TYPE_CHECKING:
from tensor2tensor.utils.hparam import HParams
# avoid warning print on contrib import - remove for tf 2
tf.contrib._warning = None
logger = logging.getLogger(__name__)
# a fix for tf 1.14 double logging suggested in
# https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
# from
# https://github.com/dhalperi/pybatfish/blob/f8ddd3938148f9a5d9c14c371a099802c564fac3/pybatfish/client/capirca.py#L33-L50
try:
    # Capirca uses Google's abseil-py library, which uses a Google-specific
    # wrapper for logging. That wrapper will write a warning to sys.stderr if
    # the Google command-line flags library has not been initialized.
    #
    # https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
    #
    # This is not right behavior for Python code that is invoked outside of a
    # Google-authored main program. Use knowledge of abseil-py to disable that
    # warning; ignore and continue if something goes wrong.
    import absl.logging

    # https://github.com/abseil/abseil-py/issues/99
    logging.root.removeHandler(absl.logging._absl_handler)
    # https://github.com/abseil/abseil-py/issues/102
    absl.logging._warn_preinit_stderr = False
except Exception:
    pass

# namedtuple for all tf session related data:
# X holds input features, Y label features, label_ids one label index per example
SessionData = namedtuple("SessionData", ("X", "Y", "label_ids"))
def load_tf_config(config: Dict[Text, Any]) -> Optional[tf.ConfigProto]:
    """Build a tf.ConfigProto for training from the "tf_config" config entry.

    The entry is removed from `config` when it holds a value; returns None
    when it is absent or None.
    """
    tf_config_options = config.get("tf_config")
    if tf_config_options is None:
        return None
    config.pop("tf_config")
    return tf.ConfigProto(**tf_config_options)
# noinspection PyPep8Naming
def train_val_split(
    session_data: "SessionData", evaluate_on_num_examples: int, random_seed: int
) -> Tuple["SessionData", "SessionData"]:
    """Create random hold out validation set using stratified split.

    Labels that occur only once cannot be stratified, so those examples are
    kept in the training set and only multi-example labels are split.

    Args:
        session_data: full data set to split.
        evaluate_on_num_examples: number of examples for the validation set.
        random_seed: seed for the stratified split.

    Returns:
        (train SessionData, validation SessionData).

    Raises:
        ValueError: if the requested validation size is larger than the data
            minus one example per class, or smaller than the number of classes.
    """
    # how many examples each label has
    label_counts = dict(
        zip(*np.unique(session_data.label_ids, return_counts=True, axis=0))
    )
    if evaluate_on_num_examples >= len(session_data.X) - len(label_counts):
        raise ValueError(
            "Validation set of {} is too large. Remaining train set "
            "should be at least equal to number of classes {}."
            "".format(evaluate_on_num_examples, len(label_counts))
        )
    elif evaluate_on_num_examples < len(label_counts):
        raise ValueError(
            "Validation set of {} is too small. It should be "
            "at least equal to number of classes {}."
            "".format(evaluate_on_num_examples, len(label_counts))
        )
    # per-example count of its own label
    counts = np.array([label_counts[label] for label in session_data.label_ids])
    # examples whose label occurs more than once can be stratified
    multi_X = session_data.X[counts > 1]
    multi_Y = session_data.Y[counts > 1]
    multi_label_ids = session_data.label_ids[counts > 1]
    # singleton labels go straight into the training set
    solo_X = session_data.X[counts == 1]
    solo_Y = session_data.Y[counts == 1]
    solo_label_ids = session_data.label_ids[counts == 1]
    (X_train, X_val, Y_train, Y_val, label_ids_train, label_ids_val) = train_test_split(
        multi_X,
        multi_Y,
        multi_label_ids,
        test_size=evaluate_on_num_examples,
        random_state=random_seed,
        stratify=multi_label_ids,
    )
    X_train = np.concatenate([X_train, solo_X])
    Y_train = np.concatenate([Y_train, solo_Y])
    label_ids_train = np.concatenate([label_ids_train, solo_label_ids])
    return (
        SessionData(X=X_train, Y=Y_train, label_ids=label_ids_train),
        SessionData(X=X_val, Y=Y_val, label_ids=label_ids_val),
    )
def shuffle_session_data(session_data: "SessionData") -> "SessionData":
    """Return a new SessionData with examples in a random order."""
    order = np.random.permutation(len(session_data.X))
    shuffled = [
        field[order]
        for field in (session_data.X, session_data.Y, session_data.label_ids)
    ]
    return SessionData(*shuffled)
def split_session_data_by_label(
    session_data: "SessionData", unique_label_ids: "np.ndarray"
) -> List["SessionData"]:
    """Reorganize session data into a list of session data with the same labels."""

    def _subset_for(label_id):
        # boolean mask selecting all examples that carry this label
        selector = session_data.label_ids == label_id
        return SessionData(
            X=session_data.X[selector],
            Y=session_data.Y[selector],
            label_ids=session_data.label_ids[selector],
        )

    return [_subset_for(label_id) for label_id in unique_label_ids]
# noinspection PyPep8Naming
def balance_session_data(
    session_data: "SessionData", batch_size: int, shuffle: bool
) -> "SessionData":
    """Mix session data to account for class imbalance.

    This batching strategy puts rare classes approximately in every other batch,
    by repeating them. Mimics stratified batching, but also takes into account
    that more populated classes should appear more often.

    Args:
        session_data: data to rebalance.
        batch_size: target batch size, used to size each label's slice.
        shuffle: if True, the order labels are visited in is randomized
            on every pass.

    Returns:
        A new SessionData with examples reordered (rare ones repeated).
    """
    num_examples = len(session_data.X)
    unique_label_ids, counts_label_ids = np.unique(
        session_data.label_ids, return_counts=True, axis=0
    )
    num_label_ids = len(unique_label_ids)
    # need to call every time, so that the data is shuffled inside each class
    label_data = split_session_data_by_label(session_data, unique_label_ids)
    # per-label read position, number of completed passes, and a flag that
    # makes exhausted labels contribute only every other round
    data_idx = [0] * num_label_ids
    num_data_cycles = [0] * num_label_ids
    skipped = [False] * num_label_ids
    new_X = []
    new_Y = []
    new_label_ids = []
    # loop until every label has been cycled through at least once
    while min(num_data_cycles) == 0:
        if shuffle:
            indices_of_labels = np.random.permutation(num_label_ids)
        else:
            indices_of_labels = range(num_label_ids)
        for index in indices_of_labels:
            if num_data_cycles[index] > 0 and not skipped[index]:
                # label already exhausted: skip this round so repeats of
                # rare labels are interleaved, not back-to-back
                skipped[index] = True
                continue
            else:
                skipped[index] = False
            # slice size proportional to this label's share of the data
            index_batch_size = (
                int(counts_label_ids[index] / num_examples * batch_size) + 1
            )
            new_X.append(
                label_data[index].X[
                    data_idx[index] : data_idx[index] + index_batch_size
                ]
            )
            new_Y.append(
                label_data[index].Y[
                    data_idx[index] : data_idx[index] + index_batch_size
                ]
            )
            new_label_ids.append(
                label_data[index].label_ids[
                    data_idx[index] : data_idx[index] + index_batch_size
                ]
            )
            data_idx[index] += index_batch_size
            if data_idx[index] >= counts_label_ids[index]:
                # wrapped around this label's examples: count a full pass
                num_data_cycles[index] += 1
                data_idx[index] = 0
            if min(num_data_cycles) > 0:
                break
    return SessionData(
        X=np.concatenate(new_X),
        Y=np.concatenate(new_Y),
        label_ids=np.concatenate(new_label_ids),
    )
def gen_batch(
    session_data: "SessionData",
    batch_size: int,
    batch_strategy: Text = "sequence",
    shuffle: bool = False,
) -> Generator[Tuple["np.ndarray", "np.ndarray"], None, None]:
    """Yield (X, Y) batches of at most `batch_size` consecutive examples.

    With `shuffle=True` the data is permuted first; with
    `batch_strategy="balanced"` it is rebalanced for class imbalance.
    """
    if shuffle:
        session_data = shuffle_session_data(session_data)

    if batch_strategy == "balanced":
        session_data = balance_session_data(session_data, batch_size, shuffle)

    total = session_data.X.shape[0]
    for start in range(0, total, batch_size):
        end = start + batch_size
        yield session_data.X[start:end], session_data.Y[start:end]
# noinspection PyPep8Naming
def create_tf_dataset(
    session_data: "SessionData",
    batch_size: Union["tf.Tensor", int],
    batch_strategy: Text = "sequence",
    shuffle: bool = False,
) -> "tf.data.Dataset":
    """Create a tf dataset that yields (X, Y) batches via `gen_batch`.

    Output shapes leave batch (and, for 3D data, sequence length) dimensions
    as None so batches of varying size/length are accepted.
    """
    # set batch and sequence length to None
    if session_data.X[0].ndim == 1:
        shape_X = (None, session_data.X[0].shape[-1])
    else:
        shape_X = (None, None, session_data.X[0].shape[-1])

    if session_data.Y[0].ndim == 1:
        shape_Y = (None, session_data.Y[0].shape[-1])
    else:
        shape_Y = (None, None, session_data.Y[0].shape[-1])

    return tf.data.Dataset.from_generator(
        # batch_size is passed through `args` so it can be a tf.Tensor that
        # is resolved per-epoch (e.g. for a linearly increasing batch size)
        lambda batch_size_: gen_batch(
            session_data, batch_size_, batch_strategy, shuffle
        ),
        output_types=(tf.float32, tf.float32),
        output_shapes=(shape_X, shape_Y),
        args=([batch_size]),
    )
def create_iterator_init_datasets(
    session_data: "SessionData",
    eval_session_data: "SessionData",
    batch_size: Union["tf.Tensor", int],
    batch_strategy: Text,
) -> Tuple["tf.data.Iterator", "tf.Operation", "tf.Operation"]:
    """Create a reinitializable iterator plus init ops for train/eval datasets.

    Returns:
        (iterator, train_init_op, eval_init_op); eval_init_op is None when
        no eval data is given.
    """
    # training data is shuffled and batched with the requested strategy
    train_dataset = create_tf_dataset(
        session_data, batch_size, batch_strategy=batch_strategy, shuffle=True
    )
    # structure-based iterator so the same handle can read both datasets
    iterator = tf.data.Iterator.from_structure(
        train_dataset.output_types,
        train_dataset.output_shapes,
        output_classes=train_dataset.output_classes,
    )
    train_init_op = iterator.make_initializer(train_dataset)
    if eval_session_data is not None:
        # evaluation data is read sequentially without shuffling
        eval_init_op = iterator.make_initializer(
            create_tf_dataset(eval_session_data, batch_size)
        )
    else:
        eval_init_op = None
    return iterator, train_init_op, eval_init_op
# noinspection PyPep8Naming
def create_tf_fnn(
    x_in: "tf.Tensor",
    layer_sizes: List[int],
    droprate: float,
    C2: float,
    is_training: "tf.Tensor",
    layer_name_suffix: Text,
    activation: Optional[Callable] = tf.nn.relu,
    use_bias: bool = True,
    kernel_initializer: Optional["tf.keras.initializers.Initializer"] = None,
) -> "tf.Tensor":
    """Create nn with hidden layers and name suffix.

    Each hidden layer is a dense layer with L2 regularization (weight C2)
    followed by dropout that is active only while `is_training` is true.
    Note: the input is passed through tf.nn.relu before the first hidden
    layer regardless of the `activation` argument.
    """
    reg = tf.contrib.layers.l2_regularizer(C2)
    x = tf.nn.relu(x_in)
    for i, layer_size in enumerate(layer_sizes):
        x = tf.layers.dense(
            inputs=x,
            units=layer_size,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=reg,
            # AUTO_REUSE lets the same weights be shared across calls with
            # the same suffix (e.g. for dialogue and label encoders)
            name="hidden_layer_{}_{}".format(layer_name_suffix, i),
            reuse=tf.AUTO_REUSE,
        )
        x = tf.layers.dropout(x, rate=droprate, training=is_training)
    return x
def tf_normalize_if_cosine(x: "tf.Tensor", similarity_type: Text) -> "tf.Tensor":
    """Normalize embedding if similarity type is cosine."""
    if similarity_type == "inner":
        # inner product works on raw vectors
        return x
    if similarity_type == "cosine":
        # unit-length vectors turn dot products into cosine similarities
        return tf.nn.l2_normalize(x, -1)
    raise ValueError(
        "Wrong similarity type '{}', "
        "should be 'cosine' or 'inner'"
        "".format(similarity_type)
    )
# noinspection PyPep8Naming
def create_tf_embed(
    x: "tf.Tensor",
    embed_dim: int,
    C2: float,
    similarity_type: Text,
    layer_name_suffix: Text,
) -> "tf.Tensor":
    """Create dense embedding layer with a name.

    Projects `x` to `embed_dim` with an L2-regularized (weight C2) linear
    layer; the result is L2-normalized when `similarity_type` is "cosine".
    """
    reg = tf.contrib.layers.l2_regularizer(C2)
    embed_x = tf.layers.dense(
        inputs=x,
        units=embed_dim,
        activation=None,
        kernel_regularizer=reg,
        name="embed_layer_{}".format(layer_name_suffix),
        reuse=tf.AUTO_REUSE,
    )
    # normalize embedding vectors for cosine similarity
    return tf_normalize_if_cosine(embed_x, similarity_type)
def create_t2t_hparams(
    num_transformer_layers: int,
    transformer_size: int,
    num_heads: int,
    droprate: float,
    pos_encoding: Text,
    max_seq_length: int,
    is_training: "tf.Tensor",
) -> "HParams":
    """Create parameters for t2t transformer.

    Starts from tensor2tensor's `transformer_base` preset and overrides it
    for a unidirectional encoder with relative-position self-attention.
    """
    hparams = transformer_base()

    hparams.num_hidden_layers = num_transformer_layers
    hparams.hidden_size = transformer_size
    # it seems to be factor of 4 for transformer architectures in t2t
    hparams.filter_size = hparams.hidden_size * 4
    hparams.num_heads = num_heads
    hparams.relu_dropout = droprate
    hparams.pos = pos_encoding

    hparams.max_length = max_seq_length

    hparams.unidirectional_encoder = True

    hparams.self_attention_type = "dot_product_relative_v2"
    hparams.max_relative_position = 5
    hparams.add_relative_to_values = True

    # When not in training mode, set all forms of dropout to zero.
    # (multiplying by the 0/1 `is_training` flag keeps this a graph op)
    for key, value in hparams.values().items():
        if key.endswith("dropout") or key == "label_smoothing":
            setattr(hparams, key, value * tf.cast(is_training, tf.float32))

    return hparams
# noinspection PyUnresolvedReferences
# noinspection PyPep8Naming
def create_t2t_transformer_encoder(
    x_in: "tf.Tensor",
    mask: "tf.Tensor",
    attention_weights: Dict[Text, "tf.Tensor"],
    hparams: "HParams",
    C2: float,
    is_training: "tf.Tensor",
) -> "tf.Tensor":
    """Create t2t transformer encoder.

    Projects the input to `hparams.hidden_size`, runs the tensor2tensor
    encoder stack, and re-applies `mask` after each stage so padded
    positions stay zero.  Attention maps are stored in `attention_weights`.
    """
    with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
        # linear projection into the transformer's hidden size
        x = create_tf_fnn(
            x_in,
            [hparams.hidden_size],
            hparams.layer_prepostprocess_dropout,
            C2,
            is_training,
            layer_name_suffix="pre_embed",
            activation=None,
            use_bias=False,
            kernel_initializer=tf.random_normal_initializer(
                0.0, hparams.hidden_size ** -0.5
            ),
        )
        if hparams.multiply_embedding_mode == "sqrt_depth":
            x *= hparams.hidden_size ** 0.5

        x *= tf.expand_dims(mask, -1)
        (
            x,
            self_attention_bias,
            encoder_decoder_attention_bias,
        ) = transformer_prepare_encoder(x, None, hparams)

        x *= tf.expand_dims(mask, -1)

        x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)

        attn_bias_for_padding = None
        # Otherwise the encoder will just use encoder_self_attention_bias.
        if hparams.unidirectional_encoder:
            attn_bias_for_padding = encoder_decoder_attention_bias

        x = transformer_encoder(
            x,
            self_attention_bias,
            hparams,
            nonpadding=mask,
            save_weights_to=attention_weights,
            attn_bias_for_padding=attn_bias_for_padding,
        )

        x *= tf.expand_dims(mask, -1)

        return tf.nn.dropout(tf.nn.relu(x), 1.0 - hparams.layer_prepostprocess_dropout)
def _tf_make_flat(x: "tf.Tensor") -> "tf.Tensor":
    """Collapse all leading dimensions so the tensor becomes 2D."""
    feature_dim = x.shape[-1]
    return tf.reshape(x, (-1, feature_dim))
def _tf_sample_neg(
    batch_size: "tf.Tensor", all_bs: "tf.Tensor", neg_ids: "tf.Tensor"
) -> "tf.Tensor":
    """Sample negative examples for given indices"""
    # replicate the candidate matrix once per batch row, then gather the
    # requested negative indices from each copy
    candidates = tf.expand_dims(all_bs, 0)
    candidates = tf.tile(candidates, (batch_size, 1, 1))
    return tf.batch_gather(candidates, neg_ids)
def _tf_calc_iou_mask(
    pos_b: "tf.Tensor", all_bs: "tf.Tensor", neg_ids: "tf.Tensor"
) -> "tf.Tensor":
    """Calculate IOU mask for given indices.

    Treats rows as non-negative feature-count vectors: element-wise min/max
    give intersection and union sizes.  The returned mask is 1.0 for sampled
    "negatives" that are actually identical to the positive (IOU == 1) so
    they can be excluded from the loss, and 0.0 otherwise.
    """
    pos_b_in_flat = tf.expand_dims(pos_b, -2)
    neg_b_in_flat = _tf_sample_neg(tf.shape(pos_b)[0], all_bs, neg_ids)

    intersection_b_in_flat = tf.minimum(neg_b_in_flat, pos_b_in_flat)
    union_b_in_flat = tf.maximum(neg_b_in_flat, pos_b_in_flat)

    iou = tf.reduce_sum(intersection_b_in_flat, -1) / tf.reduce_sum(union_b_in_flat, -1)
    # 1 - relu(sign(1 - iou)) is 1.0 exactly when iou == 1, else 0.0
    return 1.0 - tf.nn.relu(tf.sign(1.0 - iou))
def _tf_get_negs(
    all_embed: "tf.Tensor", all_raw: "tf.Tensor", raw_pos: "tf.Tensor", num_neg: int
) -> Tuple["tf.Tensor", "tf.Tensor"]:
    """Get negative examples from given tensor.

    For every (flattened) positive example, draws `num_neg` random candidate
    indices out of `all_embed`/`all_raw` and marks candidates identical to
    the positive via the IOU mask.

    Returns:
        (neg_embed, bad_negs): sampled negative embeddings and a mask that is
        1.0 where a sampled negative equals the positive example.
    """
    if len(raw_pos.shape) == 3:
        batch_size = tf.shape(raw_pos)[0]
        seq_length = tf.shape(raw_pos)[1]
    else:  # len(raw_pos.shape) == 2
        batch_size = tf.shape(raw_pos)[0]
        seq_length = 1

    raw_flat = _tf_make_flat(raw_pos)

    total_candidates = tf.shape(all_embed)[0]

    # independently shuffled candidate indices per flattened example
    all_indices = tf.tile(
        tf.expand_dims(tf.range(0, total_candidates, 1), 0),
        (batch_size * seq_length, 1),
    )
    shuffled_indices = tf.transpose(
        tf.random.shuffle(tf.transpose(all_indices, (1, 0))), (1, 0)
    )
    neg_ids = shuffled_indices[:, :num_neg]

    bad_negs = _tf_calc_iou_mask(raw_flat, all_raw, neg_ids)
    if len(raw_pos.shape) == 3:
        bad_negs = tf.reshape(bad_negs, (batch_size, seq_length, -1))

    neg_embed = _tf_sample_neg(batch_size * seq_length, all_embed, neg_ids)
    if len(raw_pos.shape) == 3:
        # restore the (batch, seq, num_neg, embed) layout for sequence data
        neg_embed = tf.reshape(
            neg_embed, (batch_size, seq_length, -1, all_embed.shape[-1])
        )

    return neg_embed, bad_negs
def sample_negatives(
    a_embed: "tf.Tensor",
    b_embed: "tf.Tensor",
    b_raw: "tf.Tensor",
    all_b_embed: "tf.Tensor",
    all_b_raw: "tf.Tensor",
    num_neg: int,
) -> Tuple[
    "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor"
]:
    """Sample negative examples.

    Draws negatives on both sides: negative "dialogue" embeddings are sampled
    from the batch's own (flattened) embeddings, negative "bot" embeddings
    from the full candidate set.  The positives get an extra axis so they
    broadcast against the sampled negatives.

    Returns:
        (pos_dial_embed, pos_bot_embed, neg_dial_embed, neg_bot_embed,
         dial_bad_negs, bot_bad_negs) — the *_bad_negs masks flag sampled
        negatives that are identical to the positive.
    """
    neg_dial_embed, dial_bad_negs = _tf_get_negs(
        _tf_make_flat(a_embed), _tf_make_flat(b_raw), b_raw, num_neg
    )

    neg_bot_embed, bot_bad_negs = _tf_get_negs(all_b_embed, all_b_raw, b_raw, num_neg)
    return (
        tf.expand_dims(a_embed, -2),
        tf.expand_dims(b_embed, -2),
        neg_dial_embed,
        neg_bot_embed,
        dial_bad_negs,
        bot_bad_negs,
    )
def tf_raw_sim(
    a: "tf.Tensor", b: "tf.Tensor", mask: Optional["tf.Tensor"]
) -> "tf.Tensor":
    """Dot-product similarity over the last axis, optionally sequence-masked."""
    similarity = tf.reduce_sum(a * b, -1)
    if mask is None:
        return similarity
    return similarity * tf.expand_dims(mask, 2)
def tf_sim(
    pos_dial_embed: "tf.Tensor",
    pos_bot_embed: "tf.Tensor",
    neg_dial_embed: "tf.Tensor",
    neg_bot_embed: "tf.Tensor",
    dial_bad_negs: "tf.Tensor",
    bot_bad_negs: "tf.Tensor",
    mask: Optional["tf.Tensor"],
) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor"]:
    """Define similarity.

    Computes the positive similarity plus four negative similarity terms.
    Sampled negatives that are identical to the positive (bad_negs == 1)
    are pushed to a large negative value so they cannot win a max or
    contribute to a softmax.
    """
    # calculate similarity with several
    # embedded actions for the loss
    neg_inf = large_compatible_negative(pos_dial_embed.dtype)

    sim_pos = tf_raw_sim(pos_dial_embed, pos_bot_embed, mask)
    sim_neg = tf_raw_sim(pos_dial_embed, neg_bot_embed, mask) + neg_inf * bot_bad_negs
    sim_neg_bot_bot = (
        tf_raw_sim(pos_bot_embed, neg_bot_embed, mask) + neg_inf * bot_bad_negs
    )
    sim_neg_dial_dial = (
        tf_raw_sim(pos_dial_embed, neg_dial_embed, mask) + neg_inf * dial_bad_negs
    )
    sim_neg_bot_dial = (
        tf_raw_sim(pos_bot_embed, neg_dial_embed, mask) + neg_inf * dial_bad_negs
    )

    # output similarities between user input and bot actions
    # and similarities between bot actions and similarities between user inputs
    return sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial
def tf_calc_accuracy(sim_pos: "tf.Tensor", sim_neg: "tf.Tensor") -> "tf.Tensor":
    """Calculate accuracy: the fraction of examples where the positive
    similarity is at least as large as every negative similarity."""

    max_all_sim = tf.reduce_max(tf.concat([sim_pos, sim_neg], -1), -1)
    return tf.reduce_mean(
        tf.cast(tf.math.equal(max_all_sim, tf.squeeze(sim_pos, -1)), tf.float32)
    )
# noinspection PyPep8Naming
def tf_loss_margin(
    sim_pos: "tf.Tensor",
    sim_neg: "tf.Tensor",
    sim_neg_bot_bot: "tf.Tensor",
    sim_neg_dial_dial: "tf.Tensor",
    sim_neg_bot_dial: "tf.Tensor",
    mask: Optional["tf.Tensor"],
    mu_pos: float,
    mu_neg: float,
    use_max_sim_neg: bool,
    C_emb: float,
) -> "tf.Tensor":
    """Define max margin loss.

    The positive similarity is pushed above `mu_pos`, negative similarities
    below `-mu_neg`; the three cross-embedding similarity terms are penalized
    with weight `C_emb`.  When `mask` is given the loss is averaged over the
    valid sequence positions; regularization losses are added at the end.
    """
    # loss for maximizing similarity with correct action
    loss = tf.maximum(0.0, mu_pos - tf.squeeze(sim_pos, -1))

    # loss for minimizing similarity with `num_neg` incorrect actions
    if use_max_sim_neg:
        # minimize only maximum similarity over incorrect actions
        max_sim_neg = tf.reduce_max(sim_neg, -1)
        loss += tf.maximum(0.0, mu_neg + max_sim_neg)
    else:
        # minimize all similarities with incorrect actions
        max_margin = tf.maximum(0.0, mu_neg + sim_neg)
        loss += tf.reduce_sum(max_margin, -1)

    # penalize max similarity between pos bot and neg bot embeddings
    max_sim_neg_bot = tf.maximum(0.0, tf.reduce_max(sim_neg_bot_bot, -1))
    loss += max_sim_neg_bot * C_emb

    # penalize max similarity between pos dial and neg dial embeddings
    max_sim_neg_dial = tf.maximum(0.0, tf.reduce_max(sim_neg_dial_dial, -1))
    loss += max_sim_neg_dial * C_emb

    # penalize max similarity between pos bot and neg dial embeddings
    max_sim_neg_dial = tf.maximum(0.0, tf.reduce_max(sim_neg_bot_dial, -1))
    loss += max_sim_neg_dial * C_emb

    if mask is not None:
        # mask loss for different length sequences
        loss *= mask
        # average the loss over sequence length
        loss = tf.reduce_sum(loss, -1) / tf.reduce_sum(mask, 1)

    # average the loss over the batch
    loss = tf.reduce_mean(loss)

    # add regularization losses
    loss += tf.losses.get_regularization_loss()

    return loss
def tf_loss_softmax(
    sim_pos: "tf.Tensor",
    sim_neg: "tf.Tensor",
    sim_neg_bot_bot: "tf.Tensor",
    sim_neg_dial_dial: "tf.Tensor",
    sim_neg_bot_dial: "tf.Tensor",
    mask: Optional["tf.Tensor"],
    scale_loss: bool,
) -> "tf.Tensor":
    """Define softmax loss.

    All similarity terms are concatenated into one logit vector whose first
    entry is the positive; the target distribution puts all mass on it.
    With `scale_loss`, examples the model already classifies confidently
    are down-weighted.  Regularization losses are added at the end.
    """
    logits = tf.concat(
        [sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial], -1
    )

    # create labels for softmax: only the first (positive) logit is correct
    if len(logits.shape) == 3:
        pos_labels = tf.ones_like(logits[:, :, :1])
        neg_labels = tf.zeros_like(logits[:, :, 1:])
    else:  # len(logits.shape) == 2
        pos_labels = tf.ones_like(logits[:, :1])
        neg_labels = tf.zeros_like(logits[:, 1:])
    labels = tf.concat([pos_labels, neg_labels], -1)

    if mask is None:
        mask = 1.0

    if scale_loss:
        # mask loss by prediction confidence: ((1 - p_pos) / 0.5) ** 4
        # shrinks the contribution of already-confident examples
        pred = tf.nn.softmax(logits)
        if len(pred.shape) == 3:
            pos_pred = pred[:, :, 0]
        else:  # len(pred.shape) == 2
            pos_pred = pred[:, 0]
        mask *= tf.pow((1 - pos_pred) / 0.5, 4)

    loss = tf.losses.softmax_cross_entropy(labels, logits, mask)
    # add regularization losses
    loss += tf.losses.get_regularization_loss()

    return loss
# noinspection PyPep8Naming
def choose_loss(
    sim_pos: "tf.Tensor",
    sim_neg: "tf.Tensor",
    sim_neg_bot_bot: "tf.Tensor",
    sim_neg_dial_dial: "tf.Tensor",
    sim_neg_bot_dial: "tf.Tensor",
    mask: Optional["tf.Tensor"],
    loss_type: Text,
    mu_pos: float,
    mu_neg: float,
    use_max_sim_neg: bool,
    C_emb: float,
    scale_loss: bool,
) -> "tf.Tensor":
    """Dispatch to the loss implementation selected by ``loss_type``.

    Raises ValueError for any value other than 'margin' or 'softmax'.
    """

    if loss_type == "margin":
        return tf_loss_margin(
            sim_pos,
            sim_neg,
            sim_neg_bot_bot,
            sim_neg_dial_dial,
            sim_neg_bot_dial,
            mask,
            mu_pos,
            mu_neg,
            use_max_sim_neg,
            C_emb,
        )

    if loss_type == "softmax":
        return tf_loss_softmax(
            sim_pos,
            sim_neg,
            sim_neg_bot_bot,
            sim_neg_dial_dial,
            sim_neg_bot_dial,
            mask,
            scale_loss,
        )

    raise ValueError(
        "Wrong loss type '{}', should be 'margin' or 'softmax'".format(loss_type)
    )
# noinspection PyPep8Naming
def calculate_loss_acc(
    a_embed: "tf.Tensor",
    b_embed: "tf.Tensor",
    b_raw: "tf.Tensor",
    all_b_embed: "tf.Tensor",
    all_b_raw: "tf.Tensor",
    num_neg: int,
    mask: Optional["tf.Tensor"],
    loss_type: Text,
    mu_pos: float,
    mu_neg: float,
    use_max_sim_neg: bool,
    C_emb: float,
    scale_loss: bool,
) -> Tuple["tf.Tensor", "tf.Tensor"]:
    """Sample negatives, compute similarities, then return (loss, accuracy)."""

    # draw negative samples for both dialogue and bot embeddings
    negatives = sample_negatives(
        a_embed, b_embed, b_raw, all_b_embed, all_b_raw, num_neg
    )
    (
        pos_dial_embed,
        pos_bot_embed,
        neg_dial_embed,
        neg_bot_embed,
        dial_bad_negs,
        bot_bad_negs,
    ) = negatives

    # similarities between every positive/negative embedding combination
    similarities = tf_sim(
        pos_dial_embed,
        pos_bot_embed,
        neg_dial_embed,
        neg_bot_embed,
        dial_bad_negs,
        bot_bad_negs,
        mask,
    )
    sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial = similarities

    acc = tf_calc_accuracy(sim_pos, sim_neg)

    loss = choose_loss(
        sim_pos,
        sim_neg,
        sim_neg_bot_bot,
        sim_neg_dial_dial,
        sim_neg_bot_dial,
        mask,
        loss_type,
        mu_pos,
        mu_neg,
        use_max_sim_neg,
        C_emb,
        scale_loss,
    )

    return loss, acc
def confidence_from_sim(sim: "tf.Tensor", similarity_type: Text) -> "tf.Tensor":
    """Turn raw similarity values into confidences.

    Cosine similarities are clipped at zero; other similarity types are
    normalized into [0, 1] with a softmax.
    """
    if similarity_type == "cosine":
        # clip negative values to zero
        return tf.nn.relu(sim)
    # normalize result to [0, 1] with softmax
    return tf.nn.softmax(sim)
def linearly_increasing_batch_size(
    epoch: int, batch_size: Union[List[int], int], epochs: int
) -> int:
    """Linearly increase batch size with every epoch.

    The idea comes from https://arxiv.org/abs/1711.00489.

    If ``batch_size`` is a scalar it is used as-is; if it is a list, its first
    two entries are the start and end of a linear schedule over ``epochs``.
    """
    if not isinstance(batch_size, list):
        return int(batch_size)

    start, end = batch_size[0], batch_size[1]
    if epochs <= 1:
        # no interpolation possible with a single epoch
        return int(start)

    # interpolate between start and end over (epochs - 1) steps
    step = (end - start) / (epochs - 1)
    return int(start + epoch * step)
def output_validation_stat(
    eval_init_op: "tf.Operation",
    loss: "tf.Tensor",
    acc: "tf.Tensor",
    session: "tf.Session",
    is_training: "tf.Tensor",
    batch_size_in: "tf.Tensor",
    ep_batch_size: int,
) -> Tuple[float, float]:
    """Run one full pass over the validation dataset.

    Args:
        eval_init_op: op that (re-)initializes the validation dataset iterator
        loss: loss tensor to evaluate
        acc: accuracy tensor to evaluate
        session: active TF session
        is_training: boolean placeholder fed False during evaluation
            (the original annotation said "tf.Session", which was incorrect)
        batch_size_in: placeholder for the batch size
        ep_batch_size: batch size to use for this evaluation pass

    Returns:
        Tuple of (mean validation loss, mean validation accuracy); (0.0, 0.0)
        when the validation dataset yields no batches.
    """
    session.run(eval_init_op, feed_dict={batch_size_in: ep_batch_size})
    ep_val_loss = 0
    ep_val_acc = 0
    batches_per_epoch = 0
    # iterate until the dataset iterator is exhausted
    while True:
        try:
            batch_val_loss, batch_val_acc = session.run(
                [loss, acc], feed_dict={is_training: False}
            )
            batches_per_epoch += 1
            ep_val_loss += batch_val_loss
            ep_val_acc += batch_val_acc
        except tf.errors.OutOfRangeError:
            break

    if batches_per_epoch == 0:
        # empty validation set: avoid ZeroDivisionError
        return 0.0, 0.0
    return ep_val_loss / batches_per_epoch, ep_val_acc / batches_per_epoch
def train_tf_dataset(
    train_init_op: "tf.Operation",
    eval_init_op: "tf.Operation",
    batch_size_in: "tf.Tensor",
    loss: "tf.Tensor",
    acc: "tf.Tensor",
    train_op: "tf.Tensor",
    session: "tf.Session",
    is_training: "tf.Tensor",
    epochs: int,
    batch_size: Union[List[int], int],
    evaluate_on_num_examples: int,
    evaluate_every_num_epochs: int,
) -> None:
    """Train tf graph.

    Runs ``epochs`` passes over the training dataset, linearly growing the
    batch size per epoch, and periodically evaluates on the validation set
    (every ``evaluate_every_num_epochs`` epochs, and always on the last epoch)
    when ``eval_init_op`` is provided.  Progress is reported through a tqdm
    progress bar and the module-level ``logger``.
    """
    session.run(tf.global_variables_initializer())

    if evaluate_on_num_examples:
        logger.info(
            "Validation accuracy is calculated every {} epochs"
            "".format(evaluate_every_num_epochs)
        )
    pbar = tqdm(range(epochs), desc="Epochs", disable=is_logging_disabled())

    train_loss = 0
    train_acc = 0
    val_loss = 0
    val_acc = 0
    for ep in pbar:
        # batch size grows linearly with the epoch (see
        # linearly_increasing_batch_size)
        ep_batch_size = linearly_increasing_batch_size(ep, batch_size, epochs)

        session.run(train_init_op, feed_dict={batch_size_in: ep_batch_size})

        ep_train_loss = 0
        ep_train_acc = 0
        batches_per_epoch = 0
        # drain the training dataset iterator for this epoch
        while True:
            try:
                _, batch_train_loss, batch_train_acc = session.run(
                    [train_op, loss, acc], feed_dict={is_training: True}
                )
                batches_per_epoch += 1
                ep_train_loss += batch_train_loss
                ep_train_acc += batch_train_acc
            except tf.errors.OutOfRangeError:
                break

        # per-epoch means shown on the progress bar
        train_loss = ep_train_loss / batches_per_epoch
        train_acc = ep_train_acc / batches_per_epoch
        postfix_dict = {
            "loss": "{:.3f}".format(train_loss),
            "acc": "{:.3f}".format(train_acc),
        }

        if eval_init_op is not None:
            # evaluate periodically and on the final epoch
            if (ep + 1) % evaluate_every_num_epochs == 0 or (ep + 1) == epochs:
                val_loss, val_acc = output_validation_stat(
                    eval_init_op,
                    loss,
                    acc,
                    session,
                    is_training,
                    batch_size_in,
                    ep_batch_size,
                )
                postfix_dict.update(
                    {
                        "val_loss": "{:.3f}".format(val_loss),
                        "val_acc": "{:.3f}".format(val_acc),
                    }
                )

        pbar.set_postfix(postfix_dict)

    final_message = (
        "Finished training embedding policy, "
        "train loss={:.3f}, train accuracy={:.3f}"
        "".format(train_loss, train_acc)
    )
    if eval_init_op is not None:
        final_message += (
            ", validation loss={:.3f}, validation accuracy={:.3f}"
            "".format(val_loss, val_acc)
        )
    logger.info(final_message)
def extract_attention(attention_weights) -> Optional["tf.Tensor"]:
    """Extract attention probabilities from a t2t attention-weights dict.

    Returns a tensor stacking all dot-product attention entries, or None
    when no matching entry is present.
    """
    suffix = "multihead_attention/dot_product_attention"

    attention = []
    for name, t in attention_weights.items():
        if name.endswith(suffix):
            attention.append(tf.expand_dims(t, 0))

    if not attention:
        return None
    return tf.concat(attention, 0)
def persist_tensor(name: Text, tensor: "tf.Tensor", graph: "tf.Graph") -> None:
    """Store ``tensor`` in the graph collection ``name``; no-op when None."""
    if tensor is None:
        return
    # replace any previously persisted tensor under the same name
    graph.clear_collection(name)
    graph.add_to_collection(name, tensor)
def load_tensor(name: Text) -> Optional["tf.Tensor"]:
    """Return the first tensor stored in collection ``name``, or None."""
    found = tf.get_collection(name)
    if not found:
        return None
    return found[0]
| 30.522222 | 121 | 0.636463 |
9f3448cb92c07aef15f432e404d6786fe54e9f29 | 127 | py | Python | Components/courses/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 2 | 2021-01-29T22:35:28.000Z | 2021-05-13T23:35:54.000Z | Components/courses/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 8 | 2021-03-19T11:24:23.000Z | 2022-03-12T00:57:13.000Z | Components/courses/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 1 | 2021-09-11T15:00:09.000Z | 2021-09-11T15:00:09.000Z | from django.urls import path
from Components.courses import views
urlpatterns = [
path('', views.course, name='course'),
] | 21.166667 | 42 | 0.724409 |
a37c60def7c76b06f4b0e0bde125ab4c7171bce0 | 13,197 | py | Python | model-optimizer/extensions/ops/einsum.py | uikilin100/openvino | afc5191b8c75b1de4adc8cb07c6269b52882ddfe | [
"Apache-2.0"
] | 1 | 2021-03-16T17:40:26.000Z | 2021-03-16T17:40:26.000Z | model-optimizer/extensions/ops/einsum.py | uikilin100/openvino | afc5191b8c75b1de4adc8cb07c6269b52882ddfe | [
"Apache-2.0"
] | 42 | 2020-11-23T08:09:57.000Z | 2022-02-21T13:03:34.000Z | model-optimizer/extensions/ops/einsum.py | v-Golubev/openvino | 26936d1fbb025c503ee43fe74593ee9d7862ab15 | [
"Apache-2.0"
] | 4 | 2021-04-02T08:48:38.000Z | 2021-07-01T06:59:02.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import re
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
from mo.utils.broadcasting import bi_directional_shape_broadcasting
class Einsum(Op):
    """Model Optimizer operation for Einsum (opset7).

    Provides equation parsing/normalization helpers, NCHW-layout adjustment
    of subscripts, and output shape inference for the Einsum operation.
    """
    op = 'Einsum'
    enabled = False

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': self.op,
            'op': self.op,
            'version': 'opset7',
            'infer': self.infer,
            'out_ports_count': 1,
        }
        super().__init__(graph, mandatory_props, attrs)

    def backend_attrs(self):
        # only the equation string is serialized to IR
        return ['equation']

    @staticmethod
    def is_label_elsewhere(input_subscripts: list, label_to_check: str, excluded_subscript_inds: list) -> bool:
        """
        Check if the given label is met in input subscripts excluding ones specified by a list of indices
        excluded_subscript_inds

        :param input_subscripts: input subscripts among which to check if the label is met
        :param label_to_check: a label to check
        :param excluded_subscript_inds: indices of input subscripts to be excluded for this check
        :return: True - met, False - otherwise
        """
        for ind, input_subscript in enumerate(input_subscripts):
            if ind not in excluded_subscript_inds and label_to_check in input_subscript:
                return True
        return False

    @staticmethod
    def parse_equation(node_name: str, equation: str) -> (list, str):
        """
        Parse Einsum equation and check that its format is correct to make sure that
        all input subscripts consists of only alphabetic letters or alphabetic letters with one ellipsis.
        In case of implicit mode the method recovers the right-hand part.

        :param node_name: Einsum node name for which to parse an equation
        :param equation: Equation to be parsed and checked
        :return: A tuple of a list of input subscripts and output subscript
        """
        # normalize equation by removing white-spaces
        equation = equation.strip()

        # split equation into the left and right hands
        splitted_equation = equation.split('->')
        assert len(splitted_equation) <= 2, "Einsum node {} has `equation` of incorrect format".format(node_name)

        # split left-hand side of the equation and check a format of input subscripts
        input_subscripts = splitted_equation[0]
        input_subscripts_list = input_subscripts.split(',')

        # prepare pattern to check a format of subscripts
        # (letters optionally surrounding at most one ellipsis)
        subscript_pattern = re.compile("^[a-zA-Z]*(\\.\\.\\.){0,1}[a-zA-Z]*$")
        ellipsis_pattern = re.compile("\\.\\.\\.")

        is_ellipsis_met = False
        for input_subscript in input_subscripts_list:
            assert re.match(subscript_pattern, input_subscript) is not None, \
                "Einsum node {} has `equation` with incorrect input subscript: {}".format(node_name, input_subscript)
            is_ellipsis_met = is_ellipsis_met or re.search(ellipsis_pattern, input_subscript)

        if len(splitted_equation) == 2:
            # explicit mode: validate the user-provided output subscript
            output_subscript = splitted_equation[1]
            assert re.match(subscript_pattern, output_subscript), \
                "Einsum node {} has `equation` with incorrect output subscript: {}".format(node_name, output_subscript)
            # if ellipsis is met, the output subscript must contain it as well
            if is_ellipsis_met:
                assert re.search(ellipsis_pattern, output_subscript), \
                    "The output subscript of Einsum node {} must contain ellipsis".format(node_name)
        elif len(splitted_equation) == 1:
            # recover output subscript in case implicit mode:
            # labels occurring in exactly one input subscript, sorted alphabetically
            output_subscript = ""
            for ind, input_subscript in enumerate(input_subscripts_list):
                labels = Einsum.extract_subscript_labels(node_name, input_subscript)
                for label in labels:
                    if Einsum.is_label_elsewhere(input_subscripts_list, label, [ind]) is False:
                        output_subscript += label
            output_subscript = ''.join(sorted(list(set(output_subscript) - {'.'})))
            if is_ellipsis_met:
                output_subscript = "..." + output_subscript
        else:
            assert False, "Einsum node {} equation has incorrect format. " \
                          "It must be in either explicit or implicit mode.".format(node_name)

        return input_subscripts_list, output_subscript

    @staticmethod
    def normalize_equation(node_name: str, equation: str) -> str:
        """
        Recover explicit mode of equation.

        :param node_name: Einsum node name for which to recover explicit mode
        :param equation: Einsum equation to recover explicit mode
        :return: Recovered equation in explicit mode
        """
        input_subscripts_list, output_subscript = Einsum.parse_equation(node_name, equation)
        return ','.join(input_subscripts_list) + "->" + output_subscript

    @staticmethod
    def extract_subscript_labels(node_name: str, subscript: str) -> list:
        """
        Extract labels for given subscript. Each label can be either alphabetic letter or ellipsis

        :param node_name: Einsum node name
        :param subscript: Given subscript
        :return: A list of labels
        """
        labels = []
        len_subscript = len(subscript)
        label_ind = 0
        while label_ind < len_subscript:
            if subscript[label_ind].isalpha():
                labels.append(subscript[label_ind])
                label_ind += 1
            elif len_subscript - label_ind > 2 and subscript[label_ind:label_ind + 3] == "...":
                labels.append("...")
                label_ind += 3
            else:
                assert False, "Einsum node {} has `equation` with incorrect subscript: {}".format(node_name, subscript)
        return labels

    @staticmethod
    def adjust_equation_with_NCHW_layout(node_name: str, equation: str, input_ranks: list, output_rank: int,
                                         input_correct_layout_mask: list, output_correct_layout_mask: bool) -> (
            str, list, bool):
        """
        In order to satisfy NCHW layout, subscripts for tensors with rank greater than three must be adjusted by moving labels
        of the last dimension to the second position in the subscript. There is an exception for such tensors when
        the label is ellipsis and it covers multiple tail dimensions. The method returns equation with adjusted subscripts
        to NCHW layout along with a boolean mask to indicate which subscripts are adjusted.

        :param node_name: Einsum node name for which equation is adjusted
        :param equation: Equation to be adjusted
        :param input_ranks: a list of input ranks
        :param output_rank: output rank
        :return: adjusted equation, boolean mask for inputs, and boolean flag if output subscript is adjusted
        """
        is_inputs_adjusted = []
        input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation)
        num_inputs = len(input_ranks)
        assert len(input_subscripts) == num_inputs, "The number of inputs must match a number " \
                                                    "of input subscripts"
        assert len(input_correct_layout_mask) == num_inputs, "The number of inputs must match a number " \
                                                             "elements in input_correct_layout_mask list"

        # permute labels in input subscripts and mark inputs for which inference in NCHW layout is acceptable
        # in case ellipsis covering multiple dimensions in the end, the permutation is impossible
        # so the corresponding input must be in the original format (NHWC)
        permuted_input_subscripts = []
        for input_ind in range(num_inputs):
            input_subscript = input_subscripts[input_ind]
            input_rank = input_ranks[input_ind]
            labels = Einsum.extract_subscript_labels(node_name, input_subscript)
            num_broadcasted_dims = input_rank - len(labels) + 1
            if input_correct_layout_mask[input_ind]:
                # input is already in the correct layout - leave subscript as-is
                is_inputs_adjusted.append(True)
            elif input_rank > 3 and (labels[-1] != "..." or labels[-1] == "..." and num_broadcasted_dims == 1):
                # move the last label to the second position (NHWC -> NCHW)
                is_inputs_adjusted.append(True)
                labels.insert(1, labels[-1])
                del labels[-1]
            else:
                is_inputs_adjusted.append(False)
            permuted_input_subscript = ''.join(labels)
            permuted_input_subscripts.append(permuted_input_subscript)

        # perform the same procedure for the output subscript as for the inputs subscripts
        labels = Einsum.extract_subscript_labels(node_name, output_subscript)
        num_broadcasted_dims = output_rank - len(labels) + 1
        if output_correct_layout_mask:
            is_output_adjusted = True
        elif output_rank > 3 and (labels[-1] != "..." or labels[-1] == "..." and num_broadcasted_dims == 1):
            is_output_adjusted = True
            labels.insert(1, labels[-1])
            del labels[-1]
        else:
            is_output_adjusted = False
        permuted_output_subscript = ''.join(labels)

        # concatenate the left and right hands of the resulted equation
        left_hand = ','.join(permuted_input_subscripts)
        right_hand = permuted_output_subscript
        permuted_equation = left_hand + "->" + right_hand
        return permuted_equation, is_inputs_adjusted, is_output_adjusted

    @staticmethod
    def infer(node: Node):
        """Infer and set the output shape of the Einsum node from its equation
        and input shapes, validating label/dimension compatibility."""
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
        num_inputs = len(connected_in_ports)
        assert node.has_valid('equation'), "Einsum node {} must contain `equation` attribute".format(node_name)
        equation = node.equation

        # parse the equation and extract input and output subscripts
        input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation)

        # check that each operand has the corresponding input subscript
        assert len(input_subscripts) == num_inputs, "The number of input operands of Einsum node {} " \
                                                    "must match the number of input subscripts " \
                                                    "in `equation`".format(node_name)

        # check compatibility of dimension sizes with the same label and generate a dictionary of shapes for labels
        label_to_shape = {}
        for input_ind in range(num_inputs):
            input_shape = node.in_port(input_ind).data.get_shape()
            input_subscript = input_subscripts[input_ind]
            labels = Einsum.extract_subscript_labels(node_name, input_subscript)
            num_dims = len(input_shape)
            num_labels = len(labels)
            # number of tail dimensions covered by a single ellipsis label
            num_broadcasted_dims = num_dims - num_labels + 1
            dim_ind = 0
            label_ind = 0
            while label_ind < num_labels and dim_ind < num_dims:
                label = labels[label_ind]
                if label == "...":
                    # ellipsis covers a (possibly empty) run of dimensions;
                    # runs from different inputs must be broadcastable
                    sub_shape = input_shape[dim_ind:dim_ind + num_broadcasted_dims]
                    if label in label_to_shape.keys():
                        common_shape = bi_directional_shape_broadcasting(sub_shape, label_to_shape[label])
                        assert common_shape is not None, "The dimensions labeled of ellipsis must be broadcastable " \
                                                         "for Einsum node {}".format(node_name)
                        label_to_shape[label] = common_shape
                    else:
                        label_to_shape[label] = sub_shape
                    dim_ind += num_broadcasted_dims
                else:
                    # a letter label maps to exactly one dimension and its
                    # size must agree across all occurrences
                    dim_size = input_shape[dim_ind]
                    sub_shape = int64_array([dim_size])
                    assert label not in label_to_shape.keys() or np.array_equal(label_to_shape[label], sub_shape), \
                        "Sizes of dimensions with the same label of Einsum node {} " \
                        "must be compatible".format(node_name)
                    label_to_shape[label] = sub_shape
                    dim_ind += 1
                label_ind += 1

        # generate output shape based on the output subscript
        output_shape = int64_array([])
        labels = Einsum.extract_subscript_labels(node_name, output_subscript)
        for label in labels:
            assert label in label_to_shape.keys(), "The label in the output subscript must appear" \
                                                   " in input subscripts in equation {} " \
                                                   "of Einsum node {}".format(equation, node_name)
            output_shape = np.concatenate((output_shape, label_to_shape[label]))

        node.out_port(0).data.set_shape(output_shape)
| 50.563218 | 126 | 0.628249 |
c2ec43a010039adbdb8e298d074f7969c324c1ee | 655 | py | Python | tests/test_api_root.py | jacebrowning/memegen-flask | e4e67e76f061fa4e418901031b6086966376b8f3 | [
"MIT"
] | 3 | 2020-09-02T13:11:11.000Z | 2020-12-24T00:41:56.000Z | tests/test_api_root.py | jacebrowning/memegen-flask | e4e67e76f061fa4e418901031b6086966376b8f3 | [
"MIT"
] | 13 | 2020-08-30T21:38:53.000Z | 2020-09-05T03:19:17.000Z | tests/test_api_root.py | jacebrowning/memegen-flask | e4e67e76f061fa4e418901031b6086966376b8f3 | [
"MIT"
] | null | null | null | # pylint: disable=unused-variable,expression-not-assigned
from expecter import expect
from .utils import load
def describe_root():
def it_returns_links_and_metadata(client):
status, data = load(client.get("/api/"))
expect(status) == 200
expect(data) == {
'templates': "http://localhost/api/templates/",
'fonts': "http://localhost/api/fonts/",
'aliases': "http://localhost/api/aliases/",
'search': "http://localhost/api/search/",
'version': "5.6",
'changes': "https://raw.githubusercontent.com/jacebrowning/memegen-flask/main/CHANGELOG.md"
}
| 29.772727 | 103 | 0.606107 |
2250647ad63d5e249358df55258111b76ac254a8 | 23,466 | py | Python | ginga/AutoCuts.py | chyan26/ginga | e00c887d8660e0a4178f9681ca7ea7784b7ca129 | [
"BSD-3-Clause"
] | 1 | 2019-04-11T02:22:48.000Z | 2019-04-11T02:22:48.000Z | ginga/AutoCuts.py | chyan26/ginga | e00c887d8660e0a4178f9681ca7ea7784b7ca129 | [
"BSD-3-Clause"
] | null | null | null | ginga/AutoCuts.py | chyan26/ginga | e00c887d8660e0a4178f9681ca7ea7784b7ca129 | [
"BSD-3-Clause"
] | null | null | null | #
# AutoCuts.py -- class for calculating auto cut levels
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
import threading
from ginga.misc import Bunch
#from ginga.misc.ParamSet import Param
from ginga.util import zscale
have_scipy = True
autocut_methods = ('minmax', 'median', 'histogram', 'stddev', 'zscale')
try:
import scipy.ndimage.filters
import scipy.optimize as optimize
#import scipy.misc
except ImportError:
have_scipy = False
autocut_methods = ('minmax', 'histogram', 'stddev', 'zscale')
# Lock to work around a non-threadsafe bug in scipy
_lock = threading.RLock()
class Param(Bunch.Bunch):
    """Descriptor for a single tunable autocuts parameter.

    A thin alias over Bunch.Bunch; instances are created in each algorithm's
    get_params_metadata() with keys such as name, type, default, description.
    """
    pass
class AutoCutsError(Exception):
    """Error raised (or used as an assertion message) by autocuts algorithms."""
    pass
class AutoCutsBase(object):
    """Common machinery shared by all automatic cut-levels algorithms."""

    @classmethod
    def get_params_metadata(cls):
        # the base algorithm exposes no tunable parameters
        return []

    def __init__(self, logger):
        super(AutoCutsBase, self).__init__()
        self.logger = logger
        self.kind = 'base'
        self.crop_radius = 512

    def update_params(self, **param_dict):
        # TODO: find a cleaner way to update these
        self.__dict__.update(param_dict)

    def get_algorithms(self):
        """Return the names of the available autocut algorithms."""
        return autocut_methods

    def get_autocut_levels(self, image):
        """Return the (lo, hi) cut levels computed for `image`."""
        return self.calc_cut_levels(image)

    def get_crop(self, image, crop_radius=None):
        """Cut a square of side 2*crop_radius from the center of `image`.

        Some autocut methods are slow on a full large image, so they can
        optionally work on this center crop instead.
        """
        if crop_radius is None:
            crop_radius = self.crop_radius

        wd, ht = image.get_size()
        data, _x1, _y1, _x2, _y2 = image.cutout_radius(wd // 2, ht // 2,
                                                       crop_radius)
        return data

    def cut_levels(self, data, loval, hival, vmin=0.0, vmax=255.0):
        """Map `data` through the [loval, hival] window onto [0, vmax]."""
        lo, hi = float(loval), float(hival)
        self.logger.debug("loval=%.2f hival=%.2f" % (lo, hi))

        spread = hi - lo
        if spread != 0.0:
            # normal case: clip into the window, then rescale to [0, 1]
            out = (data.clip(lo, hi) - lo) / spread
        else:
            # degenerate window (lo == hi): threshold to a binary image
            out = data - lo
            out.clip(0.0, 1.0, out=out)
            out[np.nonzero(out)] = 1.0

        # scale [0, 1] to the output range, in place for speed
        out.clip(0.0, 1.0, out=out)
        np.multiply(out, vmax, out=out)
        return out

    def __str__(self):
        return self.kind
class Clip(AutoCutsBase):
    """Pass the data through unchanged, clipping only to the output range."""

    def __init__(self, logger):
        super(Clip, self).__init__(logger)
        self.kind = 'clip'

    def calc_cut_levels(self, image):
        """Use the full data range of the image as the cut levels."""
        lo, hi = image.get_minmax()
        return float(lo), float(hi)

    def cut_levels(self, data, loval, hival, vmin=0.0, vmax=255.0):
        # ignore lo/hi; simply clamp into the visible output range
        return data.clip(vmin, vmax)
class Minmax(AutoCutsBase):
    """Cut levels that span the full data range of the image."""

    def __init__(self, logger):
        super(Minmax, self).__init__(logger)
        self.kind = 'minmax'

    def calc_cut_levels(self, image):
        """Return (min, max) of the image data as floats."""
        lo, hi = image.get_minmax()
        return float(lo), float(hi)
class Histogram(AutoCutsBase):
    """Cut levels chosen so that a given percentage of the histogram is kept.

    The lo/hi levels are found by discarding (1 - pct)/2 of the pixel counts
    from each end of the histogram, interpolating within the edge bins.
    """

    @classmethod
    def get_params_metadata(cls):
        # NOTE: `_bool` is a coercion helper defined elsewhere in this
        # module (not visible in this excerpt).
        return [
            Param(name='usecrop', type=_bool,
                  valid=[True, False],
                  default=True,
                  description="Use center crop of image for speed"),
            Param(name='pct', type=float,
                  widget='spinfloat', incr=0.001,
                  min=0.0, max=1.0, default=0.999,
                  description="Percentage of the histogram to retain"),
            Param(name='numbins', type=int,
                  min=100, max=10000, default=2048,
                  description="Number of bins for the histogram"),
        ]

    def __init__(self, logger, usecrop=True, pct=0.999, numbins=2048):
        super(Histogram, self).__init__(logger)
        self.kind = 'histogram'
        self.usecrop = usecrop      # compute on a center crop for speed
        self.pct = pct              # fraction of histogram counts to keep
        self.numbins = numbins      # histogram resolution

    def calc_cut_levels(self, image):
        """Return (lo, hi) cut levels for `image` via histogram analysis."""
        if self.usecrop:
            data = self.get_crop(image)
            count = np.count_nonzero(np.isfinite(data))
            if count < (self.crop_radius ** 2.0) * 0.50:
                # if we have less than 50% finite pixels then fall back
                # to using the whole array
                self.logger.debug("too many non-finite values in crop--"
                                  "falling back to full image data")
                data = image.get_data()
        else:
            data = image.get_data()

        bnch = self.calc_histogram(data, pct=self.pct, numbins=self.numbins)
        loval, hival = bnch.loval, bnch.hival
        return loval, hival

    def calc_histogram(self, data, pct=1.0, numbins=2048):
        """Histogram `data` and locate the lo/hi levels retaining `pct`.

        Returns a Bunch with the histogram (dist, bins), the interpolated
        cut levels (loval, hival), and the edge bin indices (loidx, hiidx).
        """
        self.logger.debug("Computing histogram, pct=%.4f numbins=%d" % (
            pct, numbins))
        height, width = data.shape[:2]
        self.logger.debug("Median analysis array is %dx%d" % (
            width, height))

        total_px = width * height
        dsum = np.sum(data)
        if np.isnan(dsum) or np.isinf(dsum):
            # Oh crap, the array has a NaN or Inf value.
            # We have to workaround this by making a copy of the array
            # and substituting for the problem values, otherwise numpy's
            # histogram() cannot handle it
            self.logger.warning("NaN's found in data, using workaround for histogram")
            data = data.copy()
            # TODO: calculate a reasonable replacement value
            data[np.isinf(data)] = 0.0
            minval = np.nanmin(data)
            maxval = np.nanmax(data)
            substval = (minval + maxval) / 2.0
            data[np.isnan(data)] = substval
            data[np.isinf(data)] = substval
            ## dsum = np.sum(data)
            ## if np.isnan(dsum) or np.isinf(dsum):
            ##     print "NaNs STILL PRESENT"

            dist, bins = np.histogram(data, bins=numbins,
                                      density=False)
        else:
            dist, bins = np.histogram(data, bins=numbins,
                                      density=False)

        # number of pixel counts to discard from EACH end of the histogram
        cutoff = int((float(total_px) * (1.0 - pct)) / 2.0)
        top = len(dist) - 1
        self.logger.debug("top=%d cutoff=%d" % (top, cutoff))
        #print "DIST: %s\nBINS: %s" % (str(dist), str(bins))

        # calculate low cutoff
        cumsum = np.cumsum(dist)
        li = np.flatnonzero(cumsum > cutoff)
        if len(li) > 0:
            i = li[0]
            count_px = cumsum[i]
        else:
            i = 0
            count_px = 0
        if i > 0:
            nprev = cumsum[i - 1]
        else:
            nprev = 0
        loidx = i

        # interpolate between last two low bins
        val1, val2 = bins[i], bins[i + 1]
        divisor = float(count_px) - float(nprev)
        if divisor > 0.0:
            interp = (float(cutoff) - float(nprev)) / divisor
        else:
            interp = 0.0
        loval = val1 + ((val2 - val1) * interp)
        self.logger.debug("loval=%f val1=%f val2=%f interp=%f" % (
            loval, val1, val2, interp))

        # calculate high cutoff (same procedure on the reversed histogram)
        revdist = dist[::-1]
        cumsum = np.cumsum(revdist)
        li = np.flatnonzero(cumsum > cutoff)
        if len(li) > 0:
            i = li[0]
            count_px = cumsum[i]
        else:
            i = 0
            count_px = 0
        if i > 0:
            nprev = cumsum[i - 1]
        else:
            nprev = 0
        j = top - i
        hiidx = j + 1

        # interpolate between last two high bins
        val1, val2 = bins[j], bins[j + 1]
        divisor = float(count_px) - float(nprev)
        if divisor > 0.0:
            interp = (float(cutoff) - float(nprev)) / divisor
        else:
            interp = 0.0
        hival = val1 + ((val2 - val1) * interp)
        self.logger.debug("hival=%f val1=%f val2=%f interp=%f" % (
            hival, val1, val2, interp))

        return Bunch.Bunch(dist=dist, bins=bins, loval=loval, hival=hival,
                           loidx=loidx, hiidx=hiidx)
class StdDev(AutoCutsBase):
    """Cut levels derived from the mean and standard deviation of the data.

    Uses the "hensa" scaling constants from the old SOSS fits viewer.
    """

    @classmethod
    def get_params_metadata(cls):
        return [
            Param(name='usecrop', type=_bool,
                  valid=[True, False],
                  default=True,
                  description="Use center crop of image for speed"),
            ## Param(name='hensa_lo', type=float, default=35.0,
            ##       description="Low subtraction factor"),
            ## Param(name='hensa_hi', type=float, default=90.0,
            ##       description="High subtraction factor"),
        ]

    def __init__(self, logger, usecrop=True):
        super(StdDev, self).__init__(logger)
        self.kind = 'stddev'
        # Constants used to calculate the lo and hi cut levels using the
        # "stddev" algorithm (from the old SOSS fits viewer)
        self.usecrop = usecrop
        self.hensa_lo = 35.0
        self.hensa_hi = 90.0

    def calc_cut_levels(self, image):
        """Return (lo, hi) levels based on mean/stddev of the image data."""
        if not self.usecrop:
            data = image.get_data()
        else:
            data = self.get_crop(image)
            finite = np.count_nonzero(np.isfinite(data))
            if finite < (self.crop_radius ** 2.0) * 0.50:
                # the crop is mostly NaN/Inf; use the full image instead
                self.logger.info("too many non-finite values in crop--"
                                 "falling back to full image data")
                data = image.get_data()

        return self.calc_stddev(data, hensa_lo=self.hensa_lo,
                                hensa_hi=self.hensa_hi)

    def calc_stddev(self, data, hensa_lo=35.0, hensa_hi=90.0):
        """Compute the lo/hi levels as offsets from the mean in stddev units."""
        # This is the method used in the old SOSS fits viewer
        masked = np.ma.masked_array(data, np.isnan(data))
        avg = np.mean(masked)
        sdev = np.std(masked)
        self.logger.debug("mean=%f std=%f" % (avg, sdev))

        lo = ((hensa_lo - 50.0) / 10.0) * sdev + avg
        hi = ((hensa_hi - 50.0) / 10.0) * sdev + avg
        return lo, hi
class MedianFilter(AutoCutsBase):
    """Cut levels from the extrema of a median-filtered sampling of the image."""

    @classmethod
    def get_params_metadata(cls):
        return [
            ## Param(name='usecrop', type=_bool,
            ##       valid=set([True, False]),
            ##       default=True,
            ##       description="Use center crop of image for speed"),
            Param(name='num_points', type=int,
                  default=2000, allow_none=True,
                  description="Number of points to sample"),
            Param(name='length', type=int, default=5,
                  description="Median kernel length"),
        ]

    def __init__(self, logger, num_points=2000, length=5):
        super(MedianFilter, self).__init__(logger)
        self.kind = 'median'
        self.num_points = num_points    # approximate sample size
        self.length = length            # median kernel length

    def calc_cut_levels(self, image):
        """Sample the image on an even grid and median-filter the sample."""
        wd, ht = image.get_size()
        xmax, ymax = wd - 1, ht - 1

        # choose an even row/column stride yielding roughly num_points samples
        stride = int(max(1.0, np.sqrt(xmax * ymax / float(self.num_points))))

        cutout = image.cutout_data(0, 0, xmax, ymax,
                                   xstep=stride, ystep=stride)

        return self.calc_medianfilter(cutout, length=self.length)

    def calc_medianfilter(self, data, length=5):
        """Return (lo, hi) as the extrema of median-filtered `data`."""
        assert len(data.shape) >= 2, \
            AutoCutsError("input data should be 2D or greater")
        if length is None:
            length = 5

        filtered = scipy.ndimage.filters.median_filter(data, size=length)
        return np.nanmin(filtered), np.nanmax(filtered)
class ZScale(AutoCutsBase):
    """
    Based on STScI's numdisplay implementation of IRAF's ZScale.
    """

    @classmethod
    def get_params_metadata(cls):
        return [
            Param(name='contrast', type=float,
                  default=0.25, allow_none=False,
                  description="Contrast"),
            Param(name='num_points', type=int,
                  default=1000, allow_none=True,
                  description="Number of points to sample"),
        ]

    def __init__(self, logger, contrast=0.25, num_points=1000):
        super(ZScale, self).__init__(logger)
        self.kind = 'zscale'
        self.contrast = contrast        # zscale contrast parameter (0, 1]
        self.num_points = num_points    # sample size; None => auto

    def calc_cut_levels(self, image):
        """Sample the image evenly and return zscale (lo, hi) cut levels."""
        wd, ht = image.get_size()

        # calculate num_points parameter, if omitted
        total_points = wd * ht
        if total_points == 0:
            self.logger.debug('total_points is 0, setting cut levels to 0')
            return 0, 0
        num_points = self.num_points
        if num_points is None:
            # auto: ~0.02% of the pixels, but at least 1000
            num_points = max(int(total_points * 0.0002), 1000)
        num_points = min(num_points, total_points)

        assert (0 < num_points <= total_points), \
            AutoCutsError("num_points not in range 0-%d" % (total_points))

        # sample the data
        xmax = wd - 1
        ymax = ht - 1
        # evenly spaced sampling over rows and cols
        xskip = int(max(1.0, np.sqrt(xmax * ymax / float(num_points))))
        yskip = xskip

        cutout = image.cutout_data(0, 0, xmax, ymax,
                                   xstep=xskip, ystep=yskip)

        loval, hival = self.calc_zscale(cutout, contrast=self.contrast,
                                        num_points=self.num_points)
        return loval, hival

    def calc_zscale(self, data, contrast=0.25, num_points=1000):
        """Compute zscale levels from a 2D (or higher) data array."""
        # NOTE: num_per_row is ignored in this implementation
        assert len(data.shape) >= 2, \
            AutoCutsError("input data should be 2D or greater")
        ht, wd = data.shape[:2]

        # sanity check on contrast parameter
        assert (0.0 < contrast <= 1.0), \
            AutoCutsError("contrast (%.2f) not in range 0 < c <= 1" % (
                contrast))

        # remove masked elements, they cause problems
        data = data[np.logical_not(np.ma.getmaskarray(data))]
        # remove NaN and Inf from samples
        samples = data[np.isfinite(data)].flatten()
        samples = samples[:num_points]

        loval, hival = zscale.zscale_samples(samples, contrast=contrast)
        return loval, hival
class ZScale2(AutoCutsBase):
    """Auto cut levels via a pure-python port of IRAF's zscale algorithm.

    NOTE(review): this class is currently not registered in
    ``autocuts_table`` ('zscale2' is commented out there); ``ZScale`` is the
    active zscale implementation.
    """

    @classmethod
    def get_params_metadata(cls):
        # Tunable parameters exposed to the settings/UI layer.
        # NOTE(review): default num_points here (600) differs from the
        # __init__ default (1000) -- confirm which is intended.
        return [
            Param(name='contrast', type=float,
                  default=0.25, allow_none=True,
                  description="Contrast"),
            Param(name='num_points', type=int,
                  default=600, allow_none=True,
                  description="Number of points to sample"),
            Param(name='num_per_row', type=int,
                  default=None, allow_none=True,
                  description="Number of points to sample"),
        ]

    def __init__(self, logger, contrast=0.25, num_points=1000,
                 num_per_row=None):
        super(ZScale2, self).__init__(logger)
        self.kind = 'zscale'
        # contrast in (0, 1]: smaller values stretch the computed range wider
        self.contrast = contrast
        # total sample size drawn from the image (None -> derived from size)
        self.num_points = num_points
        # samples per grid row (None -> derived from num_points)
        self.num_per_row = num_per_row

    def calc_cut_levels(self, image):
        """Return (loval, hival) cut levels for *image*."""
        data = image.get_data()
        loval, hival = self.calc_zscale(data, contrast=self.contrast,
                                        num_points=self.num_points,
                                        num_per_row=self.num_per_row)
        return loval, hival

    def calc_zscale(self, data, contrast=0.25,
                    num_points=1000, num_per_row=None):
        """
        From the IRAF documentation:

        The zscale algorithm is designed to display the image values
        near the median image value without the time consuming process of
        computing a full image histogram.  This is particularly useful for
        astronomical images which generally have a very peaked histogram
        corresponding to the background sky in direct imaging or the
        continuum in a two dimensional spectrum.

        The sample of pixels, specified by values greater than zero in the
        sample mask zmask or by an image section, is selected up to a
        maximum of nsample pixels.  If a bad pixel mask is specified by the
        bpmask parameter then any pixels with mask values which are greater
        than zero are not counted in the sample.  Only the first pixels up
        to the limit are selected where the order is by line beginning from
        the first line.  If no mask is specified then a grid of pixels with
        even spacing along lines and columns that make up a number less
        than or equal to the maximum sample size is used.

        If a contrast of zero is specified (or the zrange flag is used and
        the image does not have a valid minimum/maximum value) then the
        minimum and maximum of the sample is used for the intensity mapping
        range.

        If the contrast is not zero the sample pixels are ranked in
        brightness to form the function I(i), where i is the rank of the
        pixel and I is its value.  Generally the midpoint of this function
        (the median) is very near the peak of the image histogram and there
        is a well defined slope about the midpoint which is related to the
        width of the histogram.  At the ends of the I(i) function there are
        a few very bright and dark pixels due to objects and defects in the
        field.  To determine the slope a linear function is fit with
        iterative rejection;

            I(i) = intercept + slope * (i - midpoint)

        If more than half of the points are rejected then there is no well
        defined slope and the full range of the sample defines z1 and z2.
        Otherwise the endpoints of the linear function are used (provided
        they are within the original range of the sample):

            z1 = I(midpoint) + (slope / contrast) * (1 - midpoint)
            z2 = I(midpoint) + (slope / contrast) * (npoints - midpoint)

        As can be seen, the parameter contrast may be used to adjust the
        contrast produced by this algorithm.
        """
        assert len(data.shape) >= 2, \
            AutoCutsError("input data should be 2D or greater")
        ht, wd = data.shape[:2]

        assert (0.0 < contrast <= 1.0), \
            AutoCutsError("contrast (%.2f) not in range 0 < c <= 1" % (
                contrast))

        # calculate num_points parameter, if omitted
        total_points = np.size(data)
        if num_points is None:
            # fraction of the image, with a floor of 600 samples
            num_points = max(int(total_points * 0.0002), 600)
        num_points = min(num_points, total_points)
        assert (0 < num_points <= total_points), \
            AutoCutsError("num_points not in range 0-%d" % (total_points))

        # calculate num_per_row parameter, if omitted
        if num_per_row is None:
            num_per_row = max(int(0.015 * num_points), 1)
        self.logger.debug("contrast=%.4f num_points=%d num_per_row=%d" % (
            contrast, num_points, num_per_row))

        # sample the data on an evenly spaced grid over rows and cols
        num_rows = num_points // num_per_row
        xmax = wd - 1
        xskip = max(xmax // num_per_row, 1)
        ymax = ht - 1
        yskip = max(ymax // num_rows, 1)
        # evenly spaced sampling over rows and cols
        ## xskip = int(max(1.0, np.sqrt(xmax * ymax / float(num_points))))
        ## yskip = xskip

        cutout = data[0:ymax:yskip, 0:xmax:xskip]
        # flatten and trim off excess
        cutout = cutout.flat[0:num_points]

        # actual number of points selected
        num_pix = len(cutout)
        assert num_pix <= num_points, \
            AutoCutsError("Actual number of points (%d) exceeds calculated "
                          "number (%d)" % (num_pix, num_points))

        # sort the data by value (rank -> I(i) of the IRAF description)
        cutout = np.sort(cutout)

        # flat distribution (or contrast disabled)? -> use raw min/max
        data_min = np.nanmin(cutout)
        data_max = np.nanmax(cutout)
        if (data_min == data_max) or (contrast == 0.0):
            return (data_min, data_max)

        # compute the midpoint and median of the ranked samples
        midpoint = (num_pix // 2)
        if num_pix % 2 != 0:
            median = cutout[midpoint]
        else:
            median = 0.5 * (cutout[midpoint - 1] + cutout[midpoint])
        self.logger.debug("num_pix=%d midpoint=%d median=%.4f" % (
            num_pix, midpoint, median))

        ## # Remove outliers to aid fitting
        ## threshold = np.std(cutout) * 2.5
        ## cutout = cutout[np.where(np.fabs(cutout - median) > threshold)]
        ## num_pix = len(cutout)

        # zscale fitting function:
        # I(x) = slope * (x - midpoint) + intercept
        def fitting(x, slope, intercept):
            y = slope * (x - midpoint) + intercept
            return y

        # compute a least squares fit of the ranked values
        X = np.arange(num_pix)
        Y = cutout
        sigma = np.array([1.0] * num_pix)
        guess = np.array([0.0, 0.0])

        # Curve fit
        with _lock:
            # NOTE: without this mutex, optimize.curvefit causes a fatal error
            # sometimes--it appears not to be thread safe.
            # The error is:
            # "SystemError: null argument to internal routine"
            # "Fatal Python error: GC object already tracked"
            try:
                p, cov = optimize.curve_fit(fitting, X, Y, guess, sigma)
            except Exception as e:
                self.logger.debug("curve fitting failed: %s" % (str(e)))
                cov = None

        if cov is None:
            # fit failed: fall back to the raw sample range
            self.logger.debug("curve fitting failed")
            return (float(data_min), float(data_max))

        slope, intercept = p
        ## num_chosen = 0
        self.logger.debug("intercept=%f slope=%f" % (
            intercept, slope))
        ## if num_chosen < (num_pix // 2):
        ##     self.logger.debug("more than half pixels rejected--falling back to min/max of sample")
        ##     return (data_min, data_max)

        # finally, compute the range: steeper slope / lower contrast -> wider
        falloff = slope / contrast
        z1 = median - midpoint * falloff
        z2 = median + (num_pix - midpoint) * falloff

        # final sanity check on cut levels: clamp to the sample range
        locut = max(z1, data_min)
        hicut = min(z2, data_max)
        if locut >= hicut:
            locut = data_min
            hicut = data_max
        return (float(locut), float(hicut))
# funky boolean converter: maps "True"/"true" (or anything else) to a bool
_bool = lambda st: str(st).lower() == 'true'  # noqa

# Registry of available autocuts algorithms, keyed by method name.
# get_autocuts()/get_autocuts_names() below consult this table.
autocuts_table = {
    'clip': Clip,
    'minmax': Minmax,
    'stddev': StdDev,
    'histogram': Histogram,
    'median': MedianFilter,
    'zscale': ZScale,
    # ZScale2 is deliberately left unregistered; 'zscale' maps to ZScale
    #'zscale2': ZScale2,
}
def get_autocuts(name):
    """Return the autocuts class registered under *name*.

    Parameters
    ----------
    name : str
        A key of ``autocuts_table`` (e.g. 'minmax', 'zscale').

    Raises
    ------
    AutoCutsError
        If no implementation is registered under *name*.
    """
    # BUGFIX: validate against the registry we actually index into.
    # The old code checked `autocut_methods`, which can drift out of sync
    # with `autocuts_table` (e.g. 'zscale2' is commented out of the table),
    # in which case the lookup raised a raw KeyError instead of the
    # intended AutoCutsError.
    if name not in autocuts_table:
        raise AutoCutsError("Method '%s' is not supported" % (name))
    return autocuts_table[name]
def get_autocuts_names():
    """Return the registered autocuts method names, sorted alphabetically."""
    return sorted(autocuts_table.keys())
# END
| 34.45815 | 101 | 0.568056 |
b5e87948037fbea0a8865066bfa553c42161c791 | 493 | py | Python | archive/online/2016/160315_increase_proofread_clavierbuch.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 5 | 2019-01-21T19:59:27.000Z | 2021-02-06T12:56:28.000Z | archive/online/2016/160315_increase_proofread_clavierbuch.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 697 | 2017-11-19T12:41:11.000Z | 2022-03-31T07:35:04.000Z | archive/online/2016/160315_increase_proofread_clavierbuch.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 1 | 2018-02-18T23:01:13.000Z | 2018-02-18T23:01:13.000Z | # -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
import re
import requests
import pywikibot
from pywikibot import proofreadpage

# One-shot maintenance script: walk ProofreadPage pages 1..127 of the
# Clavierbuch scan, stamp the proofreader as THEbotIT and try to bump each
# page's proofread status; pages that cannot be validated are skipped.
site = pywikibot.Site()
for i in range(1, 128):
    page = pywikibot.proofreadpage.ProofreadPage(site, 'Seite:Versuch über die wahre Art das Clavier zu spielen Teil 1 1759.pdf/{}'.format(i))
    print(page.status)
    # _full_header is a private pywikibot API; sets the proofreader user
    page._full_header.user = 'THEbotIT'
    try:
        page.validate()
        page.save()
    except Exception as err:
        # BUGFIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid every failure. Keep the
        # best-effort behavior but stay interruptible and report skips.
        print('skipping page {}: {}'.format(i, err))
112b1a0383fff431925d7c2de1201a9c3e502e39 | 4,935 | py | Python | intmcp/model/history.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | intmcp/model/history.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | intmcp/model/history.py | RDLLab/i-ntmcp | 63deec3d956d41a0ad4b66a707536893859e4e9f | [
"MIT"
] | null | null | null | """A history in the environment """
from typing import Tuple, Optional
from intmcp.model import Action, Observation, JointAction, JointObservation
class AgentHistory:
    """An action-observation history for a single agent in a POSG environment.

    Stored as an ordered tuple of (action, observation) pairs, one entry per
    environment time step. Histories are treated as immutable: extending or
    truncating always returns a new AgentHistory instance.
    """

    def __init__(self, history: Tuple[Tuple[Action, Observation], ...]):
        self.history = history
        # pylint: disable=invalid-name
        self.t = len(history) - 1

    def extend(self, action: Action, obs: Observation) -> "AgentHistory":
        """Return a copy of this history with (action, obs) appended."""
        return AgentHistory(self.history + ((action, obs),))

    def get_sub_history(self, horizon: int) -> "AgentHistory":
        """Return the prefix of this history up to the given horizon."""
        assert 0 < horizon <= len(self.history), (
            "Cannot get sub history horizon must be 0 < horizon <= "
            f"len(history: 0 < {horizon} <= len(self.history) invalid"
        )
        if horizon == len(self.history):
            return self
        return AgentHistory(self.history[:horizon])

    def get_last_step(self) -> Tuple[Action, Observation]:
        """Return the most recent (action, observation) pair."""
        return self.history[-1]

    @classmethod
    def get_init_history(cls,
                         obs: Optional[Observation] = None) -> "AgentHistory":
        """Return an initial history: empty, or one null-action step with *obs*."""
        if obs is None:
            return cls(())
        return cls(((Action.get_null_action(), obs),))

    def __hash__(self):
        return hash(self.history)

    def __eq__(self, other):
        return isinstance(other, AgentHistory) and self.history == other.history

    def __str__(self):
        steps = ",".join(f"({a}, {o})" for a, o in self.history)
        return f"h_{self.t}=<{steps}>"

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, key):
        return self.history[key]

    def __iter__(self):
        return AgentHistoryIterator(self)
class AgentHistoryIterator:
    """Iterator over the (action, observation) steps of an AgentHistory."""

    def __init__(self, history: AgentHistory):
        self.history = history
        self._idx = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self._idx >= len(self.history.history):
            raise StopIteration
        step = self.history[self._idx]
        self._idx += 1
        return step
class JointHistory:
    """A joint history for all agents in the environment.

    Thin wrapper around a tuple of per-agent AgentHistory objects, kept in
    agent-id order.
    """

    def __init__(self, agent_histories: Tuple[AgentHistory, ...]):
        self.agent_histories = agent_histories
        self.num_agents = len(self.agent_histories)

    @classmethod
    def get_init_history(cls,
                         num_agents: int,
                         obs: Optional[JointObservation] = None
                         ) -> "JointHistory":
        """Return an initial joint history for *num_agents* agents.

        If *obs* is given, each agent history is seeded with that agent's
        component of the joint observation.
        """
        if obs is None:
            return cls(tuple(
                AgentHistory.get_init_history() for _ in range(num_agents)
            ))
        return cls(tuple(
            AgentHistory.get_init_history(obs.get_agent_obs(i))
            for i in range(num_agents)
        ))

    def get_agent_history(self, agent_id: int) -> AgentHistory:
        """Return the history of the given agent."""
        return self.agent_histories[agent_id]

    def extend(self,
               action: JointAction,
               obs: JointObservation) -> "JointHistory":
        """Return a new joint history extended by the joint (action, obs) pair."""
        new_agent_histories = []
        for i in range(self.num_agents):
            # each agent receives only its own component of action and obs
            new_agent_histories.append(self.agent_histories[i].extend(
                action.get_agent_action(i), obs.get_agent_obs(i)
            ))
        return JointHistory(tuple(new_agent_histories))

    def get_sub_history(self, horizon: int) -> "JointHistory":
        """Return the joint history truncated to the given horizon."""
        sub_agent_histories = []
        for i in range(self.num_agents):
            sub_agent_histories.append(
                self.agent_histories[i].get_sub_history(horizon)
            )
        return JointHistory(tuple(sub_agent_histories))

    def __hash__(self):
        return hash(self.agent_histories)

    def __eq__(self, other):
        if not isinstance(other, JointHistory):
            return False
        return self.agent_histories == other.agent_histories

    def __str__(self):
        # one line per agent: "<agent_id> <agent history>"
        h_str = [f"{i} {h}" for i, h in enumerate(self.agent_histories)]
        h_str.insert(0, "<JointHistory:")
        h_str.append(">")
        return "\n".join(h_str)

    def __repr__(self):
        return self.__str__()
48de38a63212f5aac92c1619c905090e1eeb735d | 2,959 | py | Python | test/test_optim.py | 6Ulm/POT | 28dd2fe2c461ac2287ae51e464f0b002d61f5f31 | [
"MIT"
] | 1 | 2021-04-14T20:16:47.000Z | 2021-04-14T20:16:47.000Z | test/test_optim.py | 6Ulm/POT | 28dd2fe2c461ac2287ae51e464f0b002d61f5f31 | [
"MIT"
] | null | null | null | test/test_optim.py | 6Ulm/POT | 28dd2fe2c461ac2287ae51e464f0b002d61f5f31 | [
"MIT"
] | 1 | 2020-12-07T08:47:55.000Z | 2020-12-07T08:47:55.000Z | """Tests for module optim fro OT optimization """
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import ot
def test_conditional_gradient():
    """ot.optim.cg with a quadratic regularizer returns a valid coupling."""
    n_bins = 100  # nb bins
    np.random.seed(0)
    # bin positions
    x = np.arange(n_bins, dtype=np.float64)
    # Gaussian distributions
    a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5)  # m= mean, s= std
    b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
    # loss matrix (normalized squared distances between bin positions)
    M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
    M /= M.max()

    # quadratic regularization term and its gradient
    def f(G):
        return 0.5 * np.sum(G**2)

    def df(G):
        return G

    reg = 1e-1
    G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True)

    # marginals of the returned transport plan must match a and b
    np.testing.assert_allclose(a, G.sum(1))
    np.testing.assert_allclose(b, G.sum(0))
def test_conditional_gradient2():
    """ot.optim.cg on 2D Gaussian samples with a raised EMD iteration cap."""
    n = 1000  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    mu_t = np.array([4, 4])
    cov_t = np.array([[1, -.8], [-.8, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
    xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)

    # uniform weights on both point clouds
    a, b = np.ones((n,)) / n, np.ones((n,)) / n

    # loss matrix
    M = ot.dist(xs, xt)
    M /= M.max()

    # quadratic regularization term and its gradient
    def f(G):
        return 0.5 * np.sum(G**2)

    def df(G):
        return G

    reg = 1e-1

    G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=200000,
                         verbose=True, log=True)

    # marginals of the returned transport plan must match a and b
    np.testing.assert_allclose(a, G.sum(1))
    np.testing.assert_allclose(b, G.sum(0))
def test_generalized_conditional_gradient():
    """ot.optim.gcg (entropic + quadratic regularization) returns a valid coupling."""
    n_bins = 100  # nb bins
    np.random.seed(0)
    # bin positions
    x = np.arange(n_bins, dtype=np.float64)
    # Gaussian distributions
    a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5)  # m= mean, s= std
    b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
    # loss matrix
    M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
    M /= M.max()

    # quadratic regularization term and its gradient
    def f(G):
        return 0.5 * np.sum(G**2)

    def df(G):
        return G

    reg1 = 1e-3  # entropic regularization strength
    reg2 = 1e-1  # quadratic regularization strength

    G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True)

    # looser tolerance: gcg marginals are only approximately satisfied
    np.testing.assert_allclose(a, G.sum(1), atol=1e-05)
    np.testing.assert_allclose(b, G.sum(0), atol=1e-05)
def test_solve_1d_linesearch_quad_funct():
    """Minimizer of a*t^2 + b*t + c on [0, 1]: interior point and both clamps."""
    np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(1, -1, 0), 0.5)
    np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 5, 0), 0)
    np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 0.5, 0), 1)
def test_line_search_armijo():
    """Armijo line search on a constant objective must fail gracefully."""
    xk = np.array([[0.25, 0.25], [0.25, 0.25]])
    pk = np.array([[-0.25, 0.25], [0.25, -0.25]])
    gfk = np.array([[23.04273441, 23.0449082], [23.04273441, 23.0449082]])
    old_fval = -123
    # Should not throw an exception and return None for alpha
    alpha, _, _ = ot.optim.line_search_armijo(lambda x: 1, xk, pk, gfk, old_fval)
    assert alpha is None
5c8005bfbc417d308aaac96e2aaf9ca7ab861d0d | 1,578 | py | Python | examples/connectfour/_shared.py | SSS-Says-Snek/hisock | 16818f27b018027496ac316c479db225e825e1ea | [
"MIT"
] | 23 | 2021-09-11T22:29:54.000Z | 2022-03-29T03:34:56.000Z | examples/connectfour/_shared.py | SSS-Says-Snek/hisock | 16818f27b018027496ac316c479db225e825e1ea | [
"MIT"
] | 32 | 2021-09-12T03:54:58.000Z | 2022-03-31T07:30:57.000Z | examples/connectfour/_shared.py | SSS-Says-Snek/hisock | 16818f27b018027496ac316c479db225e825e1ea | [
"MIT"
] | 9 | 2021-11-16T04:43:23.000Z | 2022-01-31T12:05:38.000Z | from __future__ import annotations
# RGB fill colours for rendering a board cell, keyed by the integer
# cell codes declared in BoardEnum below.
PIECE_COLORS = {
    -1: (50, 50, 50),  # NO_PIECE
    0: (255, 0, 0),  # RED
    1: (255, 255, 0),  # YELLOW
    2: (120, 0, 0),  # HOVER RED
    3: (120, 120, 0),  # HOVER YELLOW
}
class BoardEnum:
    """Integer codes stored in board cells (also keys of PIECE_COLORS)."""
    NO_PIECE = -1
    RED = 0
    YELLOW = 1
    # darker HOVER_* variants -- presumably for the piece preview in the
    # GUI; confirm against the rendering code
    HOVER_RED = 2
    HOVER_YELLOW = 3
class Board:
    """Connect-four board state: 6 rows x 7 columns, addressed board[y][x]."""

    def __init__(self):
        # board[y][x]; every cell starts empty (BoardEnum.NO_PIECE)
        self.board: list[list[int]] = [
            [BoardEnum.NO_PIECE for _ in range(7)] for _ in range(6)
        ]
        # Kept for backward compatibility with any external readers; the
        # win check below scans the four axes in both directions instead.
        self.win_vectors = (
            (0, -1),
            (1, -1),
            (1, 0),
            (1, 1),
            (0, 1),
            (-1, 1),
            (-1, 0),
            (-1, -1),
        )
        self.total_moves = 0

    def player_win(self, new_pos: tuple[int, int]) -> bool:
        """Return True if the piece at *new_pos* is part of 4+ in a row.

        BUGFIX: the previous implementation only examined 4-cell runs
        *starting* at new_pos in each direction, so it missed wins where
        the newly placed piece lands in the middle of a run, e.g.
        X X [X] X.  Here we count contiguous equal pieces in both
        directions along each of the four axes (horizontal, vertical and
        the two diagonals) and declare a win when any run reaches 4.
        """
        x0, y0 = new_pos
        piece = self.board[y0][x0]
        for dx, dy in ((1, 0), (0, 1), (1, 1), (1, -1)):
            run = 1  # the newly placed piece itself
            for sign in (1, -1):
                step = 1
                while True:
                    x = x0 + dx * sign * step
                    y = y0 + dy * sign * step
                    # stay on the 7x6 board
                    if not (0 <= x <= 6 and 0 <= y <= 5):
                        break
                    if self.board[y][x] != piece:
                        break
                    run += 1
                    step += 1
            if run >= 4:
                return True
        return False

    def make_move(self, piece_type, x, y):
        """Place *piece_type* into the cell at column x, row y."""
        self.board[y][x] = piece_type
86546fc8f29fb59cb234d07468969d074df3f007 | 1,610 | py | Python | nuageneutronsync/utils/log.py | axxyhtrx/nuage-neutron-sync | a839a4530c2f9d9c96bfaf16824c2d5d3c135209 | [
"BSD-3-Clause"
] | 2 | 2017-06-23T16:24:48.000Z | 2022-03-06T04:48:18.000Z | nuageneutronsync/utils/log.py | axxyhtrx/nuage-neutron-sync | a839a4530c2f9d9c96bfaf16824c2d5d3c135209 | [
"BSD-3-Clause"
] | 4 | 2018-02-21T13:30:31.000Z | 2021-05-31T14:38:01.000Z | nuageneutronsync/utils/log.py | axxyhtrx/nuage-neutron-sync | a839a4530c2f9d9c96bfaf16824c2d5d3c135209 | [
"BSD-3-Clause"
] | 5 | 2017-06-06T14:21:57.000Z | 2019-01-25T03:15:52.000Z | #!/usr/bin/env python
import logging
import logging.handlers
import sys
import os
# Mapping of config-file level names (lower case) to logging module constants.
LEVELS = {'debug': logging.DEBUG,
          'info': logging.INFO,
          'warning': logging.WARNING,
          'error': logging.ERROR,
          'critical': logging.CRITICAL}
def setlogpath(path, config):
    """(Re)configure the 'nuage-neutron-sync' logger to log to *path*.

    Creates the log file's parent directory if needed, installs a (possibly
    rotating) file handler as the logger's only handler and disables
    propagation to the root logger.

    :param path: full path of the log file
    :param config: configuration object whose get_log_config() returns
        (enable_rotate, maxsize_in_megabytes, backup_count)
    """
    logger = logging.getLogger('nuage-neutron-sync')
    log_dir = os.path.dirname(path)
    # guard against a bare filename: os.makedirs('') raises
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    enable_rotate, maxsize, backups = config.get_log_config()
    if enable_rotate:
        # maxsize is expressed in megabytes by the config layer
        fileh = logging.handlers.RotatingFileHandler(
            path, 'a', maxBytes=maxsize * 1000000, backupCount=backups)
    else:
        fileh = logging.FileHandler(path, 'a')
    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
    fileh.setFormatter(formatter)
    # BUGFIX: iterate over a snapshot -- removing from the live
    # logger.handlers list while iterating it skipped every other handler,
    # leaving stale handlers attached after reconfiguration.
    for hdlr in list(logger.handlers):
        logger.removeHandler(hdlr)
    logger.addHandler(fileh)
    logger.propagate = False
def setloglevel(log_level):
    """Set the 'nuage-neutron-sync' logger level from a level-name string.

    :param log_level: case-insensitive name from LEVELS (e.g. 'debug')
    :raises ValueError: if the name is unknown (maps to logging.NOTSET)
    """
    logger = logging.getLogger('nuage-neutron-sync')
    parsed = LEVELS.get(log_level.lower(), logging.NOTSET)
    if not parsed:
        raise ValueError('Invalid log level: {0}'.format(log_level))
    # announce before applying so the message is emitted under the old level
    logger.info("Loglevel set to {0}".format(log_level))
    logger.setLevel(parsed)
def start_logging():
    """Initialise stderr logging at INFO level and return the app logger.

    Also raises the third-party 'bambou' logger to WARN to suppress its
    INFO chatter.
    """
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    app_logger = logging.getLogger('nuage-neutron-sync')
    app_logger.info("Logging started with logging level INFO")
    # Suppress INFO messages from Bambou
    bambou_logger = logging.getLogger('bambou')
    bambou_logger.setLevel(logging.WARN)
    return app_logger
| 29.814815 | 112 | 0.692547 |
4489e3d8995005e279c17a43dd395bc9338535c9 | 11,026 | py | Python | PeterMaar-NetLrnChatBot/Client/PeterMaarNetworkedChatClientGUI.app/Contents/Resources/__boot__.py | alset333/NetworkedLearningChatbot | 35c62d70bdbf3fcbff478da5346ec9eeea18ec50 | [
"BSD-3-Clause"
] | 3 | 2016-01-14T02:19:43.000Z | 2017-10-17T15:52:33.000Z | PeterMaar-NetLrnChatBot/Client/PeterMaarNetworkedChatClientGUI.app/Contents/Resources/__boot__.py | alset333/NetworkedLearningChatbot | 35c62d70bdbf3fcbff478da5346ec9eeea18ec50 | [
"BSD-3-Clause"
] | null | null | null | PeterMaar-NetLrnChatBot/Client/PeterMaarNetworkedChatClientGUI.app/Contents/Resources/__boot__.py | alset333/NetworkedLearningChatbot | 35c62d70bdbf3fcbff478da5346ec9eeea18ec50 | [
"BSD-3-Clause"
def _reset_sys_path():
    """Drop the generic Resources-dir entries py2app placed at sys.path[0]."""
    import sys, os
    resources = os.environ['RESOURCEPATH']
    # the same path may appear more than once at the front of sys.path
    while sys.path[0] == resources:
        del sys.path[0]

_reset_sys_path()
"""
sys.argv emulation
This module starts a basic event loop to collect file- and url-open AppleEvents. Those get
converted to strings and stuffed into sys.argv. When that is done we continue starting
the application.
This is a workaround to convert scripts that expect filenames on the command-line to work
in a GUI environment. GUI applications should not use this feature.
NOTE: This module uses ctypes and not the Carbon modules in the stdlib because the latter
don't work in 64-bit mode and are also not available with python 3.x.
"""
import sys
import os
import time
import ctypes
import struct
class AEDesc (ctypes.Structure):
    # ctypes mirror of the Carbon AEDesc record: a descriptor type code
    # plus an opaque handle to the descriptor's data.
    _fields_ = [
        ('descKey', ctypes.c_int),
        ('descContent', ctypes.c_void_p),
    ]
class EventTypeSpec (ctypes.Structure):
    # ctypes mirror of the Carbon EventTypeSpec record, used to tell
    # ReceiveNextEvent which (class, kind) of events we want.
    _fields_ = [
        ('eventClass', ctypes.c_int),
        ('eventKind', ctypes.c_uint),
    ]
def _ctypes_setup():
    """Load the Carbon framework and declare the AE/event call signatures.

    Returns the CDLL handle with argtypes/restype configured for every
    Carbon function used by _run_argvemulator below.
    """
    carbon = ctypes.CDLL('/System/Library/Carbon.framework/Carbon')

    # NOTE(review): timer_func appears unused after definition
    timer_func = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, ctypes.c_long)

    # AppleEvent handler: (message, reply, refcon) -> OSErr
    ae_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,
                                   ctypes.c_void_p, ctypes.c_void_p)

    carbon.AEInstallEventHandler.argtypes = [
        ctypes.c_int, ctypes.c_int, ae_callback,
        ctypes.c_void_p, ctypes.c_char]
    carbon.AERemoveEventHandler.argtypes = [
        ctypes.c_int, ctypes.c_int, ae_callback,
        ctypes.c_char]

    carbon.AEProcessEvent.restype = ctypes.c_int
    carbon.AEProcessEvent.argtypes = [ctypes.c_void_p]

    carbon.ReceiveNextEvent.restype = ctypes.c_int
    carbon.ReceiveNextEvent.argtypes = [
        ctypes.c_long, ctypes.POINTER(EventTypeSpec),
        ctypes.c_double, ctypes.c_char,
        ctypes.POINTER(ctypes.c_void_p)
    ]

    carbon.AEGetParamDesc.restype = ctypes.c_int
    carbon.AEGetParamDesc.argtypes = [
        ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
        ctypes.POINTER(AEDesc)]

    carbon.AECountItems.restype = ctypes.c_int
    carbon.AECountItems.argtypes = [ctypes.POINTER(AEDesc),
                                    ctypes.POINTER(ctypes.c_long)]

    carbon.AEGetNthDesc.restype = ctypes.c_int
    carbon.AEGetNthDesc.argtypes = [
        ctypes.c_void_p, ctypes.c_long, ctypes.c_int,
        ctypes.c_void_p, ctypes.c_void_p]

    carbon.AEGetDescDataSize.restype = ctypes.c_int
    carbon.AEGetDescDataSize.argtypes = [ctypes.POINTER(AEDesc)]

    carbon.AEGetDescData.restype = ctypes.c_int
    carbon.AEGetDescData.argtypes = [
        ctypes.POINTER(AEDesc),
        ctypes.c_void_p,
        ctypes.c_int,
    ]

    carbon.FSRefMakePath.restype = ctypes.c_int
    carbon.FSRefMakePath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]

    return carbon
def _run_argvemulator(timeout = 60):
    """Collect startup open-document/open-URL AppleEvents into sys.argv.

    Runs a minimal Carbon event loop for at most *timeout* seconds,
    translating kAEOpenDocuments events to file paths and GURL events to
    URL strings appended to sys.argv, then uninstalls the handlers.
    """
    # Configure ctypes
    carbon = _ctypes_setup()

    # Is the emulator running?  (lists so the nested handlers can mutate)
    running = [True]
    timeout = [timeout]

    # Configure AppleEvent handlers
    ae_callback = carbon.AEInstallEventHandler.argtypes[2]

    # four-char event/type codes packed to big-endian OSType ints
    kAEInternetSuite, = struct.unpack('>i', b'GURL')
    kAEISGetURL, = struct.unpack('>i', b'GURL')
    kCoreEventClass, = struct.unpack('>i', b'aevt')
    kAEOpenApplication, = struct.unpack('>i', b'oapp')
    kAEOpenDocuments, = struct.unpack('>i', b'odoc')
    keyDirectObject, = struct.unpack('>i', b'----')
    typeAEList, = struct.unpack('>i', b'list')
    typeChar, = struct.unpack('>i', b'TEXT')
    typeFSRef, = struct.unpack('>i', b'fsrf')
    FALSE = b'\0'
    TRUE = b'\1'
    eventLoopTimedOutErr = -9875

    kEventClassAppleEvent, = struct.unpack('>i', b'eppc')
    kEventAppleEvent = 1

    @ae_callback
    def open_app_handler(message, reply, refcon):
        # Got a kAEOpenApplication event, which means we can
        # start up. On some OSX versions this event is even
        # sent when an kAEOpenDocuments or kAEOpenURLs event
        # is sent later on.
        #
        # Therefore don't set running to false, but reduce the
        # timeout to at most two seconds beyond the current time.
        # (`start` is bound later in this function, before the loop runs.)
        timeout[0] = min(timeout[0], time.time() - start + 2)
        #running[0] = False
        return 0
    carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication,
                                 open_app_handler, 0, FALSE)

    @ae_callback
    def open_file_handler(message, reply, refcon):
        # Translate each FSRef in the event's direct object to a
        # filesystem path and append it to sys.argv.
        listdesc = AEDesc()
        sts = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
                                    ctypes.byref(listdesc))
        if sts != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return
        item_count = ctypes.c_long()
        sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
        if sts != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return
        desc = AEDesc()
        for i in range(item_count.value):
            sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeFSRef, 0, ctypes.byref(desc))
            if sts != 0:
                print("argvemulator warning: cannot unpack open document event")
                running[0] = False
                return
            sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
            buf = ctypes.create_string_buffer(sz)
            sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
            if sts != 0:
                print("argvemulator warning: cannot extract open document event")
                continue
            fsref = buf
            # resolve the FSRef to a POSIX path (up to 1023 bytes)
            buf = ctypes.create_string_buffer(1024)
            sts = carbon.FSRefMakePath(ctypes.byref(fsref), buf, 1023)
            if sts != 0:
                print("argvemulator warning: cannot extract open document event")
                continue
            if sys.version_info[0] > 2:
                sys.argv.append(buf.value.decode('utf-8'))
            else:
                sys.argv.append(buf.value)
        running[0] = False
        return 0
    carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments,
                                 open_file_handler, 0, FALSE)

    @ae_callback
    def open_url_handler(message, reply, refcon):
        # Append each URL string in the event's direct object to sys.argv.
        listdesc = AEDesc()
        ok = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
                                   ctypes.byref(listdesc))
        if ok != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return
        item_count = ctypes.c_long()
        sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
        if sts != 0:
            print("argvemulator warning: cannot unpack open url event")
            running[0] = False
            return
        desc = AEDesc()
        for i in range(item_count.value):
            sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeChar, 0, ctypes.byref(desc))
            if sts != 0:
                print("argvemulator warning: cannot unpack open URL event")
                running[0] = False
                return
            sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
            buf = ctypes.create_string_buffer(sz)
            sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
            if sts != 0:
                print("argvemulator warning: cannot extract open URL event")
            else:
                if sys.version_info[0] > 2:
                    sys.argv.append(buf.value.decode('utf-8'))
                else:
                    sys.argv.append(buf.value)
        running[0] = False
        return 0
    carbon.AEInstallEventHandler(kAEInternetSuite, kAEISGetURL,
                                 open_url_handler, 0, FALSE)

    # Remove the funny -psn_xxx_xxx argument (added by LaunchServices)
    if len(sys.argv) > 1 and sys.argv[1].startswith('-psn_'):
        del sys.argv[1]

    start = time.time()
    now = time.time()
    eventType = EventTypeSpec()
    eventType.eventClass = kEventClassAppleEvent
    eventType.eventKind = kEventAppleEvent

    # NOTE(review): `now` is never refreshed inside the loop; the exit is
    # effectively driven by ReceiveNextEvent's own timeout argument.
    while running[0] and now - start < timeout[0]:
        event = ctypes.c_void_p()
        sts = carbon.ReceiveNextEvent(1, ctypes.byref(eventType),
                                      start + timeout[0] - now, TRUE, ctypes.byref(event))
        if sts == eventLoopTimedOutErr:
            break
        elif sts != 0:
            print("argvemulator warning: fetching events failed")
            break
        sts = carbon.AEProcessEvent(event)
        if sts != 0:
            print("argvemulator warning: processing events failed")
            break

    # uninstall all three handlers before returning control to the app
    carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication,
                                open_app_handler, FALSE)
    carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments,
                                open_file_handler, FALSE)
    carbon.AERemoveEventHandler(kAEInternetSuite, kAEISGetURL,
                                open_url_handler, FALSE)
def _argv_emulation():
    """Run the AppleEvent argv emulator, but only under LaunchServices."""
    import sys, os
    # only use if started by LaunchServices (py2app sets this env marker)
    if os.environ.get('_PY2APP_LAUNCHED_'):
        _run_argvemulator()

_argv_emulation()
def _chdir_resource():
    """Make the bundle's Resources directory the current working directory."""
    import os
    os.chdir(os.environ['RESOURCEPATH'])

_chdir_resource()
def _disable_linecache():
    """Stub out linecache.getline so tracebacks don't read bundled sources.

    The real implementation stays reachable as ``linecache.orig_getline``.
    """
    import linecache

    def _empty_getline(*args, **kwargs):
        return ''

    linecache.orig_getline = linecache.getline
    linecache.getline = _empty_getline

_disable_linecache()
import re, sys

# PEP 263 coding-cookie pattern, matched against raw source bytes
cookie_re = re.compile(b"coding[:=]\s*([-\w.]+)")

# fallback source encoding when no cookie is present
default_encoding = 'ascii' if sys.version_info[0] == 2 else 'utf-8'

def guess_encoding(fp):
    """Return the coding-cookie encoding from *fp*'s first two lines.

    *fp* must be opened in binary mode; falls back to ``default_encoding``
    when neither of the first two lines carries a cookie.
    """
    for _ in range(2):
        match = cookie_re.search(fp.readline())
        if match is not None:
            return match.group(1).decode('ascii')
    return default_encoding
def _run():
    """Locate the bundled main script and execute it in this module's globals."""
    global __file__
    import os, site
    sys.frozen = 'macosx_app'
    base = os.environ['RESOURCEPATH']

    # pick the script mapped to the invoked executable name, else the default
    argv0 = os.path.basename(os.environ['ARGVZERO'])
    script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT)

    path = os.path.join(base, script)
    # make the script believe it was launched directly
    sys.argv[0] = __file__ = path
    if sys.version_info[0] == 2:
        with open(path, 'rU') as fp:
            source = fp.read() + "\n"
    else:
        # sniff the PEP 263 cookie first, then reopen as text
        with open(path, 'rb') as fp:
            encoding = guess_encoding(fp)

        with open(path, 'r', encoding=encoding) as fp:
            source = fp.read() + '\n'

    exec(compile(source, path, 'exec'), globals(), globals())
def _setup_ctypes():
    """Let ctypes' macholib search the bundle's Frameworks directory first."""
    from ctypes.macholib import dyld
    import os
    frameworks = os.path.join(os.environ['RESOURCEPATH'], '..', 'Frameworks')
    dyld.DEFAULT_FRAMEWORK_FALLBACK.insert(0, frameworks)
    dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, frameworks)

_setup_ctypes()
# Script run when the invoked executable name has no SCRIPT_MAP entry.
DEFAULT_SCRIPT='PeterMaarNetworkedChatClientGUI.py'
# Optional mapping of executable basename -> script name (empty here).
SCRIPT_MAP={}
# hand control to the bundled application script
_run()
| 31.323864 | 100 | 0.626882 |
74f558d2d92e28cfa1f93f13372ce3528e8c04be | 4,829 | py | Python | tensorflow_tts/processor/ljspeech.py | Defrisoft/TensorFlowTTS | d18f1c71b03285a37792121901c5ac1e14c07c20 | [
"Apache-2.0"
] | 1 | 2021-12-22T07:24:58.000Z | 2021-12-22T07:24:58.000Z | tensorflow_tts/processor/ljspeech.py | Defrisoft/TensorFlowTTS | d18f1c71b03285a37792121901c5ac1e14c07c20 | [
"Apache-2.0"
] | null | null | null | tensorflow_tts/processor/ljspeech.py | Defrisoft/TensorFlowTTS | d18f1c71b03285a37792121901c5ac1e14c07c20 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for LJSpeech dataset."""
import os
import re
import logging
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
# Phoneme inventory accepted inside {...} spans of the input text.
# NOTE(review): despite the "ARPAbet" wording below, this particular set
# does not match standard English ARPAbet -- confirm the intended phone set.
valid_symbols = [
    'A', 'E', 'I', 'O', 'U',
    'B', 'CH', 'D', "DH", 'F', 'G', 'GH', 'H', 'J', 'K', 'KH', 'L', 'M',
    'N', 'NG', "NG'", "NY", 'P', 'R', 'S', 'SH', 'T', "TH", 'V', 'W', 'Y', 'Z',
]

_pad = "pad"
_eos = "eos"
_punctuation = "!'(),.:;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ["@" + s for s in valid_symbols]

# Export all symbols: pad is id 0, eos is the last id
LJSPEECH_SYMBOLS = (
    [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet + [_eos]
)

# Regular expression matching text enclosed in curly braces:
# group(1)=text before, group(2)=brace contents, group(3)=remainder
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class LJSpeechProcessor(BaseProcessor):
    """LJSpeech processor: parses metadata.csv and converts text to symbol ids."""

    # name of the text cleaner pipeline applied before symbol lookup
    cleaner_names: str = "english_cleaners"
    # column indexes within a metadata.csv line (split on "|")
    positions = {
        "wave_file": 0,
        "text": 1,
        "text_norm": 2,
    }
    # metadata file name relative to data_dir
    train_f_name: str = "metadata.csv"

    def create_items(self):
        """Populate self.items with (text, wav_path, speaker) tuples from metadata."""
        logging.info('Opening and spliting text files to ids, text and normalised texts')
        if self.data_dir:
            with open(
                os.path.join(self.data_dir, self.train_f_name), encoding="utf-8"
            ) as f:
                self.items = [self.split_line(self.data_dir, line, "|") for line in f]

    def split_line(self, data_dir, line, split):
        """Parse one metadata line into (normalized_text, wav_path, speaker_name)."""
        parts = line.strip().split(split)
        wave_file = parts[self.positions["wave_file"]]
        text_norm = parts[self.positions["text_norm"]]
        wav_path = os.path.join(data_dir, "wavs", f"{wave_file}.wav")
        speaker_name = "ljspeech"  # LJSpeech is a single-speaker corpus
        return text_norm, wav_path, speaker_name

    def setup_eos_token(self):
        """Return the end-of-sentence token string used by the base class."""
        return _eos

    def save_pretrained(self, saved_path):
        """Persist the processor mapping to *saved_path* (created if missing)."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def get_one_sample(self, item):
        """Load one (text, wav_path, speaker) item into a training sample dict."""
        text, wav_path, speaker_name = item

        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_path)
        audio = audio.astype(np.float32)

        # convert text to ids
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)

        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

        return sample

    def text_to_sequence(self, text):
        """Convert *text* to a list of symbol ids, ending with the eos id.

        Segments enclosed in curly braces are treated as space-separated
        phoneme symbols; everything else is cleaned and mapped char-by-char.
        """
        sequence = []
        # Check for curly braces and treat their contents as ARPAbet:
        while len(text):
            m = _curly_re.match(text)
            if not m:
                # no more braces: clean and encode the remainder
                sequence += self._symbols_to_sequence(
                    self._clean_text(text, [self.cleaner_names])
                )
                break
            sequence += self._symbols_to_sequence(
                self._clean_text(m.group(1), [self.cleaner_names])
            )
            sequence += self._arpabet_to_sequence(m.group(2))
            text = m.group(3)

        # add eos tokens
        sequence += [self.eos_id]
        return sequence

    def _clean_text(self, text, cleaner_names):
        # apply each named cleaner from tensorflow_tts.utils.cleaners in turn
        for name in cleaner_names:
            cleaner = getattr(cleaners, name)
            if not cleaner:
                raise Exception("Unknown cleaner: %s" % name)
            text = cleaner(text)
        return text

    def _symbols_to_sequence(self, symbols):
        # drop symbols not in the table and the legacy "_"/"~" markers
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]

    def _arpabet_to_sequence(self, text):
        # phoneme symbols are stored with a leading "@" in the symbol table
        return self._symbols_to_sequence(["@" + s for s in text.split()])

    def _should_keep_symbol(self, s):
        return s in self.symbol_to_id and s != "_" and s != "~"
| 32.85034 | 95 | 0.614827 |
a07021e1bc8fecfa46307dc74d60a5922ea468ea | 1,190 | py | Python | tests/test_default.py | Cologler/dataclasses_fromdict-python | b84d628c1f69c6e19b35e5ef9eecfa88d5e57ca0 | [
"MIT"
] | null | null | null | tests/test_default.py | Cologler/dataclasses_fromdict-python | b84d628c1f69c6e19b35e5ef9eecfa88d5e57ca0 | [
"MIT"
] | null | null | null | tests/test_default.py | Cologler/dataclasses_fromdict-python | b84d628c1f69c6e19b35e5ef9eecfa88d5e57ca0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import List, Tuple
from dataclasses import dataclass, asdict
from dataclasses_fromdict import from_dict
@dataclass
class Point:
    """A 2-D point with integer coordinates; the flat fixture for from_dict tests."""
    x: int
    y: int
@dataclass
class PointList:
    """Fixture: a dataclass wrapping a homogeneous List of Points."""
    items: List[Point]
@dataclass
class Line:
    """Fixture: a fixed-arity Tuple of exactly two Points."""
    items: Tuple[Point, Point]
@dataclass
class Points:
    """Fixture: a variadic Tuple[Point, ...] of any length."""
    items: Tuple[Point, ...]
def test_simple():
    """Round-trip a flat dataclass through asdict and from_dict."""
    point = Point(10, 20)
    dumped = asdict(point)
    assert dumped == {'x': 10, 'y': 20}
    assert from_dict(dumped, Point) == point
def test_generic_list():
    """Round-trip a dataclass holding a List[Point]."""
    source = PointList([Point(0, 0), Point(10, 4)])
    dumped = asdict(source)
    assert dumped == {'items': [{'x': 0, 'y': 0}, {'x': 10, 'y': 4}]}
    assert from_dict(dumped, PointList) == source
def test_generic_tuple():
    """Round-trip a dataclass holding a fixed-arity Tuple[Point, Point]."""
    source = Line((Point(0, 0), Point(10, 4)))
    dumped = asdict(source)
    assert dumped == {'items': ({'x': 0, 'y': 0}, {'x': 10, 'y': 4})}
    assert from_dict(dumped, Line) == source
def test_generic_vartuple():
    """Round-trip a dataclass holding a variadic Tuple[Point, ...]."""
    source = Points((Point(0, 0), Point(10, 4), Point(20, 8)))
    dumped = asdict(source)
    assert dumped == {'items': ({'x': 0, 'y': 0}, {'x': 10, 'y': 4}, {'x': 20, 'y': 8})}
    assert from_dict(dumped, Points) == source
| 20.877193 | 82 | 0.557143 |
a4989780b83a2e0446ec922fb67728a761d51bcf | 11,629 | py | Python | src/m5_more_sequences.py | smileyle/12-MoreSequences | e80814b5275ad7fcea934b5f86ab0f938bf10742 | [
"MIT"
] | null | null | null | src/m5_more_sequences.py | smileyle/12-MoreSequences | e80814b5275ad7fcea934b5f86ab0f938bf10742 | [
"MIT"
] | null | null | null | src/m5_more_sequences.py | smileyle/12-MoreSequences | e80814b5275ad7fcea934b5f86ab0f938bf10742 | [
"MIT"
] | null | null | null | """
This module lets you practice various patterns
for ITERATING through SEQUENCES, including:
-- Beginning to end
-- Other ranges (e.g., backwards and every-3rd-item)
-- The COUNT/SUM/etc pattern
-- The FIND pattern (via LINEAR SEARCH)
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Lauren Smiley.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
run_test_sum_radii()
run_test_count_last_n_odds()
run_test_index_of_first_negative()
run_test_contains_an_a()
# ----------------------------------------------------------------------
# Many problems simply iterate (loop) through ALL of the sequence,
# as in the sum_radii problem below.
# ----------------------------------------------------------------------
def run_test_sum_radii():
""" Tests the sum_radii function. """
print()
print('--------------------------------------------------')
print('Testing the sum_radii function:')
print('--------------------------------------------------')
# Test 1 is ALREADY DONE (here).
print()
circle1 = rg.Circle(rg.Point(100, 100), 25)
circle2 = rg.Circle(rg.Point(100, 100), 50)
circle3 = rg.Circle(rg.Point(100, 100), 10)
expected = 85
seq = (circle1, circle2, circle3)
actual = sum_radii(seq)
print('Expected:', expected)
print('Actual: ', actual)
# Test 2 is ALREADY DONE (here).
print()
circle1 = rg.Circle(rg.Point(200, 20), 80)
circle2 = rg.Circle(rg.Point(300, 100), 60)
circle3 = rg.Circle(rg.Point(100, 150), 0)
circle4 = rg.Circle(rg.Point(0, 0), 30)
expected = 170
seq = (circle1, circle2, circle3, circle4)
actual = sum_radii(seq)
print('Expected:', expected)
print('Actual: ', actual)
def sum_radii(circles):
    """
    What comes in:
      -- a sequence of rg.Circle objects
    What goes out:
      Returns the sum of the radii of the given sequence of rg.Circles.
    Side effects: None.
    Example:  For circles with radii 25, 50 and 10,
      sum_radii returns 25 + 50 + 10, which is 85.
    Type hints:
      :type circles: list | tuple of rg.Circle
      :rtype: int | float
    """
    # Fixes two defects of the original: the accumulator was named "list"
    # (shadowing the builtin) and the docstring sat unreachably AFTER the
    # return statement.  Explicit loop kept per the assignment's rules
    # (no slices, no sequence built-ins).
    total = 0
    for circle in circles:
        total = total + circle.radius
    return total
# ----------------------------------------------------------------------
# Some problems iterate (loop) through PART of the sequence,
# perhaps BACKWARDS, as in the count_last_n_odds problem below.
# ----------------------------------------------------------------------
def run_test_count_last_n_odds():
    """ Tests the count_last_n_odds function: two printed test sets with answer keys. """
    print()
    print('--------------------------------------------------')
    print('Testing the count_last_n_odds function:')
    print('--------------------------------------------------')
    # Six tests - ALREADY DONE (here).
    seq = [1, 5, 88, 44, 33, 77, 10, 12, 9]
    answer1 = count_last_n_odds(seq, 0)
    answer2 = count_last_n_odds(seq, 1)
    answer3 = count_last_n_odds(seq, 6)
    answer4 = count_last_n_odds(seq, 7)
    answer5 = count_last_n_odds(seq, 8)
    answer6 = count_last_n_odds(seq, 9)
    print()
    print('Test set #1 of count_last_n_odds:',
          answer1, answer2, answer3, answer4, answer5, answer6)
    print('The above should be: 0 1 3 3 4 5')
    # Six more tests - ALREADY DONE (here).
    seq = [17, 88, -5, -10, 0]
    answer1 = count_last_n_odds(seq, 0)
    answer2 = count_last_n_odds(seq, 1)
    answer3 = count_last_n_odds(seq, 2)
    answer4 = count_last_n_odds(seq, 3)
    answer5 = count_last_n_odds(seq, 4)
    answer6 = count_last_n_odds(seq, 5)
    print()
    print('Test set #2 of count_last_n_odds:',
          answer1, answer2, answer3, answer4, answer5, answer6)
    print('The above should be: 0 0 0 1 1 2')
def count_last_n_odds(integers, n):
    """
    What comes in:
      -- a sequence of integers
      -- a non-negative integer n that is less than or equal to
         the length of the given sequence
    What goes out:  Returns the number of odd integers
      in the last n items of the given sequence.
    Side effects: None.
    Examples:  If the sequence is (13, 66, 15, 3), then:
      count_last_n_odds(sequence, 0) is 0  [no odds]
      count_last_n_odds(sequence, 2) is 2  [3 and 15]
      count_last_n_odds(sequence, 4) is 3  [3, 15 and 13]
    Type hints:
      :type integers: list | tuple of int
      :rtype: int
    """
    # Fixes the original's docstring sitting unreachably after the return,
    # and scans forward instead of backward (same items either way).
    # The guard preserves the original's behavior of returning 0 if
    # n exceeds the sequence length (which the spec says cannot happen).
    count = 0
    if n <= len(integers):
        for k in range(len(integers) - n, len(integers)):
            # Python's % always yields 1 for odd values, even negatives.
            if integers[k] % 2 == 1:
                count = count + 1
    return count
# ----------------------------------------------------------------------
# Some problems iterate (loop) through PART of the sequence,
# stopping when the loop FINDS something of interest
# (or continuing to the end if it does NOT find the thing of interest),
# as in the following problems:
# ----------------------------------------------------------------------
def run_test_index_of_first_negative():
    """ Tests the index_of_first_negative function: expected 3, 0, 4, -1. """
    print()
    print('--------------------------------------------------')
    print('Testing the index_of_first_negative function:')
    print('--------------------------------------------------')
    # Test 1:
    print()
    expected = 3
    actual = index_of_first_negative([90, 0, 20, -5, 30, -10, 15])
    print('Expected:', expected)
    print('Actual: ', actual)
    # Test 2:
    print()
    expected = 0
    actual = index_of_first_negative([-5, 30, -10, 15])
    print('Expected:', expected)
    print('Actual: ', actual)
    # Test 3:
    print()
    expected = 4
    actual = index_of_first_negative([5, 30, 10, 15, -1])
    print('Expected:', expected)
    print('Actual: ', actual)
    # Test 4:
    print()
    expected = -1
    actual = index_of_first_negative([5, 30, 10, 15, 1, 6])
    print('Expected:', expected)
    print('Actual: ', actual)
    # Catch the common mistake of returning the STRING '-1'.
    if actual == '-1':
        print(' Your answer is WRONG.')
        print(' You returned the STRING \'-1\'')
        print(' when you should have returned just -1')
def index_of_first_negative(numbers):
    """
    What comes in:
      -- a sequence of numbers
    What goes out:  Returns the INDEX of the first negative number
      in the given sequence of numbers, or -1 if the sequence
      contains no negative numbers.
      Note: "first" means the negative number with the smallest index.
    Side effects: None.
    Examples:
      [4, 30, -19, 8, -3, -50, 100]  returns 2  (-19 is at index 2)
      [-8, 44, 33]                   returns 0  (-8 is at index 0)
      [1, 29, 22, 8]                 returns -1 (no negatives)
    Type hints:
      :type numbers: list | tuple of float | int
      :rtype: int
    """
    # Fixes the original's docstring sitting unreachably after the return;
    # enumerate replaces the index-based range(len(...)) loop.
    for index, number in enumerate(numbers):
        if number < 0:
            return index
    # No negative found: return the integer -1, never the string '-1'.
    return -1
def run_test_contains_an_a():
    """ Tests the contains_an_a function against nine hand-checked strings. """
    print()
    print('--------------------------------------------------')
    print('Testing the contains_an_a function:')
    print('--------------------------------------------------')
    # Tests:
    actual1 = contains_an_a('nope')
    actual2 = contains_an_a('yes a is here')
    actual3 = contains_an_a('many aaaaas aaa aaa')
    actual4 = contains_an_a('not until the very end is a')
    actual5 = contains_an_a('a @ the beginning')
    actual6 = contains_an_a('')
    actual7 = contains_an_a('BLAH BLAH BLAH')
    actual8 = contains_an_a('BLAH BLAH BLAH \t MORE BLAH')
    actual9 = contains_an_a('BLAH BLAH BLAH \t MORE BLaH')
    actuals = (actual1, actual2, actual3, actual4, actual5, actual6,
               actual7, actual8, actual9)
    expecteds = (False, True, True, True, True, False,
                 False, False, True)
    for k in range(len(actuals)):
        print()
        print('Expected:', expecteds[k])
        print('Actual: ', actuals[k])
        # Catch the common mistake of returning the STRING 'True'/'False'.
        if type(actuals[k]) is str and str(expecteds[k]) == actuals[k]:
            print('Your code FAILED this test for contains_an_a.')
            print(' You appear to have returned the STRING:')
            print(' "' + actuals[k] + '"')
            print(' instead of the built-in constant:')
            print(' ' + str(expecteds[k]))
def contains_an_a(s):
    """
    What comes in:
      -- a string
    What goes out:  Returns True if the given string contains
      the character 'a'.  Returns False if the given string
      does not contain the character 'a'.
    Side effects: None.
    Examples:
      -- contains_an_a('blah blah blah') returns True
      -- contains_an_a('BLAH BLAH BLAH') returns False
      -- contains_an_a('abc') returns True
      -- contains_an_a('') returns False
    Type hints:
      :type s: str
      :rtype: bool
    """
    # The original left this TODO unimplemented (it returned None, failing
    # every test above).  Explicit loop per the assignment's rules (no
    # str.count / str.find); returns the built-in constants True / False,
    # never the strings 'True' / 'False'.
    for character in s:
        if character == 'a':
            return True
    return False
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
# NOTE: this runs unconditionally on import; a standard
# `if __name__ == "__main__":` guard would prevent the tests from
# running whenever this module is imported by other code.
main()
| 36.227414 | 72 | 0.524895 |
89d2a44563c4aef42933c44e98f1a0572e29cf58 | 329 | py | Python | django_learning/dim_sum/migrations/0006_remove_dimsumitem_tasty.py | angelptli/django-learning | f0d25e4b85526c2c092c4e4d2f108e7602db7f0d | [
"MIT"
] | null | null | null | django_learning/dim_sum/migrations/0006_remove_dimsumitem_tasty.py | angelptli/django-learning | f0d25e4b85526c2c092c4e4d2f108e7602db7f0d | [
"MIT"
] | null | null | null | django_learning/dim_sum/migrations/0006_remove_dimsumitem_tasty.py | angelptli/django-learning | f0d25e4b85526c2c092c4e4d2f108e7602db7f0d | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-24 04:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dim_sum', '0005_dimsumitem_tasty'),
]
operations = [
migrations.RemoveField(
model_name='dimsumitem',
name='tasty',
),
]
| 18.277778 | 47 | 0.592705 |
09d7e262e7a785b0aca86edb3a50b85d807f3ca5 | 2,429 | py | Python | references/encase/code/OptF.py | wenh06/cinc2020 | b3757f54df86c8470e8f22f3399b4aecd64dd5d1 | [
"BSD-2-Clause"
] | 4 | 2020-10-31T07:02:37.000Z | 2021-05-24T08:11:35.000Z | references/encase/code/OptF.py | DeepPSP/cinc2020 | 38105ed9dac6554e2dd51b94e5553fb8ba22dbe6 | [
"BSD-2-Clause"
] | null | null | null | references/encase/code/OptF.py | DeepPSP/cinc2020 | 38105ed9dac6554e2dd51b94e5553fb8ba22dbe6 | [
"BSD-2-Clause"
] | 1 | 2021-05-25T14:54:31.000Z | 2021-05-25T14:54:31.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 15:06:35 2017
@author: v-shon
"""
import numpy as np
import sklearn
from collections import Counter
class OptF(object):
def __init__(self, alpha=0.5, epochs=100):
self.alpha = alpha
self.epochs = epochs
self.theta = None
def gradTheta(self, theta, train_data, train_label):
"""
Jansche, Martin. EMNLP 2005
"Maximum expected F-measure training of logistic regression models."
must be normalized first
"""
n_row, n_col = train_data.shape
m = 0.0
A = 0.0
dm = np.zeros([n_col, 1])
dA = np.zeros([n_col, 1])
p = np.zeros([n_row, 1])
p = 1.0 / (1.0 + np.exp(-np.dot(train_data, theta)))
m = sum(p)
A = sum(p * train_label)
dm = np.dot(np.transpose(train_data), p * (1 - p))
dA = np.dot(np.transpose(train_data), p * (1 - p) * train_label)
n_pos = sum(train_label)
h = 1 / (self.alpha * n_pos + (1 - self.alpha) * m)
F = h * A
t = F * (1 - self.alpha)
dF = h * (dA - t * dm)
return F, dF
def fit(self, train_data, train_label):
train_feature = sklearn.preprocessing.scale(train_data, axis=0)
n_row, n_col = train_feature.shape
train_feature = np.c_[np.ones([n_row, 1]), train_feature]
train_label = np.expand_dims(np.array(train_label), axis=1)
self.theta = np.random.rand(n_col+1, 1)
for epoch in range(self.epochs):
F, dF = self.gradTheta(self.theta, train_feature, train_label)
self.theta = self.theta + dF
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(test_data, axis=0)
test_feature = test_data
n_row, n_col = test_feature.shape
test_feature = np.c_[np.ones([n_row, 1]), test_feature]
# print(test_feature)
z = np.dot(test_feature, self.theta)
gz = 1 / (1 + np.exp(-z))
return gz
def predict(self, test_data):
gz = self.predict_prob(test_data)
out = []
for prob in gz:
if prob > 0.5:
out.append(1)
else:
out.append(0)
return out
| 25.041237 | 77 | 0.533141 |
7c8d770d3a59c777f99af1fdd6a052a54f99049f | 1,537 | py | Python | run_thingview.py | sourceperl/iot.things.srv | e11a57522254c32ceb8d76fcd61947609238b88f | [
"MIT"
] | null | null | null | run_thingview.py | sourceperl/iot.things.srv | e11a57522254c32ceb8d76fcd61947609238b88f | [
"MIT"
] | null | null | null | run_thingview.py | sourceperl/iot.things.srv | e11a57522254c32ceb8d76fcd61947609238b88f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from thingview import app
import argparse
import os
if __name__ == '__main__':
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description='ThingView server')
    parser.add_argument('-b', '--bind', type=str, default='0.0.0.0',
                        help='bind address (default is "0.0.0.0")')
    parser.add_argument('-p', '--port', type=int, default=8080,
                        help='listen port')
    parser.add_argument('-k', '--secret-key', type=str, default=None,
                        help='secret key')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='set debug mode')
    args = parser.parse_args()
    # Flask session secret key (argparse exposes --secret-key as args.secret_key).
    if args.secret_key:
        app.secret_key = args.secret_key
    # In debug mode, collect every file under the templates dir so Flask's
    # reloader also restarts on template changes.  Note: the directory path
    # itself is included in extra_files too (extra_dirs[:] seeds the list).
    if args.debug:
        extra_dirs = ['./thingview/templates',]
        extra_files = extra_dirs[:]
        for extra_dir in extra_dirs:
            for dirname, dirs, files in os.walk(extra_dir):
                for filename in files:
                    filename = os.path.join(dirname, filename)
                    if os.path.isfile(filename):
                        extra_files.append(filename)
    else:
        extra_files = []
    # TLS certificate/key pair (hard-coded deployment paths).
    ssl_ctx = ('/etc/iot.things.srv/ssl.cert', '/etc/iot.things.srv/ssl.key')
    # Start Flask over HTTPS; extra_files feeds the auto-reloader.
    app.run(host=args.bind, port=args.port, ssl_context=ssl_ctx, extra_files=extra_files, debug=args.debug)
ce292420cbea8d9eae47ec1c9b212216c3975a21 | 275 | py | Python | notification/context_processors.py | alexhayes/lehins-notification | ec61fd10c708fe740639be0134ef94f012caf842 | [
"MIT"
] | null | null | null | notification/context_processors.py | alexhayes/lehins-notification | ec61fd10c708fe740639be0134ef94f012caf842 | [
"MIT"
] | null | null | null | notification/context_processors.py | alexhayes/lehins-notification | ec61fd10c708fe740639be0134ef94f012caf842 | [
"MIT"
] | null | null | null | from notification.models import Notice
def notification(request):
    """Django context processor adding the viewer's unseen-notice count.

    Returns {'notice_unseen_count': <count>} for authenticated users and
    an empty dict for anonymous ones.
    """
    if not request.user.is_authenticated():
        return {}
    unseen = Notice.objects.unseen_count_for(request.user, on_site=True)
    return {'notice_unseen_count': unseen}
| 25 | 67 | 0.621818 |
3bbf74ea908da770960c8b4e1418d1e9f61749fc | 28,340 | py | Python | kd_splicing/kd_splicing/features.py | konovalovdmitry/catsnap | d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f | [
"MIT"
] | null | null | null | kd_splicing/kd_splicing/features.py | konovalovdmitry/catsnap | d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f | [
"MIT"
] | null | null | null | kd_splicing/kd_splicing/features.py | konovalovdmitry/catsnap | d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f | [
"MIT"
] | 1 | 2021-09-30T08:06:20.000Z | 2021-09-30T08:06:20.000Z | from __future__ import annotations
import itertools
import multiprocessing
import os
import pickle
import string
import uuid
from collections import defaultdict
from dataclasses import dataclass, field
from functools import partial
from itertools import chain
from multiprocessing import get_context
from typing import Dict, Iterable, List, Mapping, Optional, Tuple, Any
from copy import copy
from Bio.SubsMat import MatrixInfo
from tqdm import tqdm
from kd_common import logutil, pathutil
from kd_splicing import blast, database, location, ml, models, paths
from kd_splicing.location.models import ConvertSegment, Location, LocationPart
from kd_splicing.location.utils import (union, convert_location,
get_alignment_segments, intersection,
merge, nucleotide_to_amino,
relative_to_alignment,
relative_to_location,
symmetric_difference)
from kd_splicing.models import IsoformTuple, Match, Queries, SimpleMatch
_logger = logutil.get_logger(__name__)
# Per-character score for BLAST midline strings: any uppercase letter or '+'
# counts as 1; every other character contributes 0 at the call sites, which
# use _SCORE_TABLE.get(ch, 0).
_SCORE_TABLE = {
    **{letter: 1 for letter in string.ascii_uppercase},
    "+": 1,
}
def _build_matrix() -> Mapping[Tuple[str, str], float]:
    """Build a symmetric BLOSUM30 lookup with negative scores clipped to 0.

    MatrixInfo.blosum30 stores each unordered residue pair once; both
    orderings are inserted so callers can index without sorting the pair.
    """
    table: Dict[Tuple[str, str], float] = {}
    for (res_a, res_b), raw_score in MatrixInfo.blosum30.items():
        clipped = max(raw_score, 0)
        table[(res_a, res_b)] = clipped
        table[(res_b, res_a)] = clipped
    return table
# Symmetric, zero-clipped BLOSUM30 substitution scores, built once at import.
_SCORE_MATRIX = _build_matrix()
def count_all_matches(midline: str) -> float:
    """Total match score of an entire BLAST midline string.

    Each letter or '+' contributes 1 (per _SCORE_TABLE); everything else 0.
    """
    return float(sum(_SCORE_TABLE.get(symbol, 0) for symbol in midline))
def count_matches(loc: Location, midline: str) -> float:
    """Sum the midline match scores over the positions covered by `loc`.

    Positions outside [0, len(midline)) are ignored.
    """
    total = 0.0
    limit = len(midline)
    for part in loc.parts:
        for position in range(part.start, part.end):
            if 0 <= position < limit:
                total += _SCORE_TABLE.get(midline[position], 0)
    return total
def evaluate_score(loc: Location, seq_a: str, seq_b: str, open_gap: float = 0, extend_gap: float = 0,
                   penalize_extend_when_opening: bool=False) -> float:
    """Score the columns of `loc` with BLOSUM30 substitution scores plus
    affine gap costs.

    seq_a/seq_b are gapped alignment rows; only columns inside loc.parts
    (and within string bounds) are scored.  openA/openB track whether a gap
    run is currently open in each row, so opening a run costs `open_gap`
    and continuing it costs `extend_gap`.
    """
    openA = False
    openB = False
    if penalize_extend_when_opening:
        open_gap += extend_gap
    score = .0
    for part in loc.parts:
        for i in range(part.start, part.end):
            # Skip coordinates that fall outside the alignment strings.
            if not(i >= 0 and i < len(seq_a)):
                continue
            char_a = seq_a[i]
            char_b = seq_b[i]
            if char_a !='-' and char_b != '-':
                # Substitution column: any open gap runs end here.
                openA, openB = False, False
                score += _SCORE_MATRIX[(char_a, char_b)]
            elif char_a == '-':
                if not openA:
                    # Opening a gap in row A implicitly closes any run in B.
                    score += open_gap
                    openA = True
                    openB = False
                else:
                    score += extend_gap
            elif char_b == '-':
                if not openB:
                    score += open_gap
                    openB = True
                    openA = False
                else:
                    score += extend_gap
    return score
def convert_splicing(splicing: Location, loc: Location, translation_len: int) -> Location:
    """Re-express a splicing location relative to `loc`, convert nucleotide
    coordinates to amino-acid coordinates, and merge adjacent parts.

    NOTE(review): `translation_len` is accepted but never used -- kept for
    interface compatibility with existing callers.
    """
    local = relative_to_location(splicing, loc)
    in_amino = nucleotide_to_amino(local)
    return merge(in_amino)
@dataclass
class CalcMatchContext:
    """Inputs for scoring one query isoform pair against one hit isoform pair."""
    hit_a: blast.Hit  # BLAST hit aligning query isoform A to a hit isoform
    hit_b: blast.Hit  # BLAST hit aligning query isoform B to a hit isoform
    iso_a: uuid.UUID  # query isoform A id
    iso_b: uuid.UUID  # query isoform B id
    splicing_a: Location  # query A splicing region; presumably same coordinate space as the hit splicing after conversion -- confirm upstream
    splicing_b: Location  # query B splicing region (see note above)
    debug: bool = False  # when True, calc_single prints diagnostics
@dataclass
class CalcQuery:
    """One query: a pair of isoforms (of the same gene) to compare against hits."""
    iso_a: uuid.UUID  # first query isoform id
    iso_a_len: int  # length of isoform A's translation
    iso_a_location: database.models.Location
    iso_b: uuid.UUID  # second query isoform id
    iso_b_location: database.models.Location
    iso_b_len: int  # length of isoform B's translation
    organism: str  # organism of the query gene's record
@dataclass
class CalcQueriesContext:
    """A batch of queries plus the BLAST hits and lookups needed to score them."""
    queries: List[CalcQuery]
    iso_to_hits: Mapping[uuid.UUID, List[blast.Hit]]  # query isoform id -> its BLAST hits
    q_iso_to_gene: Mapping[uuid.UUID, uuid.UUID]  # query isoform id -> gene uuid
    hit_tuple: Optional[IsoformTuple] = None  # not used in the code visible here; presumably restricts scoring to one hit pair -- confirm
    debug: bool = False  # enables diagnostic printing (and a ctx pickle dump in calc_queries)
@dataclass
class CalcBatch:
    """A unit of parallel work: one context plus the path for its result file."""
    result_path: str
    ctx: CalcQueriesContext
def _parallel_results_folder(launch_folder: str) -> str:
    """Create (if needed) and return the <launch_folder>/parallel_results folder."""
    return pathutil.create_folder(launch_folder, "parallel_results")
def _extend_splicing(loc: Location) -> None:
    """Mark each part of a two-isoform union location as splicing or not.

    Parts present in only one isoform (``source`` of size 1) are splicing by
    definition.  A shared part is additionally marked as splicing when the
    per-isoform lengths accumulated so far differ modulo 3, i.e. the two
    isoforms are out of reading frame at that point.
    Mutates part.data["is_splicing"] in place.
    NOTE(review): strand=None is not normalized to 1 here (a slice step of
    None behaves like 1, so it still works) -- unlike _add_directed_event_ids.
    """
    if not loc.parts:
        return
    # Iterate in strand order (strand == -1 reverses via the slice step).
    strand = loc.parts[0].strand
    a_splicing_length = 0
    b_splicing_length = 0
    for part in loc.parts[::strand]:
        if len(part.data["source"]) == 1:
            # Exclusive part: belongs to exactly one isoform.
            if "a" in part.data['source']:
                a_splicing_length += part.length()
            else:
                b_splicing_length += part.length()
            part.data["is_splicing"] = True
        elif a_splicing_length % 3 != b_splicing_length % 3:
            # Shared part, but the isoforms are frame-shifted here.
            part.data["is_splicing"] = True
            a_splicing_length += part.length()
            b_splicing_length += part.length()
        else:
            part.data["is_splicing"] = False
def _add_directed_event_ids(loc: Location, name: str, direction: int) -> None:
    """Number runs of consecutive splicing parts, scanning the parts in the
    given direction (1 = strand order, -1 = reverse strand order), and store
    the run id in part.data[name] for every splicing part.

    A new id starts whenever a splicing part follows a non-splicing one.
    """
    if not loc.parts:
        return
    event_id = 0
    # Initialized True so the first splicing run keeps id 0.
    is_prev_splicing = True
    strand = loc.parts[0].strand if loc.parts[0].strand is not None else 1
    for part in loc.parts[::direction * strand]:
        if not is_prev_splicing and part.data["is_splicing"]:
            event_id += 1
        if part.data["is_splicing"]:
            is_prev_splicing = True
            part.data[name] = event_id
        else:
            is_prev_splicing = False
def _add_event_ids(loc: Location) -> None:
    """Assign a direction-independent event id to every splicing part by
    combining the forward and backward run numbers.

    NOTE(review): the ``forward + 100 * backward`` encoding collides once a
    location has 100+ forward runs -- presumably never reached; confirm.
    """
    if not loc.parts:
        return
    _add_directed_event_ids(loc, "event_id_forward", 1)
    _add_directed_event_ids(loc, "event_id_backward", -1)
    for part in loc.parts:
        if part.data["is_splicing"]:
            part.data["event_id"] = part.data["event_id_forward"] + 100 * part.data["event_id_backward"]
def _get_splicings_from_union(loc_union: Location) -> Tuple[Location, Location]:
    """Split an annotated union location into the splicing parts that belong
    to isoform "a" and to isoform "b" (a shared part may go into both)."""
    splicing_a = Location()
    splicing_b = Location()
    for union_part in loc_union.parts:
        if not union_part.data["is_splicing"]:
            continue
        sources = union_part.data["source"]
        if "a" in sources:
            splicing_a.parts.append(copy(union_part))
        if "b" in sources:
            splicing_b.parts.append(copy(union_part))
    return splicing_a, splicing_b
def _get_splicing(a: Location, b:Location) -> Tuple[Location, Location]:
    """Union two isoform locations, mark splicing parts and assign event ids,
    then split the annotated union back into per-isoform splicing locations."""
    annotated_union = union(a, b)
    _extend_splicing(annotated_union)
    _add_event_ids(annotated_union)
    return _get_splicings_from_union(annotated_union)
def _loc_per_event(loc: Location, name: str) -> Mapping[int, Location]:
    """Group `loc`'s parts into one Location per value of part.data[name].

    Returns a defaultdict on purpose: callers index missing event ids and
    expect an empty Location back.
    """
    per_event: Dict[int, Location] = defaultdict(Location)
    for part in loc.parts:
        per_event[part.data[name]].parts.append(part)
    return per_event
def _splicing_intersection_length_weight(length: int) -> float:
_MIN_LENGTH = 20
_MIN_W = 1
_MAX_LENGTH = 100
_MAX_W = 0.3
if length < _MIN_LENGTH:
return _MIN_W
if length > _MAX_LENGTH:
return _MAX_W
x = (length - _MIN_LENGTH) / (_MAX_LENGTH - _MIN_LENGTH)
return x * x * (_MAX_W - _MIN_W) + _MIN_W
def _normalize_length(length: int) -> float:
_MIN_LENGTH = 10
_MIN_W = 1
_MAX_LENGTH = 100
_MAX_W = 0.3
if length < _MIN_LENGTH:
return length / _MIN_W
if length > _MAX_LENGTH:
return length / _MAX_W
x = float(length - _MIN_LENGTH) / (_MAX_LENGTH - _MIN_LENGTH)
w = x * (_MAX_W - _MIN_W) + _MIN_W
return length / w
def _calc_splicing_difference_per_event(
    m: Match,
    query_splicing_a: Location,
    query_splicing_b: Location,
    hit_splicing_a: Location,
    hit_splicing_b: Location,
    symmetric_difference_a: Location,
    symmetric_difference_b: Location,
    debug: bool
) -> None:
    """Set m.splicing_difference to the worst per-event mismatch between the
    query's and hit's splicing locations (all in alignment coordinates).

    For each event id, the a- and b-side symmetric differences are divided
    by the larger combined splicing length and summed; the maximum over all
    events is stored.  If no event exists at all, the difference is 1.
    NOTE(review): the symmetric_difference_a/b parameters are unused here --
    the per-event differences are recomputed locally; confirm this is
    intentional.
    """
    if debug:
        print("_calc_splicing_difference_per_event")
        print("aligned_query_splicing_a", query_splicing_a)
        print("aligned_query_splicing_b", query_splicing_b)
        print("aligned_hit_splicing_a", hit_splicing_a)
        print("aligned_hit_splicing_b", hit_splicing_b)
    # Group every splicing location by event id (defaultdicts: missing ids
    # yield empty Locations below).
    query_a_splicing_loc = _loc_per_event(query_splicing_a, "event_id")
    query_b_splicing_loc = _loc_per_event(query_splicing_b, "event_id")
    hit_a_splicing_loc = _loc_per_event(hit_splicing_a, "event_id")
    hit_b_splicing_loc = _loc_per_event(hit_splicing_b, "event_id")
    events = query_a_splicing_loc.keys() | query_b_splicing_loc.keys() | hit_a_splicing_loc.keys() | hit_b_splicing_loc.keys()
    if not events:
        # No splicing anywhere: maximal difference by convention.
        m.splicing_difference = 1
        return
    m.splicing_difference = 0
    for event_id in events:
        query_a_event_splicing = query_a_splicing_loc[event_id]
        query_b_event_splicing = query_b_splicing_loc[event_id]
        hit_a_event_splicing = hit_a_splicing_loc[event_id]
        hit_b_event_splicing = hit_b_splicing_loc[event_id]
        a_splicing_length = query_a_event_splicing.length() + hit_a_event_splicing.length()
        b_splicing_length = query_b_event_splicing.length() + hit_b_event_splicing.length()
        splicing_length = max(a_splicing_length, b_splicing_length)
        normalized_splicing_length = splicing_length
        a_event_difference = symmetric_difference(query_a_event_splicing, hit_a_event_splicing)
        b_event_difference = symmetric_difference(query_b_event_splicing, hit_b_event_splicing)
        a_difference = a_event_difference.length() / normalized_splicing_length if normalized_splicing_length else 1
        b_difference = b_event_difference.length() / normalized_splicing_length if normalized_splicing_length else 1
        # Keep the worst (largest) combined per-event difference.
        m.splicing_difference = max(m.splicing_difference, a_difference + b_difference)
        if debug:
            print()
            print("Event id: ", event_id)
            print("_calc_splicing_difference_per_event")
            print("query_a_event_splicing", query_a_event_splicing)
            print("query_b_event_splicing", query_b_event_splicing)
            print("hit_a_event_splicing", hit_a_event_splicing)
            print("hit_b_event_splicing", hit_b_event_splicing)
            print("a_event_difference", a_event_difference)
            print("b_event_difference", b_event_difference)
            print("a_difference", a_difference)
            print("b_difference", b_difference)
            print("normalized_splicing_length", normalized_splicing_length)
###############
# Calc
###############
def calc_single(ctx: CalcMatchContext) -> Match:
    """Score one query isoform pair against one hit isoform pair.

    Projects both the query's and the hit's splicing locations into the
    shared BLAST alignment coordinate system, then fills a Match with three
    features: splicing_difference (per-event mismatch), splicing_similarity
    / splicing_dissimilarity (midline-based), and isoform_blast_score
    (overall alignment quality of both hits).
    """
    match = Match(
        query_isoforms=IsoformTuple(ctx.iso_a, ctx.iso_b),
        hit_isoforms=IsoformTuple(ctx.hit_a.iso_uuid, ctx.hit_b.iso_uuid),
        hit_organism=ctx.hit_a.organism,
        hit_db_name=ctx.hit_a.db_name,
    )
    # Derive the hit pair's splicing locations and convert them from genomic
    # nucleotide coordinates to isoform-local amino-acid coordinates.
    hit_global_splicing_a, hit_global_splicing_b = _get_splicing(
        ctx.hit_a.iso_location, ctx.hit_b.iso_location)
    hit_splicing_a = convert_splicing(
        hit_global_splicing_a, ctx.hit_a.iso_location, ctx.hit_a.iso_len)
    hit_splicing_b = convert_splicing(
        hit_global_splicing_b, ctx.hit_b.iso_location, ctx.hit_b.iso_len)
    # Project query and hit splicing into alignment coordinates via the
    # precomputed alignment segments of each BLAST hit.
    aligned_splicing_a = convert_location(
        ctx.splicing_a, ctx.hit_a.query_segments)
    aligned_hit_splicing_a = convert_location(
        hit_splicing_a, ctx.hit_a.hit_segments)
    aligned_splicing_b = convert_location(
        ctx.splicing_b, ctx.hit_b.query_segments)
    aligned_hit_splicing_b = convert_location(
        hit_splicing_b, ctx.hit_b.hit_segments)
    splicing_intersection_a = intersection(
        aligned_splicing_a, aligned_hit_splicing_a)
    splicing_intersection_b = intersection(
        aligned_splicing_b, aligned_hit_splicing_b)
    splicing_intersection_a_length = splicing_intersection_a.length()
    splicing_intersection_b_length = splicing_intersection_b.length()
    ########################
    # Splicing difference
    ########################
    splicing_symmetric_difference_a = symmetric_difference(
        aligned_splicing_a, aligned_hit_splicing_a)
    splicing_symmetric_difference_b = symmetric_difference(
        aligned_splicing_b, aligned_hit_splicing_b)
    if ctx.debug:
        print("<---------------------------------->")
        print("ctx.hit_a.iso_location", ctx.hit_a.iso_location)
        print("ctx.hit_b.iso_location", ctx.hit_b.iso_location)
        print("hit_global_splicing_a", hit_global_splicing_a)
        print("hit_global_splicing_b", hit_global_splicing_b)
        print("hit_splicing_a", hit_splicing_a)
        print("hit_splicing_b", hit_splicing_b)
    _calc_splicing_difference_per_event(
        m=match,
        query_splicing_a=aligned_splicing_a,
        query_splicing_b=aligned_splicing_b,
        hit_splicing_a=aligned_hit_splicing_a,
        hit_splicing_b=aligned_hit_splicing_b,
        symmetric_difference_a=splicing_symmetric_difference_a,
        symmetric_difference_b=splicing_symmetric_difference_b,
        debug=ctx.debug,
    )
    ########################
    # Splicing Similarity
    ########################
    # splicing_score_a = evaluate_score(splicing_intersection_a, ctx.hit_a.qseq, ctx.hit_a.hseq)
    # splicing_score_b = evaluate_score(splicing_intersection_b, ctx.hit_b.qseq, ctx.hit_b.hseq)
    query_splicing_length_a = aligned_splicing_a.length()
    hit_splicing_length_a = aligned_hit_splicing_a.length()
    max_splicing_length_a = max(query_splicing_length_a, hit_splicing_length_a)
    query_splicing_length_b = aligned_splicing_b.length()
    hit_splicing_length_b = aligned_hit_splicing_b.length()
    max_splicing_length_b = max(query_splicing_length_b, hit_splicing_length_b)
    max_splicing_length = max(max_splicing_length_a, max_splicing_length_b)
    normalized_max_splicing_length = _normalize_length(max_splicing_length)
    # NOTE(review): normalized_max_intersection_length is computed but unused
    # below -- confirm whether it was meant to replace normalized_length.
    max_intersection_length = max(splicing_intersection_a_length, splicing_intersection_b_length)
    normalized_max_intersection_length = _normalize_length(max_intersection_length)
    splicing_score_a = count_matches(splicing_intersection_a, ctx.hit_a.midline)
    splicing_dissimilarity_a = (max_splicing_length_a - splicing_score_a) / normalized_max_splicing_length \
        if normalized_max_splicing_length else 0
    splicing_score_b = count_matches(splicing_intersection_b, ctx.hit_b.midline)
    splicing_dissimilarity_b = (max_splicing_length_b - splicing_score_b) / normalized_max_splicing_length \
        if normalized_max_splicing_length else 0
    splicing_intersection_length = splicing_intersection_a_length + splicing_intersection_b_length
    normalized_length = _splicing_intersection_length_weight(splicing_intersection_length) * splicing_intersection_length
    match.splicing_similarity = (splicing_score_a + splicing_score_b) / normalized_length \
        if normalized_length else 0
    match.splicing_dissimilarity = splicing_dissimilarity_a + splicing_dissimilarity_b
    if ctx.debug:
        print("<----------->")
        print("Splicing similarity")
        print("ctx.hit_a.qseq :", ctx.hit_a.qseq)
        print("ctx.hit_a.midline:", ctx.hit_a.midline)
        print("ctx.hit_a.hseq :", ctx.hit_a.hseq)
        print("ctx.hit_a.hit_from: ", ctx.hit_a.hit_from)
        print("splicing_score_a :", splicing_score_a)
        print("splicing_intersection_a_length", splicing_intersection_a_length)
        print("max_splicing_length_a", max_splicing_length_a)
        print("normalized_max_splicing_length", normalized_max_splicing_length)
        print("splicing_dissimilarity_a", splicing_dissimilarity_a)
        print("splicing_intersection_a", splicing_intersection_a)
        print()
        print("ctx.hit_b.qseq :", ctx.hit_b.qseq)
        print("ctx.hit_b.midline:", ctx.hit_b.midline)
        print("ctx.hit_b.hseq :", ctx.hit_b.hseq)
        print("ctx.hit_b.hit_from: ", ctx.hit_b.hit_from)
        print("splicing_score_b :", splicing_score_b)
        print("splicing_intersection_b_length", splicing_intersection_b_length)
        print("max_splicing_length_b", max_splicing_length_b)
        print("normalized_max_splicing_length", normalized_max_splicing_length)
        print("splicing_dissimilarity_b", splicing_dissimilarity_b)
        print("splicing_intersection_b", splicing_intersection_b)
        print()
        print("splicing_intersection_length", splicing_intersection_length)
        print("normalized_length", normalized_length)
        print("splicing_similarity", match.splicing_similarity)
        print("splicing_dissimilarity", match.splicing_dissimilarity)
    ########################
    # isoform blast score
    ########################
    # match.isoform_blast_score = (ctx.hit_a.score + ctx.hit_b.score) / (ctx.hit_a.query_len + ctx.hit_b.query_len + ctx.hit_a.iso_len + ctx.hit_b.iso_len)
    match_score_a = count_all_matches(ctx.hit_a.midline)
    match_score_b = count_all_matches(ctx.hit_b.midline)
    match.isoform_blast_score = (match_score_a + match_score_b) / (ctx.hit_a.query_len + ctx.hit_b.query_len + ctx.hit_a.iso_len + ctx.hit_b.iso_len)
    if ctx.debug:
        print("ctx.hit_a.score", ctx.hit_a.score)
        print("match_score_a", match_score_a)
        print("ctx.hit_a.query_len", ctx.hit_a.query_len)
        print("ctx.hit_a.iso_len", ctx.hit_a.iso_len)
        print("ctx.hit_b.score", ctx.hit_b.score)
        print("match_score_b", match_score_b)
        print("ctx.hit_b.query_len", ctx.hit_b.query_len)
        print("ctx.hit_b.iso_len", ctx.hit_b.iso_len)
        print("match.isoform_blast_score", match.isoform_blast_score)
    return match
def prepare_calc_queries(db: database.models.DB, launch_folder: str, query_ctx: Queries, query_tuples: List[IsoformTuple], ) -> CalcQueriesContext:
    """Build a ``CalcQueriesContext`` for the given isoform pairs.

    Collects the distinct isoforms referenced by ``query_tuples``, loads the
    BLAST hits recorded for each of them, and packages one ``CalcQuery`` per
    tuple together with an isoform -> gene lookup table.
    """
    # distinct isoform UUIDs appearing on either side of any query tuple
    isoforms = list(set(chain.from_iterable(
        (query_isoforms.a, query_isoforms.b)
        for query_isoforms in query_tuples
    )))
    # load the stored BLAST results for every isoform once, keyed by UUID
    iso_to_hits = {}
    for iso_uuid in isoforms:
        iso = db.isoforms[iso_uuid]
        gene = db.genes[iso.gene_uuid]
        record = db.records[gene.record_uuid]
        iso_to_hits[iso_uuid] = blast.get_results(
            db,
            launch_folder,
            query_len=len(iso.translation),
            result_file=query_ctx.isoform_to_file[iso_uuid],
            query_organism=record.organism,
        )
    queries = []
    for query in query_tuples:
        iso_a = db.isoforms[query.a]
        iso_b = db.isoforms[query.b]
        # assumes both isoforms of a tuple belong to the same gene/record,
        # since only iso_a's gene is consulted -- TODO confirm
        gene = db.genes[iso_a.gene_uuid]
        record = db.records[gene.record_uuid]
        queries.append(CalcQuery(
            iso_a=iso_a.uuid,
            iso_a_location=iso_a.location,
            iso_a_len=len(iso_a.translation),
            iso_b=iso_b.uuid,
            iso_b_location=iso_b.location,
            iso_b_len=len(iso_b.translation),
            organism=record.organism,
        ))
    # isoform UUID -> gene UUID, used later for consistency checks
    q_iso_to_gene = {iso_uuid:db.isoforms[iso_uuid].gene_uuid for iso_uuid in isoforms}
    return CalcQueriesContext(
        queries=queries,
        iso_to_hits=iso_to_hits,
        q_iso_to_gene=q_iso_to_gene,
    )
def calc_queries(ctx: CalcQueriesContext, use_tqdm: bool = True) -> List[Match]:
    """Score every (query isoform pair, candidate hit pair) combination.

    For each query pair, a-side and b-side hits are paired only when they hit
    isoforms of the same subject gene; each such pairing is scored with
    ``calc_single``.  Hits are annotated in place with alignment segments
    before scoring.
    """
    if ctx.debug:
        # snapshot the context so a debug run can be replayed from __main__
        with open(os.path.join(paths.FOLDER_DATA, "calc_queries_ctx.pkl"), "wb") as f:
            pickle.dump(ctx, f)
    hit_groups: Iterable[List[blast.Hit]] = tqdm(
        ctx.iso_to_hits.values(), desc="get_alignment_segments for hits") if use_tqdm else ctx.iso_to_hits.values()
    for hits in hit_groups:
        for hit in hits:
            # annotate hits in place with the aligned-segment coordinates
            # used downstream by calc_single
            hit.query_segments = get_alignment_segments(
                hit.qseq, hit.query_from, hit.query_len)
            hit.hit_segments = get_alignment_segments(
                hit.hseq, hit.hit_from, hit.iso_len)
    matches = []
    # NOTE(review): "calc_queires" is a typo in the progress-bar label only;
    # left untouched because it is a runtime string
    queries: List[CalcQuery] = tqdm(
        ctx.queries, desc="calc_queires") if use_tqdm else ctx.queries
    for query in queries:
        # build the merged exon structure of the two query isoforms and
        # derive each isoform's splicing relative to that union
        query_union = union(query.iso_a_location, query.iso_b_location)
        _extend_splicing(query_union)
        _add_event_ids(query_union)
        query_splicing_global_a, query_splicing_global_b = _get_splicings_from_union(query_union)
        hits_a = ctx.iso_to_hits[query.iso_a]
        hits_b = ctx.iso_to_hits[query.iso_b]
        # convert genome-global splicing coordinates to isoform-local ones
        splicing_a = convert_splicing(
            query_splicing_global_a, query.iso_a_location, query.iso_a_len)
        splicing_b = convert_splicing(
            query_splicing_global_b, query.iso_b_location, query.iso_b_len)
        if ctx.debug:
            print("<------------->")
            print("query.iso_a_location", query.iso_a_location)
            print("query.iso_b_location", query.iso_b_location)
            print("query_union", query_union)
            print("query_splicing_global_a", query_splicing_global_a)
            print("query_splicing_global_b", query_splicing_global_b)
            print("splicing_a", splicing_a)
            print("splicing_b", splicing_b)
        # index b-side hits by subject gene so a/b hits are only paired when
        # they land on the same gene
        gene_to_hits_b: Dict[uuid.UUID, List[blast.Hit]] = defaultdict(list)
        for h in hits_b:
            gene_to_hits_b[h.iso_gene_uuid].append(h)
        for hit_a in hits_a:
            # if hit_a.organism == query.organism:
            #     continue
            hits_b_from_same_gene = gene_to_hits_b.get(hit_a.iso_gene_uuid, [])
            for hit_b in hits_b_from_same_gene:
                # optional filter: restrict scoring to one specific hit pair
                if ctx.hit_tuple and not (ctx.hit_tuple.a == hit_a.iso_uuid and ctx.hit_tuple.b == hit_b.iso_uuid):
                    continue
                # a pair must consist of two different subject isoforms
                if hit_b.iso_uuid == hit_a.iso_uuid:
                    continue
                m = calc_single(CalcMatchContext(
                    hit_a=hit_a,
                    hit_b=hit_b,
                    iso_a=query.iso_a,
                    iso_b=query.iso_b,
                    splicing_a=splicing_a,
                    splicing_b=splicing_b,
                    debug=ctx.debug,
                ))
                matches.append(m)
    return matches
def calc(
        db: database.models.DB,
        launch_folder: str,
        queries: Queries,
        query_tuples: Optional[List[IsoformTuple]] = None,
        hit_tuple: Optional[IsoformTuple] = None,
        debug: bool = False,
) -> List[Match]:
    """Convenience entry point: prepare a context and score all queries.

    When ``query_tuples`` is omitted, all tuples defined on ``queries`` are
    used.  ``hit_tuple`` optionally restricts scoring to one hit pair, and
    ``debug`` enables verbose tracing in the calculation.
    """
    tuples = queries.tuples if query_tuples is None else query_tuples
    ctx = prepare_calc_queries(db, launch_folder, queries, tuples)
    ctx.hit_tuple = hit_tuple
    ctx.debug = debug
    return calc_queries(ctx)
def _check_connections(two_level_dict: Dict[uuid.UUID, Dict[Any, uuid.UUID]], m: Match, iso_from: uuid.UUID, mid: Any, iso_to: uuid.UUID) -> None:
    """Record the ``iso_from -> mid -> iso_to`` link, demoting conflicts.

    The first match to claim a (iso_from, mid) slot wins; any later match
    pointing the same slot at a different isoform is marked as a predicted
    negative.
    """
    inner = two_level_dict[iso_from]
    previously_claimed = inner.get(mid)
    if previously_claimed is None:
        inner[mid] = iso_to
        return
    if previously_claimed != iso_to:
        m.predicted_positive = False
def transform(ctx: CalcQueriesContext, matches: List[Match], detector: ml.Detector) -> List[Match]:
    """Score, deduplicate and cross-validate raw matches.

    Keeps only the best-scoring match per (query pair, organism+database),
    then enforces one-to-one consistency between query and hit isoforms,
    demoting conflicting lower-confidence matches to predicted negatives.
    """
    # presumably sets predicted_positive / predicted_positive_probability on
    # each match -- those fields are read below; confirm against ml.Detector
    detector.transform(matches)
    # best match per (query pair) per (hit organism, hit database)
    query_to_organism_to_match: Dict[IsoformTuple,
                                     Dict[Tuple[str, str], Match]] = defaultdict(dict)
    for m in matches:
        organism_to_match = query_to_organism_to_match[m.query_isoforms]
        key = m.hit_organism, m.hit_db_name
        simple_match = organism_to_match.get(key)
        if simple_match is None or simple_match.predicted_positive_probability < m.predicted_positive_probability:
            organism_to_match[key] = m
    matches = [
        m
        for organism_to_match in query_to_organism_to_match.values()
        for m in organism_to_match.values()
    ]
    # highest-confidence matches first so they claim connection slots below
    matches.sort(key=lambda m: m.predicted_positive_probability, reverse=True)
    # a query isoform may map to only one hit isoform per organism ...
    q_iso_to_organism_h_iso: Dict[uuid.UUID, Dict[str, uuid.UUID]] = defaultdict(dict)
    # ... and a hit isoform to only one query isoform per query gene
    h_iso_to_q_gene_to_q_iso: Dict[uuid.UUID, Dict[uuid.UUID, uuid.UUID]] = defaultdict(dict)
    for m in matches:
        _check_connections(q_iso_to_organism_h_iso, m, m.query_isoforms.a, m.hit_organism, m.hit_isoforms.a)
        _check_connections(q_iso_to_organism_h_iso, m, m.query_isoforms.b, m.hit_organism, m.hit_isoforms.b)
        _check_connections(h_iso_to_q_gene_to_q_iso, m, m.hit_isoforms.a, ctx.q_iso_to_gene[m.query_isoforms.a], m.query_isoforms.a)
        _check_connections(h_iso_to_q_gene_to_q_iso, m, m.hit_isoforms.b, ctx.q_iso_to_gene[m.query_isoforms.b], m.query_isoforms.b)
    return matches
def calc_single_batch_parallel(batch: CalcBatch, detector: ml.Detector) -> None:
    """Process one batch: score, transform, and pickle the results.

    The result file maps each query isoform tuple to its list of
    ``SimpleMatch`` objects.
    """
    raw_matches = calc_queries(batch.ctx, use_tqdm=False)
    kept = transform(batch.ctx, raw_matches, detector)
    grouped: Dict[IsoformTuple, List[SimpleMatch]] = defaultdict(list)
    for match in kept:
        simple = SimpleMatch(
            hit_isoforms=match.hit_isoforms,
            predicted_positive=match.predicted_positive,
            predicted_positive_probability=match.predicted_positive_probability,
        )
        grouped[match.query_isoforms].append(simple)
    with open(batch.result_path, "wb") as f:
        pickle.dump(dict(grouped), f,
                    protocol=pickle.HIGHEST_PROTOCOL)
def build_calc_batch_generator(
        db: database.models.DB,
        launch_folder: str,
        queries: Queries,
        query_tuples: List[IsoformTuple],
        batch_size: int = 10,
) -> Iterable[CalcBatch]:
    """Yield ``CalcBatch`` work units of ``batch_size`` gene groups each.

    Query tuples are grouped by the gene of their first isoform so a gene's
    tuples always land in the same batch.  Batches whose result file already
    exists are skipped, which makes interrupted runs resumable.
    """
    batch_idx = itertools.count()
    results_folder = _parallel_results_folder(launch_folder)
    # group the tuples by gene (assumes tuple.a determines the gene)
    gene_to_tuples: Dict[uuid.UUID, List[IsoformTuple]] = defaultdict(list)
    for query_tuple in query_tuples:
        gene_to_tuples[db.isoforms[query_tuple.a].gene_uuid].append(query_tuple)
    tuple_groups = list(gene_to_tuples.values())
    for i in range(0, len(tuple_groups), batch_size):
        result_path = os.path.join(
            results_folder, f"batch_{next(batch_idx)}.pkl")
        # resume support: the counter advances for every batch so numbering
        # is stable, but already-computed batches are skipped
        if os.path.exists(result_path): continue
        batch_tuples = [
            query_tuple
            for tuple_group in tuple_groups[i:i + batch_size]
            for query_tuple in tuple_group
        ]
        ctx = prepare_calc_queries(db, launch_folder, queries, batch_tuples)
        yield CalcBatch(
            result_path=result_path,
            ctx=ctx,
        )
def calc_batches(
        db: database.models.DB,
        launch_folder: str,
        queries: Queries,
        query_tuples: List[IsoformTuple],
        detector: ml.Detector,
        batch_size: int = 10
) -> None:
    """Run the batched calculation serially, one batch at a time.

    Bug fix: ``batch_size`` was accepted but never forwarded to
    ``build_calc_batch_generator``, so the generator always used its own
    default of 10 regardless of the argument.
    """
    _logger.info("Start calc batches")
    batches = build_calc_batch_generator(
        db, launch_folder, queries, query_tuples, batch_size=batch_size)
    # upper-bound estimate of the batch count (batches are grouped by gene,
    # so the true count may be lower); used only for the progress bar
    total_batches = -(-len(query_tuples) // batch_size)  # ceil division
    for batch in tqdm(batches, total=total_batches, desc="calc_batches"):
        calc_single_batch_parallel(batch, detector=detector)
def calc_parallel(
        db: database.models.DB,
        launch_folder: str,
        queries: Queries,
        query_tuples: List[IsoformTuple],
        detector: ml.Detector,
        batch_size: int = 10
) -> None:
    """Run the batched calculation in a spawn-based process pool.

    The results folder is reset first, so this always recomputes everything
    (use :func:`calc_batches` for resumable serial runs).

    Bug fix: ``batch_size`` was accepted but never forwarded to
    ``build_calc_batch_generator``, so the generator always used its own
    default of 10 regardless of the argument.
    """
    _logger.info("Start calc parallel")
    pathutil.reset_folder(_parallel_results_folder(launch_folder))
    generator = build_calc_batch_generator(
        db, launch_folder, queries, query_tuples, batch_size=batch_size)
    # upper-bound estimate of the batch count, used only for the progress bar
    total_batches = -(-len(query_tuples) // batch_size)  # ceil division
    # NOTE(review): worker count (19) and maxtasksperchild (50) are
    # hard-coded tuning values; kept as-is
    with get_context("spawn").Pool(19, maxtasksperchild=50) as p:
        list(tqdm(p.imap_unordered(
            partial(
                calc_single_batch_parallel,
                detector=detector,
            ),
            generator,
            chunksize=1,
        ), total=total_batches))
###############
# Helpers
###############
def read_simple_matches(launch_folder: str) -> Mapping[IsoformTuple, List[SimpleMatch]]:
    """Merge all pickled per-batch result files into a single mapping."""
    results_folder = _parallel_results_folder(launch_folder)
    merged: Dict[IsoformTuple, List[SimpleMatch]] = {}
    batch_files = pathutil.file_list(results_folder)
    for file_path in tqdm(batch_files, desc="read_simple_matches"):
        with open(file_path, "rb") as f:
            batch_result = pickle.load(f)
        merged.update(batch_result)
    return merged
def convert_matches(simple_matches: Dict[IsoformTuple, List[SimpleMatch]]) -> List[Match]:
    """Re-inflate ``SimpleMatch`` records into full ``Match`` objects.

    The hit organism is not stored in the simple form, so it is left empty.
    """
    return [
        Match(
            query_isoforms=query_isoforms,
            hit_isoforms=simple.hit_isoforms,
            predicted_positive=simple.predicted_positive,
            predicted_positive_probability=simple.predicted_positive_probability,
            hit_organism="",
        )
        for query_isoforms, match_list in simple_matches.items()
        for simple in match_list
    ]
if __name__ == "__main__":
    # Debug entry point: replay the context snapshot written by calc_queries
    # when ctx.debug was set, with verbose tracing enabled.
    with open(os.path.join(paths.FOLDER_DATA, "calc_queries_ctx.pkl"), "rb") as f:
        ctx = pickle.load(f)
    ctx.debug = True
    calc_queries(ctx, use_tqdm=False)
0871b143c0ff1017ee382c4bc7dad0e84be1b480 | 16,220 | py | Python | DSN/Malargue/__init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | [
"Apache-2.0"
] | null | null | null | DSN/Malargue/__init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | [
"Apache-2.0"
] | null | null | null | DSN/Malargue/__init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | [
"Apache-2.0"
] | null | null | null | """
Data_Reduction.Malargue
=======================
Subclasses for reducing data taken with DSN-like open loop recorders.
Open-loop recorders are raw IF voltage recorders that are not synchronized with
the communications between the spacecraft and the ground station. As such, they
are the most basic kind of recorder possible in radio astronomy, equivalent to
VLBI recorders. Indeed, an early implementation was known as the "VLBI Science
Recorder" (VSR), followed by later varieties of VSR and eventually, the OSR.
OLR recordings at different stations are indeed combined for VLBI measurements
of spacecraft with respect to distant radio sources, a powerful navigation tool.
Raw IF recordings can be computationally converted into any of the standard
signal types used in radio astronomy -- square-law detected power, spectra,
Stokes parameters, VLBI U-V maps, high time and spectral resolution pulsar data,
*etc.*
"""
import datetime
import glob
import logging
import numpy as NP
import os.path
import time
import Data_Reduction as DR
import Data_Reduction.DSN as DSN
import Data_Reduction.DSN.RSData as RSData
import DatesTimes as DT
logger = logging.getLogger(__name__)
class Observation(DSN.Observation):
    """
    Class for observations based on open-loop recording made as DSA-3 (DSS-84)

    The arguments for the superclass initialization are::

      parent (typically ``self`` or ``None``),
      name (will be set to a YEAR/DOY default if not provided),
      dss (required),
      date (YEAR/DOY required),
      start (optional, usually taken from file header),
      end (optional, inferred from header and number of records), and
      project (required).
    """
    def __init__(self, parent=None, name=None, dss=None,
                 date=None, start=None, end=None,
                 project=None):
        """
        The project, station, and date are needed to locate the directory for the
        working files.

        The header item ``STATION_ID`` is not correct in files from Malargue.

        The start time is in the header as ``TIME_TAG_YEAR``, ``TIME_TAG_DOY``,
        ``TIME_TAG_SECOND_OF_DAY`` and ``TIMETAG_PICOSECONDS_OF_THE_SECOND``.

        These channel metadata can be extracted from the header: ``SAMPLE_RATE`` for
        bandwidth, ``RF_TO_IF_DOWNCONV`` for the band and receiver center frequency,
        ``IF_TO_CHANNEL_DOWNCONV`` for the channel center frequency.

        Args:
          parent (Session): (optional) session to which this observation belongs
          name   (str):     (optional) an identifier; default is station ID + "obs"
          dss    (int):     (required) station number
          date   (str):     (required) "YEAR/DOY"
          start  (float):   (optional) UNIX time at the start
          end    (float):   (optional) UNIX time at the end
          project (str):    (required) directory under /usr/local/projects
        """
        mylogger = logging.getLogger(logger.name+".Observation")
        DSN.Observation.__init__(self, parent=parent, name=name, dss=dss,
                                 date=date, project=project)
        # assign the child logger after superclass init so the superclass
        # cannot overwrite it
        self.logger = mylogger

    def load_file(self, num_recs=5, datafile=None, schdfile=None, catlfile=None):
        """
        loads data from an OLR file

        This is Malargue specific because of the catalog and schedule file formats.

        We load five records at a time or poor old Python really bogs down.

        I think we proceed as follows:

        #. Get a list of data files in the directory.
        #. We parse the ``*.obs`` file in the directory to get:
           #. a list of scan numbers,
           #. the base file name for each scan (without scan or channel number),
           #. a list of channel numbers from the first scan.
        #. For each scan (which is a map):
           #. Ignore if there is no corresponding data file (``NET*.prd``)
           #. Find the corresponding schedule (``sch*.txt``) and load the times and
              position names.
           #. Open the matching catalog (``cat*.txt``) and for each position get the
              coordinates.
           #. for each channel:
              #. Form the file name: ``NET4n%03dtSsMG12rOPc%02d*.prd`` where the
                 first formatted item is scan number and the second is channel.
              #. Process the ordered list of data files found with the above mask.
              #. Read and parse the file header.
              #. For each record (in groups of five records at a time) for
                 efficiency):
                 #. For the first record of the file only
                    #. Put the time and coordinates in the first row of the
                       structured numpy array.
                 #. Read the record data from the datafile.
                 #. Process the data (e.g. sum square average, power spectrum,
                    spectrogram, etc.)
                 #. Save the processed data for each record in the numpy dict
                    keyed on channel number.
           #. save the numpy array of reduced data.
        """
        # a data file and a schedule are required; the catalog is not yet used
        if not (datafile and schdfile):
            # bug fix: the format string previously had no arguments, so the
            # offending file names were never actually logged
            self.logger.error("load_file: missing file: data=%s, sched=%s catlog=%s",
                              datafile, schdfile, catlfile)
            raise RuntimeError("need data, a recording schedule, and coordinates")
        # set the datafile reader options
        options = {'toPrintHeaders': False, 'toPrintData': False,
                   'lookForDate': True, 'fixTime': True}
        fname = self.sessionpath+datafile
        fmt = self.checkFormat(fname)
        options.update({'format': fmt})
        # extract the datasets; the schedule is a whitespace-delimited table
        # of (start time, duration, source name)
        self.schedule = NP.genfromtxt(schdfile, dtype=[('time', 'S24'),
                                                       ('duration', int),
                                                       ('name', 'S8')])
        self.times = self.schedule['time']
        self.durations = self.schedule['duration']
        self.records = {}
        # read at most num_recs records, one per scheduled start time
        for start in self.times[:num_recs]:
            # index of this start time in the schedule; assumes start times
            # are unique -- TODO confirm
            idx = list(self.times).index(start)
            options.update({'startDate': datetime.datetime.strptime(
                                                start.decode("utf-8"),
                                                '%Y/%m/%d/%H:%M:%S')})
            options.update({'duration': float(self.durations[idx])})
            # dispatch on the recording format detected above
            if fmt == "RDEF":
                self.records[idx] = self.RDEF(fname, options)
            elif fmt == "VDR":
                self.records[idx] = self.VDR(fname, options)
            elif fmt == "SFDU":
                self.records[idx] = self.SFDU(fname, options)
            else:
                # bug fix: this branch referenced the undefined name
                # ``filename`` (NameError) and mislabeled the context
                self.logger.error("load_file: %s has unknown format %s",
                                  fname, fmt)
class Map(Observation):
    """
    Placeholder for a mapping (raster-scan) observation made with DSA-3.

    NOTE(review): this class is an unimplemented stub; ``__init__`` does not
    call the superclass initializer, so instances are not usable yet.
    """
    def __init__(self):
        """
        Stub initializer; intentionally does nothing.
        """
        pass
class Session(DR.Session):
    """
    Class for a Malargue observing session on a given year and DOY

    Attributes Inherited from Superclass::

      doy         (int)            - day of year for session
      logger      (logging.Logger) - logging.Logger object
      parent      (object)         - a data reduction session (mult. observ. sessions)
      year        (int)            -
      doy         (int)            -
      project     (str)            -
      session_dir (str)            - path to results from this session

    A session usually refers to a telescope, date and project. This will
    normally define a path to the session directory.
    """
    def __init__(self, parent=None, date=None, project=None, dss=None,
                 path=None):
        """
        initialize data reduction for one observing session

        Args
        ====
          parent:  (object) optional class for a data reduction tool
          date:    (int) required
          project: (str) required
          dss      (int) required
          path     (str) optional

        If `path` is given for a non-standard observing files location, and it does
        not exist, it will be created. Then the Recording and Observation instances
        must be directed to where the files are.
        """
        mylogger = logging.getLogger(logger.name+".Session")
        # NOTE(review): the ``path`` argument is not forwarded -- the
        # superclass always receives ``path=None``; confirm whether this is
        # intentional given the docstring above
        DR.Session.__init__(self, parent=parent, date=date, project=project,
                            dss=dss, path=None)
        self.logger = mylogger
        # side effect only: locates (and logs) the session's *.obs files;
        # the returned list is not kept
        metafiles = self.select_data_files(name_pattern="*.obs") # automatically
class Recording(DSN.Recording):
    """
    Metadata and directory for raw data files from DSA-3.

    On initialization every ``*.obs`` metadata file in the session directory
    is parsed into ``self.scans`` and each referenced channel data file has
    its header read (when the file exists).
    """
    def __init__(self, session=None, name=None,
                 dss=84, date=None, project=None):
        """
        Initialize a Recording object

        Args
        ====
          session:  (Session) optional
          name:     (str) optional; will be constructed if not given
          dss:      (int)
          date:     (str) `"YEAR/DOY"`
          project:  (str) `"SolarPatrol"`
        """
        DSN.Recording.__init__(self, session=session, name=name,
                               dss=dss, date=date)
        self.metafiles = self.session.select_data_files(name_pattern="*.obs")
        # self.scans maps .obs basename -> {scan number -> scan metadata}
        self.scans = {}
        for mfile in self.metafiles:
            key = os.path.basename(mfile) # the .obs file to parse
            self.logger.debug("__init__: parsing %s", key)
            self.scans[key] = self.parse_obs_file(filename=mfile)
            for scan in self.scans[key].keys():
                self.logger.debug("__init__: processing scan %s", scan)
                channels = self.scans[key][scan]['channel'].keys()
                self.logger.debug("__init__: scan channels: %s", channels)
                for channel in channels:
                    datafile = self.scans[key][scan]['channel'][channel]['file']
                    self.logger.debug("__init__: processing %s for channel %s",
                                      datafile, channel)
                    OLfile = RSData.File(sessionpath=session.session_dir,
                                         filename=datafile)
                    # a scan listed in the .obs file may have no recording;
                    # that is not fatal, just note the missing file
                    try:
                        self.scans[key][scan]['channel'][channel]['header'] = \
                                                              OLfile.readHeader()
                        self.logger.info(
                            "__init__: found header for scan %s, channel %s",
                            scan, channel)
                    except FileNotFoundError:
                        self.logger.warning("__init__: could not find %s", datafile)

    def parse_support_report(self, filename=None):
        """
        Parse a Malargue support report (``MLG*.txt``) into a key/value dict.

        Notes
        =====
        # The metadata may not always fall on the same lines. In that case,
        search for "OL Sample Rate", get its index, and adjust the offsets.
        """
        if filename:
            pass
        else:
            # NOTE(review): the glob searches the current working directory
            # while the open below uses the session directory -- confirm
            filename = glob.glob("MLG*.txt")[0]
        # bug fix: the report file handle was never closed; use a context
        # manager so it is released as soon as the lines are read
        with open(self.session.session_dir+filename) as report:
            lines = report.readlines() # starting from 1
        # anchor for the second column of values in the report layout
        col2_ofst = lines.index('CONAE\n') # line 49
        # track times: begin-of-activity ...
        start_index = lines.index('BOA\n') # line 4
        keys = [s.strip() for s in lines[start_index:start_index+2]]
        values = [s.strip() for s in lines[start_index+2:start_index+4]]
        # ... and end-of-track
        end_index = lines.index('EOT\n') # line 51
        keys += [s.strip() for s in lines[end_index:end_index+2]]
        values += [s.strip() for s in lines[end_index+2:end_index+4]]
        # Frequency Plan section
        frp_index = lines.index('Frequency Plan\n') # line 11
        chn_index = lines.index('Downlink Chains\n') # line 14
        keys += [s.strip() for s in lines[frp_index+1:chn_index]]
        # frequency plan values sit in the second column, offset-adjusted
        col2_ofst -= 2 # line 58
        first = col2_ofst + frp_index+1; last = first + (chn_index-frp_index-1)
        values += [s.strip() for s in lines[first:last]]
        # for the downlink chains we invent keys "link 0", "link 1", ...
        tcp_index = lines.index('TTCP Configuration\n') # line 17
        links = [s.strip() for s in lines[chn_index+1:tcp_index]]
        values += links
        for i in range(len(links)):
            keys.append("link "+str(i))
        # open loop configuration parameters
        pars_index = lines.index("OL Sample Rate\n") # line 21
        fec_index = lines.index('FEC Configuration\n') # line 41
        keys += [l.strip() for l in lines[pars_index:fec_index]]
        col2_ofst -= 5
        first = col2_ofst + pars_index # line 62
        last = first + (fec_index-pars_index)
        values += [k.strip() for k in lines[first:last]]
        return dict(zip(keys, values))

    def parse_obs_file(self, filename=None):
        """
        Extract metadata from a .obs file

        An ``.obs`` file has some preliminary observation followed by a section for
        each ``scan``, which might be a map or series of spectra::

          #=========================================================================================================
          #   SCAN_NUM   SRC_ID           START_TIME        STOP_TIME         RA         DEC        TFREQ
          #   --------   ---------------- ----------------- ----------------- ---------- ---------- ----------
          S   001        ...              2020-163T11:44:46 2020-163T11:54:49 999.000000 999.000000 31950000000.000000
          #     DATAFILE                              COH_FLAG DOR_MULT    FSUB             HARMONIC
          #     ------------------------------------- -------- ----------- ---------------- ----------
          D     NET4n001tSsMG12rOPc01-20163114446.prd F        xxx         -3.00000e+06     0
          D     NET4n001tSsMG12rOPc02-20163114446.prd F        xxx         -1.00000e+06     0
          ...
          Z
          #=========================================================================================================

        Lines with usable metadata are keyed with::

          D - datafile and subchannel data
          R - station 1
          S - scan
          T - station 2
          V - version
          Z - end of section

        If no list of filenames is given, it will take the first one, if there are
        any.
        """
        if filename is None:
            # search for it
            self.logger.debug("parse_obs_file: looking in %s", self.sessionpath)
            filenames = glob.glob(self.sessionpath+"*.obs")
            self.logger.debug("parse_obs_file: found %s", filenames)
            if len(filenames) == 0:
                self.logger.error("No observation file found in %s",
                                  self.sessionpath)
                raise RuntimeError("Data reduction requires an observation file")
            filename = filenames[0]
            if len(filenames) > 1:
                # bug fix: this warning previously logged ``filename`` before
                # it was assigned, so it always reported "using None"
                self.logger.warning("Found %d observation files; using %s",
                                    len(filenames), filename)
        with open(filename, "rt") as fd:
            lines = fd.readlines()
        scan = {}
        for line in lines:
            parts = line.strip().split()
            if line[0] == "#":
                # comment / table-header line
                pass
            elif line[0] == "S":
                # a new scan section
                scan_ID = int(parts[1])
                if float(parts[7]) == 0.0:
                    pass # use previously found frequency
                else:
                    freq = float(parts[7]) # use this for subsequent scans if necessary
                scan[scan_ID] = {"start":  DT.ISOtime2datetime(parts[3]),
                                 "stop":   DT.ISOtime2datetime(parts[4]),
                                 "ra2000": float(parts[5]),
                                 "dec000": float(parts[6]),
                                 "freq":   freq/1e6,
                                 "channel": {}}
            elif line[0] == "D":
                # file and channel data; the channel number is embedded in the
                # file name between 'c' and '-'
                filename = parts[1]
                ch_ptr1 = filename.index('c') # start of channel number in file name
                ch_ptr2 = filename.index('-') # end of channel number in file name
                chan = int(filename[ch_ptr1+1:ch_ptr2])
                scan[scan_ID]['channel'][chan] = {'file': filename}
                scan[scan_ID]['channel'][chan]['offset'] = float(parts[4])
        return scan
| 42.349869 | 116 | 0.571455 |
a926087fe2800b2290846285115bc83729a08026 | 132,991 | py | Python | src/genie/libs/parser/junos/show_chassis.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/junos/show_chassis.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/junos/show_chassis.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
''' show_chassis.py
Parser for the following show commands:
* show chassis alarms
* show chassis fpc detail
* show chassis fpc pic-status
* show chassis environment routing-engine
* show chassis environment
* show chassis environment fpc
* show chassis environment component
* show chassis fabric summary
* show chassis fabric plane
* show chassis firmware
* show chassis firmware no-forwarding
* show chassis fpc
* show chassis routing-engine
* show chassis routing-engine no-forwarding
* show chassis hardware
* show chassis hardware detail
* show chassis hardware detail no-forwarding
* show chassis hardware extensive
* show chassis hardware extensive no-forwarding
* show chassis power
* show chassis pic fpc-slot {fpc-slot} pic-slot {pic-slot}
'''
# python
import re
from genie.metaparser import MetaParser
from pyats.utils.exceptions import SchemaError
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, Schema, Or, ListOf)
class ShowChassisFpcDetailSchema(MetaParser):
    """Schema for:
            * show chassis fpc detail
    """

    schema = {
        Optional("@xmlns:junos"): str,
        "fpc-information": {
            Optional("@junos:style"): str,
            Optional("@xmlns"): str,
            "fpc": {
                "fips-capable": str,
                "fips-mode": str,
                "memory-ddr-dram-size": str,
                "memory-dram-size": str,
                "memory-rldram-size": str,
                "slot": str,
                # time fields are wrapped in an RPC-style "#text" container
                "start-time": {
                    "#text": str,
                    Optional("@junos:seconds"): str
                },
                "state": str,
                "temperature": {
                    "#text": str,
                    Optional("@junos:celsius"): str
                },
                "up-time": {
                    "#text": str,
                    Optional("@junos:seconds"): str
                }
            }
        }
    }
class ShowChassisFpcDetail(ShowChassisFpcDetailSchema):
    """ Parser for:
            * show chassis fpc detail
    """

    cli_command = 'show chassis fpc detail'

    def cli(self, output=None):
        """Parse device output into the ShowChassisFpcDetailSchema shape."""
        out = output or self.device.execute(self.cli_command)

        # Slot 0 information:
        p_slot = re.compile(r'^Slot +(?P<slot>\d+) +information:$')

        # Fields stored directly as strings, as (pattern, schema key) pairs:
        # State Online
        # Total CPU DRAM 511 MB
        # Total RLDRAM 10 MB
        # Total DDR DRAM 0 MB
        # FIPS Capable False
        # FIPS Mode False
        scalar_fields = (
            (re.compile(r'^State +(?P<state>\S+)$'), 'state'),
            (re.compile(r'^Total CPU DRAM +(?P<memory_dram_size>\d+)\sMB$'),
             'memory-dram-size'),
            (re.compile(r'^Total RLDRAM +(?P<memory_rldram_size>\d+)\sMB$'),
             'memory-rldram-size'),
            (re.compile(r'^Total DDR DRAM +(?P<memory_ddr_dram_size>\d+)\sMB$'),
             'memory-ddr-dram-size'),
            (re.compile(r'^FIPS Capable +(?P<fips_capable>\S+)$'),
             'fips-capable'),
            (re.compile(r'^FIPS Mode +(?P<fips_mode>\S+)$'), 'fips-mode'),
        )

        # Fields the schema wraps in a {"#text": value} container:
        # Temperature Testing
        # Start time 2019-08-29 09:09:16 UTC
        # Uptime 208 days, 22 hours, 50 minutes, 26 seconds
        text_fields = (
            (re.compile(r'^Temperature +(?P<temperature>\S+)$'), 'temperature'),
            (re.compile(r'^Start time +(?P<start_time>[\d\-\:A-Za-z ]+)$'),
             'start-time'),
            (re.compile(r'^Uptime +(?P<up_time>[\d\-\,A-Za-z ]+)$'), 'up-time'),
        )

        ret_dict = {}
        fpc_dict = None

        for line in out.splitlines():
            line = line.strip()

            # "Slot <n> information:" opens the fpc entry
            m = p_slot.match(line)
            if m:
                fpc_dict = ret_dict.setdefault("fpc-information", {})\
                    .setdefault("fpc", {})
                fpc_dict['slot'] = m.group('slot')
                continue

            matched = False
            for pattern, key in scalar_fields:
                m = pattern.match(line)
                if m:
                    # each pattern defines exactly one named group
                    fpc_dict[key] = next(iter(m.groupdict().values()))
                    matched = True
                    break
            if matched:
                continue

            for pattern, key in text_fields:
                m = pattern.match(line)
                if m:
                    fpc_dict[key] = {"#text": next(iter(m.groupdict().values()))}
                    break

        return ret_dict
class ShowChassisEnvironmentRoutingEngineSchema(MetaParser):
    """Schema for:
            * show chassis environment routing-engine
    """

    schema = {
        Optional("@xmlns:junos"): str,
        "environment-component-information": {
            Optional("@xmlns"): str,
            "environment-component-item": {
                "name": str,
                "state": str
            }
        }
    }
class ShowChassisEnvironmentRoutingEngine(ShowChassisEnvironmentRoutingEngineSchema):
    """ Parser for:
            * show chassis environment routing-engine
    """

    cli_command = 'show chassis environment routing-engine'

    def cli(self, output=None):
        """Parse device output into the schema shape."""
        out = output or self.device.execute(self.cli_command)

        # Routing Engine 0 status:
        p_header = re.compile(r'^(?P<name>[\S\s]+) +status:$')

        # State Online Master
        # (the group is called "name" but carries the state value)
        p_state = re.compile(r'^State +(?P<name>[\S\s]+)$')

        ret_dict = {}
        item_dict = None

        for line in out.splitlines():
            line = line.strip()

            # "... status:" opens the component item
            m = p_header.match(line)
            if m:
                item_dict = ret_dict\
                    .setdefault("environment-component-information", {})\
                    .setdefault("environment-component-item", {})
                item_dict['name'] = m.group('name')
                continue

            m = p_state.match(line)
            if m:
                item_dict['state'] = m.group('name')
                continue

        return ret_dict
class ShowChassisFirmwareSchema(MetaParser):
    """Schema for:
            * show chassis firmware

    A previous hand-written schema used a plain list for "firmware"; the
    active schema below expresses the repeated entries with ListOf.
    """

    schema = {
        "firmware-information": {
            Optional("@xmlns"): str,
            "chassis": {
                Optional("@junos:style"): str,
                "chassis-module": {
                    # one entry per firmware component of the module
                    "firmware": ListOf({
                        "firmware-version": str,
                        "type": str
                    }),
                    "name": str
                }
            }
        }
    }
class ShowChassisFirmware(ShowChassisFirmwareSchema):
    """ Parser for:
            * show chassis firmware
    """

    cli_command = 'show chassis firmware'

    def cli(self, output=None):
        """Parse device output into the ShowChassisFirmwareSchema shape."""
        out = output or self.device.execute(self.cli_command)

        # Part                     Type       Version
        p_header = re.compile(r'^Part +Type +Version$')

        # FPC 0                    ROM        PC Bios
        p_module = re.compile(r'^(?P<name>\S+\s+\d+) +(?P<type>\S+) +(?P<firmware>\S+\s+\S+)$')

        # O/S                    Version 19.2R1.8 by builder on 2019-06-21 17:52:23 UTC
        p_entry = re.compile(r'^(?P<type>\S+) +(?P<firmware>[\s\S]+)$')

        ret_dict = {}
        firmware_list = None

        # skip the banner line, then walk the table
        for line in out.splitlines()[1:]:
            line = line.strip()

            if p_header.match(line):
                continue

            # a "<part> <type> <version>" row opens/extends the module entry
            m = p_module.match(line)
            if m:
                group = m.groupdict()
                module_dict = ret_dict.setdefault("firmware-information", {})\
                    .setdefault("chassis", {}).setdefault("chassis-module", {})
                module_dict["name"] = group["name"]
                firmware_list = module_dict.setdefault("firmware", [])
                firmware_list.append({
                    "firmware-version": group["firmware"],
                    "type": group["type"],
                })
                continue

            # continuation rows carry only type and version
            m = p_entry.match(line)
            if m:
                group = m.groupdict()
                firmware_list.append({
                    "firmware-version": group["firmware"],
                    "type": group["type"],
                })
                continue

        return ret_dict
class ShowChassisFirmwareNoForwarding(ShowChassisFirmware):
    """ Parser for:
            - show chassis firmware no-forwarding
    """

    cli_command = [
        'show chassis firmware no-forwarding'
    ]

    def cli(self, output=None):
        """Collect the no-forwarding variant and delegate parsing."""
        out = output or self.device.execute(self.cli_command[0])
        return super().cli(output=out)
class ShowChassisHardwareSchema(MetaParser):
    """Schema for:
            * show chassis hardware (and the detail/extensive variants)

    A previous hand-written schema used plain lists for the repeated
    module entries; the active schema below uses ListOf throughout.
    """

    schema = {
        Optional("@xmlns:junos"): str,
        "chassis-inventory": {
            Optional("@xmlns"): str,
            "chassis": {
                Optional("@junos:style"): str,
                # one entry per chassis module (FPC, CB, Routing Engine, ...)
                Optional("chassis-module"): ListOf({
                    # RE memory DIMMs (detail/extensive output only)
                    Optional("chassis-re-dimm-module"): ListOf({
                        "die-rev": str,
                        "mfr-id": str,
                        "name": str,
                        "part-number": str,
                        "pcb-rev": str,
                    }),
                    # RE disks (detail/extensive output only)
                    Optional("chassis-re-disk-module"): ListOf({
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    }),
                    # RE USB devices (detail/extensive output only)
                    Optional("chassis-re-usb-module"): ListOf({
                        Optional("description"): str,
                        "name": str,
                        "product": str,
                        "product-number": str,
                        "vendor": str,
                    }),
                    # nested sub-modules, up to three levels deep
                    Optional("chassis-sub-module"): ListOf({
                        Optional("chassis-sub-sub-module"): ListOf({
                            Optional("description"): str,
                            Optional("name"): str,
                            Optional("part-number"): str,
                            Optional("serial-number"): str,
                            Optional("chassis-sub-sub-sub-module"): ListOf({
                                Optional("description"): str,
                                Optional("name"): str,
                                Optional("part-number"): str,
                                Optional("serial-number"): str,
                                Optional("version"): str
                            })
                        }),
                        Optional("description"): str,
                        Optional("name"): str,
                        Optional("part-number"): str,
                        Optional("serial-number"): str,
                        Optional("version"): str
                    }),
                    Optional("description"): str,
                    Optional("name"): str,
                    Optional("part-number"): str,
                    Optional("serial-number"): str,
                    Optional("version"): str,
                }),
                Optional("description"): str,
                Optional("name"): str,
                Optional("serial-number"): str
            }
        }
    }
class ShowChassisHardware(ShowChassisHardwareSchema):
    """ Parser for:
            * show chassis hardware

        Converts the tabular CLI output into the nested dict validated by
        ShowChassisHardwareSchema.
    """
    cli_command = 'show chassis hardware'

    def cli(self, output=None):
        # Use the supplied output when given (e.g. unit tests); otherwise
        # run the command on the connected device.
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        #Hardware inventory:
        p1 = re.compile(r'^Hardware +(?P<style>\S+):$')

        # Chassis VM5D4C6B3599 VMX
        p_chassis = re.compile(r'^(?P<name>Chassis) +(?P<serial_number>[A-Z\d]+)'
                               r' +(?P<description>\S+)$')

        # -------------------------------------------------------------------------------------
        # For general chassis modules, for example:
        # -------------------------------------------------------------------------------------
        # Midplane REV 64 750-040240 ABAC9716 Lower Backplane
        # Midplane 1 REV 06 711-032386 ABAC9742 Upper Backplane
        p_module0 = re.compile(r'(?P<name>Midplane( \d+)?) +(?P<version>\w+ \d+)'
                               r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+) '
                               r'+(?P<description>[\s\S]+)$')

        # Routing Engine 0 REV 01 740-052100 9009237267 RE-S-1800x4
        # CB 0 REV 10 750-051985 CAFC0322 Control Board
        # FPC 0 REV 72 750-044130 ABDF7568 MPC6E 3D
        # SPMB 0 REV 04 711-041855 ABDC5673 PMB Board
        # SFB 0 REV 06 711-044466 ABCY8621 Switch Fabric Board
        # ADC 9 REV 21 750-043596 ABDC2129 Adapter Card
        # Fan Tray 0 REV 01 760-052467 ACAY4748 172mm FanTray - 6 Fans
        # FPM Board REV 13 760-040242 ABDD0194 Front Panel Display
        # PDM 3 REV 01 740-050036 1EFD3390136 DC Power Dist Module
        # PSM 11 REV 04 740-050037 1EDB527002P DC 52V Power Supply Module
        # PMP 1 REV 01 711-051408 ACAJ5284 Upper Power Midplane
        # version/part-number/serial-number are optional here because
        # virtual components (e.g. "FPC 0 Virtual FPC") omit them.
        p_module1 = re.compile(r'^(?P<name>(Routing Engine|CB|FPC|SPMB|SFB|ADC|Fan Tray|FPM|PDM|PSM|PMP) (\d+|Board))( +(?P<version>\w+ \d+)'
                               r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+))? '
                               r'+(?P<description>[\s\S]+)$')

        # Midplane
        # NOTE(review): this matches ANY single-token line, not only
        # "Midplane"; it relies on the more specific patterns above being
        # tried first in the `or` chain below.
        p_module2 = re.compile(r'^(?P<name>\S+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # CPU REV 12 711-045719 ABDF7304 RMPC PMB
        # MIC 0 REV 19 750-049457 ABDJ2346 2X100GE CFP2 OTN
        # XLM 0 REV 14 711-046638 ABDF2862 MPC6E XL
        p_sub_module = re.compile(r'^(?P<name>CPU|(MIC|XLM)\s\d+) +(?P<version>\w+ \d+)'
                                  r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+) '
                                  r'+(?P<description>[\s\S]+)$')

        # CPU Rev. 1.0 RIOT-LITE BUILTIN
        p_sub_module_2 = re.compile(r'(?P<name>CPU) +(?P<version>[\s\S]+) +(?P<part_number>[A-Z\-]+)'
                                    r' +(?P<serial_number>[A-Z]+)')

        # MIC 0 Virtual
        p_sub_module_3 = re.compile(r'(?P<name>MIC\s\d+) +(?P<description>\S+)')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # PIC 0 BUILTIN BUILTIN 2X100GE CFP2 OTN
        p_sub_sub_module = re.compile(r'^(?P<name>PIC\s\d+) +(?P<part_number>[A-Z]+) '
                                      r'+(?P<serial_number>[A-Z]+) +(?P<description>[\s\S]+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-sub-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # Xcvr 0 REV 01 740-052504 UW811XC CFP2-100G-LR4
        p_sub_sub_sub_module = re.compile(r'^(?P<name>Xcvr\s\d+)( +(?P<version>(\w+ \d+)|(\S+)))?'
                                          r' +(?P<part_number>[\d\-]+|NON-JNPR) +(?P<serial_number>[A-Z\d]+)'
                                          r' +(?P<description>[\s\S]+)$')

        res = {}
        # State shared across iterations:
        #   chassis_inventory_dict / chassis_modules_list -- created when the
        #       "Hardware inventory:" header is seen.
        #   module_dict -- the most recently appended top-level module;
        #       sub-module lines attach their data beneath it.
        for line in out.splitlines():
            line = line.strip()

            #Hardware inventory:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                res = {
                    "chassis-inventory":{
                        "chassis":{
                        }
                    }
                }
                chassis_inventory_dict = res["chassis-inventory"]["chassis"]
                chassis_inventory_dict["@junos:style"] = group["style"]
                chassis_inventory_dict["chassis-module"] = []
                chassis_modules_list = chassis_inventory_dict["chassis-module"]
                continue

            # Chassis VM5D4C6B3599 VMX
            m = p_chassis.match(line)
            if m:
                # Regex group names use '_' but schema keys use '-'.
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        chassis_inventory_dict[k] = v.strip()
                continue

            # -------------------------------------------------------------------------------------
            # For general chassis modules, for example:
            # -------------------------------------------------------------------------------------
            # Midplane REV 64 750-040240 ABAC9716 Lower Backplane
            # Midplane 1 REV 06 711-032386 ABAC9742 Upper Backplane
            # Routing Engine 0 REV 01 740-052100 9009237267 RE-S-1800x4
            # Routing Engine 0 RE-VMX
            # CB 0 VMX SCB
            # FPC 0 Virtual FPC
            # SPMB 0 REV 04 711-041855 ABDC5673 PMB Board
            # SFB 0 REV 06 711-044466 ABCY8621 Switch Fabric Board
            # ADC 9 REV 21 750-043596 ABDC2129 Adapter Card
            # Fan Tray 0 REV 01 760-052467 ACAY4748 172mm FanTray - 6 Fans
            # FPM Board REV 13 760-040242 ABDD0194 Front Panel Display
            # Midplane
            m = p_module0.match(line) or p_module1.match(line) or p_module2.match(line)
            if m:
                module_dict = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        module_dict[k] = v.strip()
                chassis_modules_list.append(module_dict)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # CPU REV 12 711-045719 ABDF7304 RMPC PMB
            # MIC 0 REV 19 750-049457 ABDJ2346 2X100GE CFP2 OTN
            # XLM 0 REV 14 711-046638 ABDF2862 MPC6E XL
            # MIC 0 Virtual
            # CPU Rev. 1.0 RIOT-LITE BUILTIN
            # NOTE(review): assumes a module line was seen earlier so that
            # module_dict exists -- true for well-formed CLI output.
            m = p_sub_module.match(line) or p_sub_module_2.match(line) or p_sub_module_3.match(line)
            if m:
                if "chassis-sub-module" not in module_dict:
                    module_dict["chassis-sub-module"] = []
                re_sub_module_list = module_dict["chassis-sub-module"]
                last_sub_sub_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        last_sub_sub_item[k] = v.strip()
                re_sub_module_list.append(last_sub_sub_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # PIC 0 BUILTIN BUILTIN 2X100GE CFP2 OTN
            m = p_sub_sub_module.match(line)
            if m:
                # find the sub module
                last_sub_item = module_dict["chassis-sub-module"][-1]
                if "chassis-sub-sub-module" not in last_sub_item:
                    last_sub_item["chassis-sub-sub-module"] = []
                re_sub_sub_module_item_list = last_sub_item["chassis-sub-sub-module"]
                re_sub_sub_module_list_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_sub_sub_module_list_item[k] = v.strip()
                re_sub_sub_module_item_list.append(re_sub_sub_module_list_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-sub-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # Xcvr 0 REV 01 740-052504 UW811XC CFP2-100G-LR4
            m = p_sub_sub_sub_module.match(line)
            if m:
                # the last appended item
                last_sub_sub_item = module_dict["chassis-sub-module"][-1]["chassis-sub-sub-module"][-1]
                if "chassis-sub-sub-sub-module" not in last_sub_sub_item:
                    last_sub_sub_item["chassis-sub-sub-sub-module"] = []
                re_sub_sub_sub_module_list = last_sub_sub_item["chassis-sub-sub-sub-module"]
                re_sub_sub_sub_module_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_sub_sub_sub_module_item[k] = v.strip()
                re_sub_sub_sub_module_list.append(re_sub_sub_sub_module_item)
                continue

        return res
class ShowChassisHardwareDetailSchema(MetaParser):
    '''
    Schema for 'show chassis hardware detail'
    schema = {
        "chassis-inventory": {
            "chassis": {
                "chassis-module": [
                    {
                        Optional("chassis-re-dimm-module"): [
                            {
                                "die-rev": str,
                                "mfr-id": str,
                                "name": str,
                                "part-number": str,
                                "pcb-rev": str,
                            }
                        ],
                        Optional("chassis-re-disk-module"): [
                            {
                                "description": str,
                                "disk-size": str,
                                "model": str,
                                "name": str,
                                "serial-number": str
                            },
                        ],
                        Optional("chassis-re-usb-module"): [
                            {
                                "description": str,
                                "name": str,
                                "product": str,
                                "product-number": str,
                                "vendor": str,
                            },
                        ],
                        Optional("chassis-sub-module"): [
                            {
                                "chassis-sub-sub-module": {
                                    "description": str,
                                    "name": str,
                                    "part-number": str,
                                    "serial-number": str,
                                    Optional("chassis-sub-sub-sub-module"): [
                                        "description": str,
                                        "name": str,
                                        "part-number": str,
                                        "serial-number": str,
                                        Optional("version"): str
                                    ]
                                },
                                Optional("description"): str,
                                "name": str,
                                "part-number": str,
                                "serial-number": str,
                                "version": str
                            }
                        ],
                        "description": str,
                        "name": str,
                        Optional("part-number"): str,
                        Optional("serial-number"): str,
                        Optional("version"): str,
                    }
                ],
                "description": str,
                "name": str,
                "serial-number": str
            }
        }
    }
    '''

    # Runtime schema.  Structurally identical to ShowChassisHardwareSchema's
    # runtime schema: the "detail" output adds Routing Engine disk/usb/dimm
    # rows, which are covered by the chassis-re-*-module ListOf entries.
    schema = {
        Optional("@xmlns:junos"): str,
        "chassis-inventory": {
            Optional("@xmlns"): str,
            "chassis": {
                Optional("@junos:style"): str,
                Optional("chassis-module"): ListOf({
                    Optional("chassis-re-dimm-module"): ListOf({
                        "die-rev": str,
                        "mfr-id": str,
                        "name": str,
                        "part-number": str,
                        "pcb-rev": str,
                    }),
                    Optional("chassis-re-disk-module"): ListOf({
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    }),
                    Optional("chassis-re-usb-module"): ListOf({
                        Optional("description"): str,
                        "name": str,
                        "product": str,
                        "product-number": str,
                        "vendor": str,
                    }),
                    Optional("chassis-sub-module"): ListOf({
                        Optional("chassis-sub-sub-module"): ListOf({
                            Optional("description"): str,
                            Optional("name"): str,
                            Optional("part-number"): str,
                            Optional("serial-number"): str,
                            Optional("chassis-sub-sub-sub-module"): ListOf({
                                Optional("description"): str,
                                Optional("name"): str,
                                Optional("part-number"): str,
                                Optional("serial-number"): str,
                                Optional("version"): str
                            })
                        }),
                        Optional("description"): str,
                        Optional("name"): str,
                        Optional("part-number"): str,
                        Optional("serial-number"): str,
                        Optional("version"): str
                    }),
                    Optional("description"): str,
                    Optional("name"): str,
                    Optional("part-number"): str,
                    Optional("serial-number"): str,
                    Optional("version"): str,
                }),
                Optional("description"): str,
                Optional("name"): str,
                Optional("serial-number"): str
            }
        }
    }
class ShowChassisHardwareDetail(ShowChassisHardwareDetailSchema):
    """ Parser for:
            * show chassis hardware detail

        Same structure as ShowChassisHardware.cli plus handling for the
        Routing Engine disk/usb/dimm rows that only appear in "detail"
        output.
    """
    cli_command = 'show chassis hardware detail'

    def cli(self, output=None):
        # Use supplied output when given (e.g. unit tests); otherwise run
        # the command on the connected device.
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # Hardware inventory:
        p1 = re.compile(r'^Hardware +(?P<style>\S+):$')

        # Chassis VM5D4C6B3599 VMX
        p_chassis = re.compile(r'^(?P<name>Chassis) +(?P<serial_number>[A-Z\d]+)'
                               r' +(?P<description>\S+)$')

        # -------------------------------------------------------------------------------------
        # For general chassis modules, for example:
        # -------------------------------------------------------------------------------------
        # Midplane REV 64 750-040240 ABAC9716 Lower Backplane
        # Midplane 1 REV 06 711-032386 ABAC9742 Upper Backplane
        p_module0 = re.compile(r'(?P<name>Midplane( \d+)?) +(?P<version>\w+ \d+)'
                               r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+) '
                               r'+(?P<description>[\s\S]+)$')

        # Routing Engine 0 REV 01 740-052100 9009237267 RE-S-1800x4
        # Routing Engine 0 RE-VMX
        # CB 0 VMX SCB
        # FPC 0 Virtual FPC
        # SPMB 0 REV 04 711-041855 ABDC5673 PMB Board
        # SFB 0 REV 06 711-044466 ABCY8621 Switch Fabric Board
        # ADC 9 REV 21 750-043596 ABDC2129 Adapter Card
        # Fan Tray 0 REV 01 760-052467 ACAY4748 172mm FanTray - 6 Fans
        # FPM Board REV 13 760-040242 ABDD0194 Front Panel Display
        # PDM 3 REV 01 740-050036 1EFD3390136 DC Power Dist Module
        p_module1 = re.compile(r'^(?P<name>(Routing Engine|CB|FPC|SPMB|SFB|ADC|Fan Tray|FPM|PDM|PSM|PMP) (\d+|Board))( +(?P<version>\w+ \d+)'
                               r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+))? '
                               r'+(?P<description>[\s\S]+)$')

        # Midplane
        # Unlike ShowChassisHardware, this variant matches only the literal
        # "Midplane" token.
        p_module2 = re.compile(r'^(?P<name>Midplane)$')

        # -------------------------------------------------------------------------------------
        # For chassis-re-disk-module, for example:
        # -------------------------------------------------------------------------------------
        # ad0 3919 MB 604784 000060095234B000018D Compact Flash
        # ad1 28496 MB StorFly - VSFA18PI032G- P1T12003591504100303 Disk 1
        p_re_disk = re.compile(r'^(?P<name>\w+) +(?P<disk_size>\d+) +MB +(?P<model>[\s\S]+) '
                               r'+(?P<serial_number>[A-Z\d]{20}) +(?P<description>[\s\S]+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-re-usb-module, for example:
        # -------------------------------------------------------------------------------------
        # usb0 (addr 1) EHCI root hub 0 Intel uhub0
        # usb0 (addr 2) product 0x0020 32 vendor 0x8087 uhub1
        p_re_usb = re.compile(r'^(?P<name>usb\d +\(addr +\d\)) +(?P<product>[\s\S]+) '
                              r'+(?P<product_number>\d+) +(?P<vendor>[\s\S]+) '
                              r'+(?P<description>[a-z0-9]+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-re-dimm-module, for example:
        # -------------------------------------------------------------------------------------
        # DIMM 0 VL33B1G63F-K9SQ-KC DIE REV-0 PCB REV-0 MFR ID-ce80
        p_re_dimm = re.compile(r'^(?P<name>[A-Z\s\d]+) +(?P<part_number>[A-Z\d\-]+) '
                               r'+(?P<die_rev>DIE REV-\d+) +(?P<pcb_rev>PCB REV-\d+) '
                               r'+(?P<mfr_id>MFR ID\-\w+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # CPU REV 12 711-045719 ABDF7304 RMPC PMB
        # MIC 0 REV 19 750-049457 ABDJ2346 2X100GE CFP2 OTN
        # XLM 0 REV 14 711-046638 ABDF2862 MPC6E XL
        p_sub_module = re.compile(r'^(?P<name>CPU|(MIC|XLM)\s\d+) +(?P<version>\w+ \d+)'
                                  r' +(?P<part_number>[\d\-]+) +(?P<serial_number>[A-Z\d]+) '
                                  r'+(?P<description>[\s\S]+)$')

        # CPU Rev. 1.0 RIOT-LITE BUILTIN
        p_sub_module_2 = re.compile(r'(?P<name>CPU) +(?P<version>[\s\S]+) +(?P<part_number>[A-Z\-]+)'
                                    r' +(?P<serial_number>[A-Z]+)')

        # MIC 0 Virtual
        p_sub_module_3 = re.compile(r'(?P<name>MIC\s\d+) +(?P<description>\S+)')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # PIC 0 BUILTIN BUILTIN 2X100GE CFP2 OTN
        p_sub_sub_module = re.compile(r'^(?P<name>PIC\s\d+) +(?P<part_number>[A-Z]+) '
                                      r'+(?P<serial_number>[A-Z]+) +(?P<description>[\s\S]+)$')

        # -------------------------------------------------------------------------------------
        # For chassis-sub-sub-sub-module, for example:
        # -------------------------------------------------------------------------------------
        # Xcvr 0 REV 01 740-052504 UW811XC CFP2-100G-LR4
        # Xcvr 5 NON-JNPR AGM1049Q4E4 SFP-T
        # Xcvr 8 l* NON-JNPR AGM17082139 SFP-T
        # Xcvr 9 } NON-JNPR AGM1708212S SFP-T
        p_sub_sub_sub_module = re.compile(r'^(?P<name>Xcvr\s\d+)( +(?P<version>(\w+ \d+)|(\S+)))?'
                                          r' +(?P<part_number>[\d\-]+|NON-JNPR) +(?P<serial_number>[A-Z\d]+)'
                                          r' +(?P<description>[\s\S]+)$')

        res = {}
        # State shared across iterations: module_dict always refers to the
        # most recently appended top-level module; disk/usb/dimm/sub-module
        # rows attach their data beneath it.
        for line in out.splitlines():
            line = line.strip()

            #Hardware inventory:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                res = {
                    "chassis-inventory":{
                        "chassis":{
                        }
                    }
                }
                chassis_inventory_dict = res["chassis-inventory"]["chassis"]
                chassis_inventory_dict["@junos:style"] = group["style"]
                chassis_inventory_dict["chassis-module"] = []
                chassis_modules_list = chassis_inventory_dict["chassis-module"]
                continue

            # Chassis VM5D4C6B3599 VMX
            m = p_chassis.match(line)
            if m:
                # Regex group names use '_' but schema keys use '-'.
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        chassis_inventory_dict[k] = v.strip()
                continue

            # -------------------------------------------------------------------------------------
            # For general chassis modules, for example:
            # -------------------------------------------------------------------------------------
            # Midplane REV 64 750-040240 ABAC9716 Lower Backplane
            # Midplane 1 REV 06 711-032386 ABAC9742 Upper Backplane
            # Routing Engine 0 REV 01 740-052100 9009237267 RE-S-1800x4
            # Routing Engine 0 RE-VMX
            # CB 0 VMX SCB
            # FPC 0 Virtual FPC
            # SPMB 0 REV 04 711-041855 ABDC5673 PMB Board
            # SFB 0 REV 06 711-044466 ABCY8621 Switch Fabric Board
            # ADC 9 REV 21 750-043596 ABDC2129 Adapter Card
            # Fan Tray 0 REV 01 760-052467 ACAY4748 172mm FanTray - 6 Fans
            # FPM Board REV 13 760-040242 ABDD0194 Front Panel Display
            # Midplane
            m = p_module0.match(line) or p_module1.match(line) or p_module2.match(line)
            if m:
                module_dict = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        module_dict[k] = v.strip()
                chassis_modules_list.append(module_dict)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-re-disk-module, for example:
            # -------------------------------------------------------------------------------------
            # ad0 3919 MB 604784 000060095234B000018D Compact Flash
            # ad1 28496 MB StorFly - VSFA18PI032G- P1T12003591504100303 Disk 1
            m = p_re_disk.match(line)
            if m:
                if "chassis-re-disk-module" not in module_dict:
                    module_dict["chassis-re-disk-module"] = []
                re_disk_module_list = module_dict["chassis-re-disk-module"]
                re_disk_module_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_disk_module_item[k] = v.strip()
                re_disk_module_list.append(re_disk_module_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-re-usb-module, for example:
            # -------------------------------------------------------------------------------------
            # usb0 (addr 1) EHCI root hub 0 Intel uhub0
            # usb0 (addr 2) product 0x0020 32 vendor 0x8087 uhub1
            m = p_re_usb.match(line)
            if m:
                if "chassis-re-usb-module" not in module_dict:
                    module_dict["chassis-re-usb-module"] = []
                re_usb_module_list = module_dict["chassis-re-usb-module"]
                re_usb_module_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_usb_module_item[k] = v.strip()
                re_usb_module_list.append(re_usb_module_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-re-dimm-module, for example:
            # -------------------------------------------------------------------------------------
            # DIMM 0 VL33B1G63F-K9SQ-KC DIE REV-0 PCB REV-0 MFR ID-ce80
            m = p_re_dimm.match(line)
            if m:
                if "chassis-re-dimm-module" not in module_dict:
                    module_dict["chassis-re-dimm-module"] = []
                re_usb_dimm_list = module_dict["chassis-re-dimm-module"]
                re_usb_dimm_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_usb_dimm_item[k] = v.strip()
                re_usb_dimm_list.append(re_usb_dimm_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # CPU REV 12 711-045719 ABDF7304 RMPC PMB
            # MIC 0 REV 19 750-049457 ABDJ2346 2X100GE CFP2 OTN
            # XLM 0 REV 14 711-046638 ABDF2862 MPC6E XL
            # MIC 0 Virtual
            # CPU Rev. 1.0 RIOT-LITE BUILTIN
            m = p_sub_module.match(line) or p_sub_module_2.match(line) or p_sub_module_3.match(line)
            if m:
                if "chassis-sub-module" not in module_dict:
                    module_dict["chassis-sub-module"] = []
                re_sub_module_list = module_dict["chassis-sub-module"]
                last_sub_sub_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        last_sub_sub_item[k] = v.strip()
                re_sub_module_list.append(last_sub_sub_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # PIC 0 BUILTIN BUILTIN 2X100GE CFP2 OTN
            m = p_sub_sub_module.match(line)
            if m:
                # find the sub module
                last_sub_item = module_dict["chassis-sub-module"][-1]
                if "chassis-sub-sub-module" not in last_sub_item:
                    last_sub_item["chassis-sub-sub-module"] = []
                re_sub_sub_module_item_list = last_sub_item["chassis-sub-sub-module"]
                re_sub_sub_module_list_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_sub_sub_module_list_item[k] = v.strip()
                re_sub_sub_module_item_list.append(re_sub_sub_module_list_item)
                continue

            # -------------------------------------------------------------------------------------
            # For chassis-sub-sub-sub-module, for example:
            # -------------------------------------------------------------------------------------
            # Xcvr 0 REV 01 740-052504 UW811XC CFP2-100G-LR4
            m = p_sub_sub_sub_module.match(line)
            if m:
                # the last appended item
                last_sub_sub_item = module_dict["chassis-sub-module"][-1]["chassis-sub-sub-module"][-1]
                if "chassis-sub-sub-sub-module" not in last_sub_sub_item:
                    last_sub_sub_item["chassis-sub-sub-sub-module"] = []
                re_sub_sub_sub_module_list = last_sub_sub_item["chassis-sub-sub-sub-module"]
                re_sub_sub_sub_module_item = {}
                for k,v in m.groupdict().items():
                    k = k.replace('_', '-')
                    if v:
                        re_sub_sub_sub_module_item[k] = v.strip()
                re_sub_sub_sub_module_list.append(re_sub_sub_sub_module_item)
                continue

        return res
class ShowChassisHardwareDetailNoForwarding(ShowChassisHardwareDetail):
    """ Parser for:
            - show chassis hardware detail no-forwarding

        Only the issued command differs; all parsing is delegated to
        ShowChassisHardwareDetail.
    """
    cli_command = ['show chassis hardware detail no-forwarding']

    def cli(self, output=None):
        # Fetch from the device only when no output was passed in,
        # then reuse the parent parser.
        out = output or self.device.execute(self.cli_command[0])
        return super().cli(output=out)
class ShowChassisHardwareExtensiveSchema(MetaParser):
    """schema = {
    Optional("@xmlns:junos"): str,
    "chassis-inventory": {
        Optional("@xmlns"): str,
        "chassis": {
            Optional("@junos:style"): str,
            "chassis-module": [
                {
                    "chassis-re-disk-module": {
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    },
                    "chassis-sub-module": [
                        {
                            "chassis-sub-sub-module": {
                                "description": str,
                                "name": str,
                                "part-number": str,
                                "serial-number": str
                            },
                            "description": str,
                            "name": str,
                            "part-number": str,
                            "serial-number": str,
                            "version": str
                        }
                    ],
                    "description": str,
                    "name": str
                }
            ],
            "description": str,
            "i2c-information": {
                "assembly-flags": str,
                "assembly-identifier": str,
                "assembly-version": str,
                "board-information-record": str,
                "eeprom-version": str,
                "i2c-data": list,
                "i2c-identifier": str,
                "i2c-version": str,
                "jedec-code": str,
                "manufacture-date": str,
                "part-number": str,
                "serial-number": str
            },
            "name": str,
            "serial-number": str
        }
    }
}"""

    # Runtime schema.  "extensive" output attaches an i2c-information
    # (EEPROM) section at the chassis, module and sub-module levels.
    # Or(str, None) is used because the parser explicitly stores None for
    # fields absent from a given I2C section.
    schema = {
        Optional("@xmlns:junos"): str,
        "chassis-inventory": {
            Optional("@xmlns"): str,
            "chassis": {
                Optional("@junos:style"): str,
                "chassis-module": ListOf({
                    Optional("chassis-re-disk-module"): {
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    },
                    Optional("chassis-sub-module"): ListOf({
                        Optional("chassis-sub-sub-module"): {
                            "description": str,
                            "name": str,
                            "part-number": str,
                            "serial-number": str
                        },
                        Optional("description"): str,
                        Optional("i2c-information"): {
                            "assembly-flags": str,
                            "assembly-identifier": str,
                            "assembly-version": str,
                            "board-information-record": str,
                            "eeprom-version": str,
                            Optional("i2c-data"): list,
                            Optional("i2c-identifier"): Or(str, None),
                            "i2c-version": Or(str, None),
                            "jedec-code": str,
                            "manufacture-date": str,
                            "part-number": Or(str, None),
                            Optional("serial-number"): Or(str,None)
                        },
                        "name": str,
                        Optional("part-number"): str,
                        Optional("serial-number"): str,
                        Optional("version"): str
                    }),
                    Optional("description"): str,
                    Optional("i2c-information"): {
                        "assembly-flags": str,
                        "assembly-identifier": str,
                        "assembly-version": str,
                        "board-information-record": str,
                        "eeprom-version": str,
                        Optional("i2c-data"): list,
                        Optional("i2c-identifier"): Or(str, None),
                        "i2c-version": Or(str, None),
                        "jedec-code": str,
                        "manufacture-date": str,
                        "part-number": Or(str, None),
                        Optional("serial-number"): Or(str,None)
                    },
                    "name": str,
                    Optional("serial-number"): str
                }),
                "description": str,
                Optional("i2c-information"): {
                    "assembly-flags": str,
                    "assembly-identifier": str,
                    "assembly-version": str,
                    "board-information-record": str,
                    "eeprom-version": str,
                    Optional("i2c-data"): list,
                    Optional("i2c-identifier"): Or(str, None),
                    "i2c-version": Or(str, None),
                    "jedec-code": str,
                    "manufacture-date": str,
                    "part-number": Or(str, None),
                    Optional("serial-number"): Or(str, None)
                },
                "name": str,
                "serial-number": str
            }
        }
    }
class ShowChassisHardwareExtensive(ShowChassisHardwareExtensiveSchema):
    """ Parser for:
            * show chassis hardware extensive

        State machine over the CLI output.  Each inventory item is followed
        by an I2C/EEPROM block; the parser accumulates that block into
        ``i2c_dict`` and attaches it to the *previous* item when the NEXT
        item header line is seen (``current_item`` remembers which item the
        pending I2C block belongs to).

        NOTE(review): the branch conditions compare ``current_item``
        against literal names ("CB 0", "FPC 0", "Routing Engine 0", ...),
        so this parser assumes the item ordering of the sample (vMX-style)
        output -- verify before reusing with other platforms.
    """
    cli_command = 'show chassis hardware extensive'

    def cli(self, output=None):
        # Use supplied output when given (e.g. unit tests); otherwise run
        # the command on the connected device.
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        #Hardware inventory:
        p1 = re.compile(r'^Hardware +(?P<style>\S+):$')

        #Jedec Code: 0x7fb0 EEPROM Version: 0x02
        p2 = re.compile(r'^Jedec Code: +(?P<jedec_code>\S+) '
                        r'+EEPROM Version: +(?P<eeprom_version>\S+)$')

        #S/N: VM5D4C6B3599
        p3 = re.compile(r'^S/N: +(?P<serial_number>\S+)$')

        #Assembly ID: 0x0567 Assembly Version: 00.00
        p4 = re.compile(r'^Assembly ID: +(?P<assembly_identifier>\S+) '
                        r'+Assembly Version: +(?P<assembly_version>\S+)$')

        #Date: 00-00-0000 Assembly Flags: 0x00
        p5 = re.compile(r'^Date: +(?P<manufacture_date>\S+) +Assembly Flags: '
                        r'+(?P<assembly_flags>\S+)$')

        #ID: VMX
        p6 = re.compile(r'^ID: +(?P<i2c_identifier>[\S\s]+)$')

        #Board Information Record:
        p7 = re.compile(r'^(?P<address_type>\ABoard Information Record):$')

        #I2C Hex Data:
        p8 = re.compile(r'^(?P<address_type>\AI2C Hex Data:)$')

        #Address 0x00: 7f b0 02 00 fa 4e 01 00 52 65 76 2e 20 31 2e 30
        p9 = re.compile(r'^(?P<address_info>\AAddress[\s\S]+)$')

        #FPC 0 Virtual FPC
        #CB 0 VMX SCB
        p10 = re.compile(r'^(?P<name>(\S+\s\d+)) +(?P<description>\S+\s\S+)$')

        #Routing Engine 0 RE-VMX
        p11 = re.compile(r'^(?P<name>\S+\s+\S+\s+\d+) +(?P<description>\S+)$')

        #cd0 27649 MB VMware Virtual IDE Har 00000000000000000001 Hard Disk
        p12 = re.compile(r'^(?P<name>\S+) +(?P<disk_size>\d+) '
                         r'+MB +(?P<model>\S+\s+\S+\s+\S+\s+\S+) '
                         r'+(?P<serial_number>\d+) +(?P<description>'
                         r'\S+\s+\S+)$')

        #CPU Rev. 1.0 RIOT-LITE BUILTIN
        p13 = re.compile(r'^(?P<name>\S+) +(?P<version>[\S\.\d]+ '
                         r'[\S\.\d]+) +(?P<part_number>[\S\-]+) +'
                         r'(?P<serial_number>\S+)$')

        #MIC 0 Virtual
        p14 = re.compile(r'^(?P<name>\S+ \d+) +(?P<description>\S+)$')

        #PIC 0 BUILTIN BUILTIN Virtual
        p15 = re.compile(r'^(?P<name>\S+ \d+) +(?P<part_number>\S+) '
                         r'+(?P<serial_number>\S+) +(?P<description>\S+)$')

        #Version: Rev. 1.0
        p111 = re.compile(r'^Version: +(?P<version>[\S\s]+)$')

        #Chassis VM5D4C6B3599 VMX
        p16 = re.compile(r'^(?P<name>\S+) +(?P<serial_number>\S+) +'
                         r'(?P<description>\S+)$')

        #Midplane
        p17 = re.compile(r'^(?P<name>\S+)$')

        ret_dict = {}
        # Skip the first line (the echoed command).
        for line in out.splitlines()[1:]:
            line = line.strip()

            #Hardware inventory:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                current_item = " "
                chassis_inventory_dict = ret_dict.setdefault("chassis-inventory", {})\
                    .setdefault("chassis", {})
                chassis_inventory_dict["@junos:style"] = group["style"]
                chassis_entry_list = chassis_inventory_dict.setdefault("chassis-module", [])
                continue

            #Jedec Code: 0x7fb0 EEPROM Version: 0x02
            # Starts a fresh I2C section; earlier fields are discarded.
            m = p2.match(line)
            if m:
                group = m.groupdict()
                i2c_dict = {}
                i2c_dict["jedec-code"] = group["jedec_code"]
                i2c_dict["eeprom-version"] = group["eeprom_version"]
                continue

            #S/N: VM5D4C6B3599
            m = p3.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["serial-number"] = group["serial_number"]
                continue

            #Assembly ID: 0x0567 Assembly Version: 00.00
            m = p4.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["assembly-identifier"] = group["assembly_identifier"]
                i2c_dict["assembly-version"] = group["assembly_version"]
                continue

            #Date: 00-00-0000 Assembly Flags: 0x00
            m = p5.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["manufacture-date"] = group["manufacture_date"]
                i2c_dict["assembly-flags"] = group["assembly_flags"]
                continue

            #Version: Rev. 1.0
            m = p111.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-version"] = group["version"]
                continue

            #ID: VMX
            m = p6.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-identifier"] = group["i2c_identifier"]
                continue

            #Board Information Record:
            # address_type decides how subsequent "Address ..." lines are
            # stored (single string vs. list of hex rows).
            m = p7.match(line)
            if m:
                group = m.groupdict()
                complete_address = ""
                address_type = group["address_type"]
                continue

            #I2C Hex Data:
            m = p8.match(line)
            if m:
                group = m.groupdict()
                complete_address = []
                address_type = group["address_type"]
                continue

            #Address 0x00: 7f b0 02 00 fa 4e 01 00 52 65 76 2e 20 31 2e 30
            m = p9.match(line)
            if m:
                group = m.groupdict()
                if(address_type == "Board Information Record"):
                    i2c_dict["board-information-record"] = group["address_info"]
                else:
                    #complete_address += group["address_info"] + '\n' + (' ')*5
                    complete_address.append(group["address_info"])
                continue

            #FPC 0 Virtual FPC
            #CB 0 VMX SCB
            m = p10.match(line)
            if m:
                group = m.groupdict()
                if(group["name"] == "CB 0"):
                    # First two-token item: nothing pending to flush yet.
                    outter_dict = {}
                    current_item = group["name"]
                    outter_dict["description"] = group["description"]
                    outter_dict["name"] = group["name"]
                else:
                    if(current_item == "CB 0"):
                        # Flush the pending CB 0 entry together with its
                        # completed I2C section, padding absent fields with
                        # None to satisfy the schema.
                        i2c_dict["i2c-data"] = complete_address
                        if "part-number" not in i2c_dict:
                            i2c_dict["part-number"] = None
                        if "i2c-version" not in i2c_dict:
                            i2c_dict["i2c-version"] = None
                        if "serial-number" not in i2c_dict:
                            i2c_dict["serial-number"] = None
                        outter_dict["i2c-information"] = i2c_dict
                        chassis_entry_list.append(outter_dict)
                    current_item = group["name"]
                    outter_dict = {}
                    outter_dict["description"] = group["description"]
                    outter_dict["name"] = group["name"]
                continue

            #Routing Engine 0 RE-VMX
            m = p11.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-data"] = complete_address
                if(current_item == "Chassis"):
                    # The pending I2C section belongs to the chassis itself.
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    chassis_inventory_dict["i2c-information"] = i2c_dict
                current_item = group["name"]
                outter_dict = {}
                outter_dict["description"] = group["description"]
                outter_dict["name"] = group["name"]
                continue

            #cd0 27649 MB VMware Virtual IDE Har 00000000000000000001 Hard Disk
            m = p12.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-data"] = complete_address
                if(current_item == "Routing Engine 0"):
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    if "serial-number" not in i2c_dict:
                        i2c_dict["serial-number"] = None
                    outter_dict["i2c-information"] = i2c_dict
                re_disk_entry_dict = {}
                re_disk_entry_dict["description"] = group["description"]
                re_disk_entry_dict["disk-size"] = group["disk_size"]
                re_disk_entry_dict["model"] = group["model"]
                re_disk_entry_dict["name"] = group["name"]
                re_disk_entry_dict["serial-number"] = group["serial_number"]
                outter_dict["chassis-re-disk-module"] = re_disk_entry_dict
                chassis_entry_list.append(outter_dict)
                continue

            #CPU Rev. 1.0 RIOT-LITE BUILTIN
            m = p13.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-data"] = complete_address
                if(current_item == "FPC 0"):
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    if "serial-number" not in i2c_dict:
                        i2c_dict["serial-number"] = None
                    outter_dict["i2c-information"] = i2c_dict
                current_item = group["name"]
                # Start the sub-module list of the current FPC.
                chassis_inner_list = []
                chassis_inner_dict = {}
                chassis_inner_dict["name"] = group["name"]
                chassis_inner_dict["part-number"] = group["part_number"]
                chassis_inner_dict["serial-number"] = group["serial_number"]
                chassis_inner_dict["version"] = group["version"]
                continue

            #MIC 0 Virtual
            m = p14.match(line)
            if m:
                group = m.groupdict()
                i2c_dict["i2c-data"] = complete_address
                if(current_item == "CPU"):
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    if "serial-number" not in i2c_dict:
                        # CPU I2C section may omit S/N; reuse the CPU row's.
                        i2c_dict["serial-number"] = chassis_inner_dict["serial-number"]
                    if "i2c-identifier" not in i2c_dict:
                        i2c_dict["i2c-identifier"] = None
                    chassis_inner_dict["i2c-information"] = i2c_dict
                current_item = group["name"]
                chassis_inner_dict2 = {}
                chassis_inner_dict2["description"] = group["description"]
                chassis_inner_dict2["name"] = group["name"]
                continue

            #PIC 0 BUILTIN BUILTIN Virtual
            m = p15.match(line)
            if m:
                group = m.groupdict()
                chassis_inner_inner_dict = {}
                i2c_dict["i2c-data"] = complete_address
                if(current_item == "MIC 0"):
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    if "serial-number" not in i2c_dict:
                        i2c_dict["serial-number"] = None
                    chassis_inner_dict2["i2c-information"] = i2c_dict
                chassis_inner_inner_dict["description"] = group["description"]
                chassis_inner_inner_dict["name"] = group["name"]
                chassis_inner_inner_dict["part-number"] = group["part_number"]
                chassis_inner_inner_dict["serial-number"] = group["serial_number"]
                # Assemble FPC -> (MIC with PIC, CPU) and flush to the
                # top-level module list.
                chassis_inner_dict2["chassis-sub-sub-module"] = chassis_inner_inner_dict
                chassis_inner_list.append(chassis_inner_dict2)
                chassis_inner_list.append(chassis_inner_dict)
                outter_dict["chassis-sub-module"] = chassis_inner_list
                chassis_entry_list.append(outter_dict)
                continue

            #Chassis VM5D4C6B3599 VMX
            m = p16.match(line)
            if m:
                group = m.groupdict()
                current_item = group["name"]
                chassis_inventory_dict["description"] = group["description"]
                chassis_inventory_dict["name"] = group["name"]
                chassis_inventory_dict["serial-number"] = group["serial_number"]
                chassis_entry_dict = {}
                continue

            #Midplane
            m = p17.match(line)
            if m:
                group = m.groupdict()
                if(current_item == "CPU"):
                    if "part-number" not in i2c_dict:
                        i2c_dict["part-number"] = None
                    if "i2c-version" not in i2c_dict:
                        i2c_dict["i2c-version"] = None
                    chassis_inventory_dict["i2c-information"] = i2c_dict
                entry_dict = {}
                entry_dict["name"] = group["name"]
                chassis_entry_list.append(entry_dict)
                continue

        return ret_dict
class ShowChassisHardwareExtensiveNoForwarding(ShowChassisHardwareExtensive):
    """ Parser for:
            - show chassis hardware extensive no-forwarding

    Thin wrapper: collects the no-forwarding variant of the command and
    delegates all parsing to ShowChassisHardwareExtensive.
    """

    cli_command = [
        'show chassis hardware extensive no-forwarding'
    ]

    def cli(self, output=None):
        # Fetch the CLI text from the device only when none was supplied.
        out = self.device.execute(self.cli_command[0]) if not output else output

        # The base class implements the actual parsing logic.
        return super().cli(output=out)
class ShowChassisFpcSchema(MetaParser):
    """ Schema for:
            * show chassis fpc

    schema = {
    Optional("@xmlns:junos"): str,
    "fpc-information": {
        Optional("@junos:style"): str,
        Optional("@xmlns"): str,
        "fpc": [
            {
                "cpu-15min-avg": str,
                "cpu-1min-avg": str,
                "cpu-5min-avg": str,
                "cpu-interrupt": str,
                "cpu-total": str,
                "memory-buffer-utilization": str,
                "memory-dram-size": str,
                "memory-heap-utilization": str,
                "slot": str,
                "state": str,
                "temperature": {
                    "#text": str,
                    Optional("@junos:celsius"): str
                }
            }
        ]
    }
}
    """
    # Effective schema. The CPU/memory counters and temperature are Optional
    # because an FPC reported as "Empty" or "Offline" carries only its slot
    # and state (plus an optional "comment" for offlined-by-cli slots).
    schema = {
        Optional("@xmlns:junos"): str,
        "fpc-information": {
            Optional("@junos:style"): str,
            Optional("@xmlns"): str,
            "fpc": ListOf({
                Optional("cpu-15min-avg"): str,
                Optional("cpu-1min-avg"): str,
                Optional("cpu-5min-avg"): str,
                Optional("cpu-interrupt"): str,
                Optional("cpu-total"): str,
                Optional("memory-buffer-utilization"): str,
                Optional("memory-dram-size"): str,
                Optional("memory-heap-utilization"): str,
                Optional("comment"): str,
                "slot": str,
                "state": str,
                Optional("temperature"): {
                    "#text": str,
                    Optional("@junos:celsius"): str
                }
            })
        }
    }
class ShowChassisFpc(ShowChassisFpcSchema):
    """ Parser for:
            * show chassis fpc
    """

    cli_command = 'show chassis fpc'

    def cli(self, output=None):
        # Run the command on the device unless pre-captured text was given.
        out = self.device.execute(self.cli_command) if not output else output

        # 0 Online Testing 3 0 2 2 2 511 31 0
        # 0 Present Testing
        p1 = re.compile(r'^(?P<slot>\d+) +(?P<state>\S+) '
                        r'+(?P<text>\S+)( +(?P<cpu_total>\d+) '
                        r'+(?P<cpu_interrupt>\d+)( +(?P<cpu_1min>\d+) '
                        r'+(?P<cpu_5min>\d+) +(?P<cpu_15min>\d+))? +'
                        r'(?P<dram>\d+) +(?P<heap>\d+) +(?P<buffer>\d+))?$')

        # 2 Empty
        p2 = re.compile(r'^(?P<slot>\d+) +(?P<state>\S+)$')

        # 0 Offline ---Offlined by cli command---
        p3 = re.compile(r'^(?P<slot>\d+)\s+(?P<state>\S+)\s+---(?P<comment>Offlined\s+by\s+cli\s+command)---$')

        # Regex group name -> schema key for the counters that only appear
        # when the FPC is online.
        optional_counters = (
            ('cpu_total', 'cpu-total'),
            ('cpu_interrupt', 'cpu-interrupt'),
            ('cpu_1min', 'cpu-1min-avg'),
            ('cpu_5min', 'cpu-5min-avg'),
            ('cpu_15min', 'cpu-15min-avg'),
            ('dram', 'memory-dram-size'),
            ('heap', 'memory-heap-utilization'),
            ('buffer', 'memory-buffer-utilization'),
        )

        parsed = {}

        for line in out.splitlines():
            line = line.strip()

            # 0 Online Testing 3 0 2 2 2 511 31 0
            m = p1.match(line)
            if m:
                group = m.groupdict()
                entry = {
                    'slot': group['slot'],
                    'state': group['state'],
                    'temperature': {'#text': group['text']},
                }
                for src, dest in optional_counters:
                    if group[src]:
                        entry[dest] = group[src]
                parsed.setdefault('fpc-information', {}) \
                    .setdefault('fpc', []).append(entry)
                continue

            # 2 Empty
            m = p2.match(line)
            if m:
                group = m.groupdict()
                parsed.setdefault('fpc-information', {}) \
                    .setdefault('fpc', []).append({
                        'slot': group['slot'],
                        'state': group['state'],
                    })
                continue

            # 0 Offline ---Offlined by cli command---
            m = p3.match(line)
            if m:
                group = m.groupdict()
                parsed.setdefault('fpc-information', {}) \
                    .setdefault('fpc', []).append({
                        'slot': group['slot'],
                        'state': group['state'],
                        'comment': group['comment'],
                    })
                continue

        return parsed
class ShowChassisRoutingEngineSchema(MetaParser):
    """ Schema for:
            * show chassis routing-engine

    Most per-RE fields are Optional because backup REs and abbreviated
    outputs omit many of the counters; only "mastership-state" and "slot"
    are always present.
    """
    # BUG FIX: a previous revision assigned a stricter schema dict here and
    # then immediately overwrote it with the dict below, leaving dead code.
    # The dead assignment has been removed; only the effective schema remains.
    schema = {
        Optional("@xmlns:junos"): str,
        "route-engine-information": {
            Optional("@xmlns"): str,
            "route-engine": ListOf({
                Optional("cpu-background"): str,
                Optional("cpu-background-5sec"): str,
                Optional("cpu-background-1min"): str,
                Optional("cpu-background-5min"): str,
                Optional("cpu-background-15min"): str,
                Optional("cpu-idle"): str,
                Optional("cpu-idle-5sec"): str,
                Optional("cpu-idle-1min"): str,
                Optional("cpu-idle-5min"): str,
                Optional("cpu-idle-15min"): str,
                Optional("cpu-interrupt"): str,
                Optional("cpu-interrupt-5sec"): str,
                Optional("cpu-interrupt-1min"): str,
                Optional("cpu-interrupt-5min"): str,
                Optional("cpu-interrupt-15min"): str,
                Optional("cpu-system"): str,
                Optional("cpu-system-5sec"): str,
                Optional("cpu-system-1min"): str,
                Optional("cpu-system-5min"): str,
                Optional("cpu-system-15min"): str,
                Optional("cpu-temperature"): {
                    "#text": str
                },
                Optional("cpu-user"): str,
                Optional("cpu-user-5sec"): str,
                Optional("cpu-user-1min"): str,
                Optional("cpu-user-5min"): str,
                Optional("cpu-user-15min"): str,
                Optional("last-reboot-reason"): str,
                Optional("load-average-fifteen"): str,
                Optional("load-average-five"): str,
                Optional("load-average-one"): str,
                Optional("mastership-priority"): str,
                "mastership-state": str,
                Optional("memory-buffer-utilization"): str,
                Optional("memory-dram-size"): str,
                Optional("memory-installed-size"): str,
                Optional("model"): str,
                Optional("serial-number"): str,
                "slot": str,
                Optional("start-time"): {
                    "#text": str,
                    Optional("@junos:seconds"): str
                },
                Optional("status"): str,
                Optional("temperature"): {
                    "#text": str
                },
                Optional("up-time"): {
                    "#text": str,
                    Optional("@junos:seconds"): str
                }
            }),
            Optional("re-state"): str
        }
    }
class ShowChassisRoutingEngine(ShowChassisRoutingEngineSchema):
    """ Parser for:
            * show chassis routing-engine
    """

    cli_command = 'show chassis routing-engine'

    def cli(self, output=None):
        """Parse `show chassis routing-engine` output.

        Args:
            output: pre-captured CLI text; when falsy the command is
                executed on the connected device.

        Returns:
            dict matching ShowChassisRoutingEngineSchema (empty when no
            line matched).
        """
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # Slot 0:
        p1 = re.compile(r'^Slot +(?P<slot>\d+):$')

        # Current state                  Master
        p2 = re.compile(r'^Current state +(?P<mastership_state>\S+)$')

        # Election priority              Master (default)
        p3 = re.compile(r'^Election priority +(?P<mastership_priority>[\S\s]+)$')

        # DRAM                      2002 MB (2048 MB installed)
        p4 = re.compile(r'^DRAM +(?P<memory_dram_size>\S+\s\S+) +(?P<memory_installed_size>[\S\s]+)$')

        # Memory utilization          19 percent
        p5 = re.compile(r'^Memory utilization +(?P<memory_buffer_utilization>\d+) +percent$')

        # 5 sec CPU utilization:
        p6 = re.compile(r'^(?P<state>\d+\s+\S+) +CPU utilization:$')

        # User                       1 percent
        p7 = re.compile(r'^User +(?P<user>\d+) +percent$')

        # Background                 0 percent
        p8 = re.compile(r'^Background +(?P<background>\d+) +percent$')

        # Kernel                     1 percent
        p9 = re.compile(r'^Kernel +(?P<system>\d+) +percent$')

        # Temperature                 42 degrees C / 107 degrees F
        p9_1 = re.compile(r'^Temperature +(?P<cpu_temperature>[\S\s]+)$')

        # Interrupt                  0 percent
        p10 = re.compile(r'^Interrupt +(?P<interrupt>\d+) +percent$')

        # Idle                      98 percent
        p11 = re.compile(r'^Idle +(?P<idle>\d+) +percent$')

        # Model                          RE-VMX
        p12 = re.compile(r'^Model +(?P<system>\S+)$')

        # Serial ID                      9009237474
        p12_1 = re.compile(r'^Serial +ID +(?P<serial_number>\d+)$')

        # Start time                     2019-08-29 09:02:22 UTC
        p13 = re.compile(r'^Start time +(?P<start_time>[\S\s]+)$')

        # CPU temperature             38 degrees C / 100 degrees F
        p13_1 = re.compile(r'^CPU +[tT]emperature +(?P<cpu_temperature>[\S\s]+)$')

        # Uptime                         208 days, 23 hours, 14 minutes, 9 seconds
        p14 = re.compile(r'^Uptime +(?P<uptime>[\S\s]+)$')

        # Last reboot reason             Router rebooted after a normal shutdown.
        p15 = re.compile(r'^Last reboot reason +(?P<last_reboot_reason>[\S\s]+)$')

        # 0.72 0.46 0.40
        p16 = re.compile(r'^(?P<load_average_one>[\d\.]+) '
                         r'+(?P<load_average_five>[\d\.]+) '
                         r'+(?P<load_average_fifteen>[\d\.]+)$')

        # {master}
        p17 = re.compile(r'^(?P<re_state>[\{\S\s]+\})$')

        ret_dict = {}

        for line in out.splitlines():
            line = line.strip()

            # Slot 0: -- start a new route-engine entry
            m = p1.match(line)
            if m:
                route_engine_list = ret_dict.setdefault("route-engine-information", {})\
                    .setdefault("route-engine", [])
                group = m.groupdict()
                route_engine_entry_dict = {}
                route_engine_list.append(route_engine_entry_dict)
                # tag suffixes CPU counter keys with the current averaging
                # window (e.g. "-5sec"); empty until a p6 header is seen.
                tag = ''
                route_engine_entry_dict["slot"] = group["slot"]
                continue

            # Current state                  Master
            m = p2.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["mastership-state"] = group["mastership_state"]
                continue

            # Election priority              Master (default)
            m = p3.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["mastership-priority"] = group["mastership_priority"]
                continue

            # DRAM                      2002 MB (2048 MB installed)
            m = p4.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["memory-dram-size"] = group["memory_dram_size"]
                route_engine_entry_dict["memory-installed-size"] = group["memory_installed_size"]
                continue

            # Memory utilization          19 percent
            m = p5.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["memory-buffer-utilization"] = group["memory_buffer_utilization"]
                continue

            # 5 sec CPU utilization:  -- set the window suffix for p7-p11
            m = p6.match(line)
            if m:
                group = m.groupdict()
                # "5 sec" -> "-5sec", "1 min" -> "-1min", ...
                tag = '-' + group["state"].replace(' ', '')
                continue

            # User                       1 percent
            m = p7.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["cpu-user" + tag] = group["user"]
                continue

            # Background                 0 percent
            m = p8.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["cpu-background" + tag] = group["background"]
                continue

            # Kernel                     1 percent
            m = p9.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["cpu-system" + tag] = group["system"]
                continue

            # Temperature                 42 degrees C / 107 degrees F
            m = p9_1.match(line)
            if m:
                group = m.groupdict()
                temp_dict = {}
                temp_dict["#text"] = group["cpu_temperature"]
                route_engine_entry_dict["temperature"] = temp_dict
                continue

            # Interrupt                  0 percent
            m = p10.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["cpu-interrupt" + tag] = group["interrupt"]
                continue

            # Idle                      98 percent
            m = p11.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["cpu-idle" + tag] = group["idle"]
                continue

            # Model                          RE-VMX
            m = p12.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["model"] = group["system"]
                continue

            # Serial ID                      9009237474
            m = p12_1.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["serial-number"] = group["serial_number"]
                continue

            # Start time                     2019-08-29 09:02:22 UTC
            m = p13.match(line)
            if m:
                group = m.groupdict()
                start_time_dict = {}
                start_time_dict["#text"] = group["start_time"]
                route_engine_entry_dict["start-time"] = start_time_dict
                continue

            # CPU temperature             38 degrees C / 100 degrees F
            # BUG FIX: this branch previously re-matched p13 (Start time), so
            # p13_1 was dead and "cpu-temperature" was never populated.
            m = p13_1.match(line)
            if m:
                group = m.groupdict()
                cpu_temp_dict = {}
                cpu_temp_dict["#text"] = group["cpu_temperature"]
                route_engine_entry_dict["cpu-temperature"] = cpu_temp_dict
                continue

            # Uptime                         208 days, 23 hours, 14 minutes, 9 seconds
            m = p14.match(line)
            if m:
                group = m.groupdict()
                up_time_dict = {}
                up_time_dict["#text"] = group["uptime"]
                route_engine_entry_dict["up-time"] = up_time_dict
                continue

            # Last reboot reason             Router rebooted after a normal shutdown.
            m = p15.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["last-reboot-reason"] = group["last_reboot_reason"]
                continue

            # 0.72 0.46 0.40
            m = p16.match(line)
            if m:
                group = m.groupdict()
                route_engine_entry_dict["load-average-one"] = group["load_average_one"]
                route_engine_entry_dict["load-average-five"] = group["load_average_five"]
                route_engine_entry_dict["load-average-fifteen"] = group["load_average_fifteen"]
                continue

            # {master}
            m = p17.match(line)
            if m:
                group = m.groupdict()
                ret_dict["route-engine-information"]["re-state"] = group["re_state"]
                continue

        return ret_dict
class ShowChassisRoutingEngineNoForwarding(ShowChassisRoutingEngine):
    """ Parser for:
            - show chassis routing-engine no-forwarding

    Thin wrapper around ShowChassisRoutingEngine that only changes the
    command string.
    """

    cli_command = 'show chassis routing-engine no-forwarding'

    def cli(self, output=None):
        if not output:
            # BUG FIX: cli_command is a plain string here, so the previous
            # self.cli_command[0] would have executed just the letter 's'
            # on the device instead of the full command.
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # Parsing itself is handled by the base class.
        return super().cli(output=out)
class ShowChassisEnvironmentSchema(MetaParser):
    """ Schema for:
            * show chassis environment
    """
    # One item per environment line; "class" (Temp/Fans) and the
    # temperature/comment measurements are optional because the CLI omits
    # them for some rows.
    schema = {
        'environment-information': {
            'environment-item': ListOf({
                Optional('class'): str,
                Optional('comment'): str,
                'name': str,
                'status': str,
                Optional('temperature'): {
                    '#text': str,
                    '@junos:celsius': str,
                }
            })
        }
    }
class ShowChassisEnvironment(ShowChassisEnvironmentSchema):
    """Parser for show chassis environment"""

    cli_command = 'show chassis environment'

    def cli(self, output=None):
        # Run the command unless pre-captured output was supplied.
        out = self.device.execute(self.cli_command) if not output else output

        # Class Item                           Status     Measurement
        # Temp  PSM 0                          OK         25 degrees C / 77 degrees F
        # Fans  Fan Tray 0 Fan 1               OK         2760 RPM
        #       PSM 1                          OK         24 degrees C / 75 degrees F
        #       CB 0 IntakeA-Zone0             OK         39 degrees C / 102 degrees F
        #       PSM 4                          Check
        #       Fan Tray 2 Fan 2               OK         2640 RPM
        #       FPC 0 Intake                   Testing
        p1 = re.compile(r'^((?P<class>Temp|Fans) +)?(?P<name>[\s\S]+) '
                        r'+(?P<status>OK|Check|Testing)( +(?P<measurement>[\s\S]+))?$')

        # 24 degrees C / 75 degrees F
        celsius_pattern = re.compile(r'(?P<celsius>\d+) degrees C / (?P<fahr>\d+) degrees F')

        items = []
        parsed = {}
        # The "class" column only appears on the first row of its group, so
        # remember the most recent one and propagate it to following rows.
        active_class = ''

        for raw_line in out.splitlines():
            m = p1.match(raw_line.strip())
            if not m:
                continue

            parsed = {
                'environment-information': {
                    'environment-item': items
                }}
            group = m.groupdict()

            entry = {}
            if group['class']:
                active_class = group['class']
            entry['class'] = active_class
            entry['name'] = group['name'].strip()
            entry['status'] = group['status'].strip()

            measurement = group['measurement']
            if measurement:
                deg = celsius_pattern.match(measurement)
                # CB 0 IntakeA-Zone0             OK         39 degrees C / 102 degrees F
                if deg:
                    entry['temperature'] = {
                        '@junos:celsius': deg.groupdict()['celsius'],
                        '#text': measurement,
                    }
                # Fan Tray 2 Fan 2               OK         2640 RPM
                else:
                    entry['comment'] = measurement

            items.append(entry)

        return parsed
class ShowChassisEnvironmentFpcSchema(MetaParser):
    '''
    Schema for show chassis environment fpc

    schema = {
        "environment-component-information": {
            "environment-component-item": [
                {
                    "name": str,
                    "power-information": {
                        "power-title": {
                            "power-type": str
                        }
                        "voltage": [
                            {
                                "actual-voltage": str,
                                "reference-voltage": str,
                            },
                        ]
                    },
                    "slave-revision": str,
                    "state": str,
                    "temperature-reading": [
                        {
                            "temperature": {
                                "#text": str,
                                "@junos:celsius": str,
                            },
                            "temperature-name": str,
                        },
                    ]
                }
            ]
        }
    }
    '''
    # Effective schema. Power and slave-revision are Optional because a
    # powered-down FPC reports only name/state/temperatures.
    schema = {
        'environment-component-information': {
            'environment-component-item': ListOf({
                "name": str,
                Optional("power-information"): {
                    "power-title": {
                        "power-type": str
                    },
                    Optional("voltage"): ListOf({
                        "actual-voltage": str,
                        "reference-voltage": str,
                    }),
                },
                Optional("slave-revision"): str,
                "state": str,
                "temperature-reading": ListOf({
                    "temperature": {
                        "#text": str,
                        "@junos:celsius": str,
                    },
                    "temperature-name": str,
                }),
            })
        }
    }
class ShowChassisEnvironmentFpc(ShowChassisEnvironmentFpcSchema):
    '''Parser for show chassis environment fpc

    Output is grouped per FPC: a "FPC N status:" header opens an item and
    subsequent State/Temperature/Power/voltage lines are attached to the
    most recently opened item (env_item below).
    '''
    cli_command = 'show chassis environment fpc'
    def cli(self, output=None):
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # Regex
        # FPC 0 status:
        p1 = re.compile(r'^(?P<name>.*) +status:$')
        # State Online
        p2 = re.compile(r'^State+\s+(?P<state>\S+)$')
        # Temperature Intake 27 degrees C / 80 degrees F
        # Temperature I3 0 Chip 38 degrees C / 100 degrees F
        p_temp = re.compile(r'^(?P<temperature_name>[\s\S]+) '
                            r'+(?P<text>(?P<celsius>\d+)\sdegrees\sC) +.*')
        # Power
        # Power Disabled
        p_power = re.compile(r'^Power(\s+Disabled)?$')
        # 1.2 V PFE 0 1231 mV
        # 1.5 V 1498 mV
        p_voltage = re.compile(r'(?P<reference_voltage>[\s\S]+) '
                               r'+(?P<actual_voltage>\d+) +mV')
        # I2C Slave Revision 42
        p_slave_revision = re.compile(r'^.* +Slave +Revision +(?P<slave_revision>\S+)$')
        # Read line from output and build parsed output
        res = {}
        for line in out.splitlines():
            line = line.strip()
            # FPC 0 status: -- open a new component item
            m = p1.match(line)
            if m:
                # Build the top-level container lazily on the first header.
                if "environment-component-information" not in res:
                    res = {
                        "environment-component-information": {
                            "environment-component-item": []
                        }
                    }
                env_list = res["environment-component-information"]["environment-component-item"]
                env_item = {
                    "name": m.groupdict()["name"]
                }
                env_list.append(env_item)
                continue
            # State Online -- attaches to the current env_item
            m = p2.match(line)
            if m:
                env_item["state"] = m.groupdict()["state"]
                continue
            # Temperature Intake 27 degrees C / 80 degrees F
            # Temperature I3 0 Chip 38 degrees C / 100 degrees F
            m = p_temp.match(line)
            if m:
                group = m.groupdict()
                if "temperature-reading" not in env_item:
                    env_item["temperature-reading"] = []
                temp_list = env_item["temperature-reading"]
                temp_item = {
                    "temperature": {
                        "#text": group["text"].strip(),
                        "@junos:celsius": group["celsius"]
                    },
                    "temperature-name": group["temperature_name"].strip()
                }
                temp_list.append(temp_item)
                continue
            # Power -- opens the power-information section; voltage rows that
            # follow are appended under it
            m = p_power.match(line)
            if m:
                env_item["power-information"] = {
                    "power-title": {
                        "power-type": "Power"
                    },
                }
                continue
            # 1.2 V PFE 0 1231 mV
            # 1.5 V 1498 mV
            # NOTE(review): assumes a "Power" line preceded; otherwise this
            # raises KeyError on "power-information" -- confirm intended.
            m = p_voltage.match(line)
            if m:
                if "voltage" not in env_item["power-information"]:
                    env_item["power-information"]["voltage"] = []
                voltage_list = env_item["power-information"]["voltage"]
                voltage_item = {
                    "actual-voltage": m.groupdict()["actual_voltage"].strip(),
                    "reference-voltage": m.groupdict()["reference_voltage"].strip()
                }
                voltage_list.append(voltage_item)
                continue
            # I2C Slave Revision 42
            m = p_slave_revision.match(line)
            if m:
                env_item["slave-revision"] = m.groupdict()["slave_revision"].strip()
                continue
        return res
class ShowChassisAlarmsSchema(MetaParser):
    """ Schema for show chassis alarms"""
    # {
    #     "alarm-information": {
    #         Optional("alarm-detail"): [
    #             {
    #                 "alarm-class": "Major",
    #                 "alarm-description": str,
    #                 "alarm-short-description": str,
    #                 "alarm-time": {
    #                     "#text": str,
    #                 },
    #                 "alarm-type": str
    #             },
    #         ],
    #         "alarm-summary": {
    #             Optional("active-alarm-count"): str,
    #             Optional("no-active-alarms"): bool
    #         }
    #     },
    # }
    # "alarm-detail" is Optional: it is absent when the device reports
    # "No alarms currently active".
    schema = {
        "alarm-information": {
            Optional("alarm-detail"): ListOf({
                "alarm-class": str,
                "alarm-description": str,
                "alarm-short-description": str,
                "alarm-time": {
                    "#text": str,
                },
                "alarm-type": str
            }),
            "alarm-summary": {
                Optional("active-alarm-count"): str,
                Optional("no-active-alarms"): bool
            }
        },
    }
class ShowChassisAlarms(ShowChassisAlarmsSchema):
    """Parser for show chassis alarms"""

    cli_command = 'show chassis alarms'

    def cli(self, output=None):
        # Run the command unless pre-captured text was supplied.
        out = self.device.execute(self.cli_command) if not output else output

        # 1 alarms currently active
        p1 = re.compile(r'^(?P<active_alarms>\d+) +alarms +currently +active$')

        # Alarm time               Class  Description
        # 2020-07-16 13:38:21 EST  Major  PSM 15 Not OK
        p2 = re.compile(r'^(?P<text>\S+ +\d\d\:\d\d\:\d\d +\S+) '
                        r'+(?P<alarm_class>\S+) +(?P<description>[\s\S]+)$')

        # No alarms currently active
        p3 = re.compile(r'^No alarms currently active$')

        # Known long descriptions that Junos abbreviates in its XML output.
        short_descriptions = {
            "SPMB 1 not online": "SPMB 1 offline",
            "Loss of communication with Backup RE": "Backup RE communica",
        }

        parsed = {}

        for line in out.splitlines():
            line = line.strip()

            # 1 alarms currently active
            m = p1.match(line)
            if m:
                parsed = {
                    "alarm-information": {
                        "alarm-summary": {
                            "active-alarm-count": m.groupdict()['active_alarms']
                        }
                    }
                }
                continue

            # 2020-07-16 13:38:21 EST  Major  PSM 15 Not OK
            m = p2.match(line)
            if m:
                group = m.groupdict()
                description = group['description']

                detail = {
                    'alarm-class': group['alarm_class'],
                    'alarm-description': description,
                    'alarm-time': {
                        '#text': group['text']
                    },
                    "alarm-type": "Chassis",
                }
                # Fall back to the full description when no abbreviation
                # is known.
                detail['alarm-short-description'] = short_descriptions.get(
                    description, description)

                parsed['alarm-information'].setdefault(
                    'alarm-detail', []).append(detail)
                continue

            # No alarms currently active
            m = p3.match(line)
            if m:
                parsed = {
                    "alarm-information": {
                        "alarm-summary": {
                            "no-active-alarms": True
                        }
                    }
                }
                continue

        return parsed
class ShowChassisFabricSummarySchema(MetaParser):
    """ Schema for:
            * show chassis fabric summary

    schema = {
        "fm-state-information": {
            "fm-state-item": [
                {
                    "plane-slot": str,
                    "state": str,
                    "up-time": str
                }
            ]
        }
    }"""
    # "up-time" is Optional: some outputs show only slot and state.
    schema = {
        "fm-state-information": {
            "fm-state-item": ListOf({
                "plane-slot": str,
                "state": str,
                Optional("up-time"): str
            })
        }
    }
class ShowChassisFabricSummary(ShowChassisFabricSummarySchema):
    """ Parser for:
            * show chassis fabric summary
    """

    cli_command = 'show chassis fabric summary'

    def cli(self, output=None):
        # Collect CLI text from the device unless already provided.
        out = self.device.execute(self.cli_command) if not output else output

        # 0      Online   34 days, 18 hours, 43 minutes, 48 seconds
        # 0      Online
        p1 = re.compile(r'^(?P<plane_slot>\d+) +(?P<state>\S+)( +(?P<up_time>[\S\s]+))?$')

        parsed = {}

        for raw_line in out.splitlines():
            # 0      Online   34 days, 18 hours, 43 minutes, 48 seconds
            m = p1.match(raw_line.strip())
            if not m:
                continue

            items = parsed.setdefault("fm-state-information", {})\
                          .setdefault("fm-state-item", [])
            # Translate regex group names (underscores) to schema keys
            # (hyphens), dropping the optional up-time when absent.
            items.append({
                key.replace('_', '-'): value
                for key, value in m.groupdict().items()
                if value is not None
            })

        return parsed
class ShowChassisFabricPlaneSchema(MetaParser):
    """ Schema for:
            * show chassis fabric plane

    schema = {
        "fm-plane-state-information": {
            "fmp-plane": [
                {
                    "fru-name": "list",
                    "fru-slot": "list",
                    "pfe-link-status": "list",
                    "pfe-slot": "list",
                    "slot": str,
                    "state": str
                }
            ]
        }
    }"""
    # Each plane entry accumulates parallel lists: fru-name[i]/fru-slot[i]
    # and pfe-link-status[i]/pfe-slot[i] are filled in line order.
    schema = {
        "fm-plane-state-information": {
            "fmp-plane": ListOf({
                "fru-name": list,
                "fru-slot": list,
                "pfe-link-status": list,
                "pfe-slot": list,
                "slot": str,
                "state": str
            })
        }
    }
class ShowChassisFabricPlane(ShowChassisFabricPlaneSchema):
    """ Parser for:
            * show chassis fabric plane

    A "Plane N" header opens a new entry; the FPC and PFE lines that follow
    are appended to that entry's parallel lists until the next header.
    """
    cli_command = 'show chassis fabric plane'
    def cli(self, output=None):
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # Plane 0
        p1 = re.compile(r'^Plane +(?P<slot>\d+)$')
        # Plane state: ACTIVE
        p2 = re.compile(r'^Plane +state: +(?P<state>\S+)$')
        # FPC 0
        p3 = re.compile(r'^(?P<fpc_name>\S+) +(?P<fpc_slot>\d+)$')
        # PFE 1 :Links ok
        p4 = re.compile(r'^PFE +(?P<pfe>\d+) +:+(?P<links>[\S\s]+)$')
        ret_dict = {}
        for line in out.splitlines():
            line = line.strip()
            # Plane 0 -- start a new plane entry and its accumulator lists
            m = p1.match(line)
            if m:
                fm_plane_state_information = ret_dict.setdefault("fm-plane-state-information", {})\
                    .setdefault("fmp-plane", [])
                group = m.groupdict()
                fm_state_dict = {}
                # These list references stay live across later branches so
                # FPC/PFE lines can append to the current plane.
                fru_name = fm_state_dict.setdefault("fru-name",[])
                fru_slot = fm_state_dict.setdefault("fru-slot",[])
                pfe_link_status = fm_state_dict.setdefault("pfe-link-status",[])
                pfe_slot = fm_state_dict.setdefault("pfe-slot",[])
                fm_plane_state_information.append(fm_state_dict)
                fm_state_dict.update({'slot' : group['slot']})
                continue
            # Plane state: ACTIVE
            m = p2.match(line)
            if m:
                group = m.groupdict()
                fm_state_dict.update({'state' : group['state']})
                continue
            # FPC 0 -- record the FRU name/slot pair for the current plane
            m = p3.match(line)
            if m:
                group = m.groupdict()
                fru_name.append(group['fpc_name'])
                fru_slot.append(group['fpc_slot'])
                continue
            # PFE 1 :Links ok -- record link status per PFE slot
            m = p4.match(line)
            if m:
                group = m.groupdict()
                pfe_link_status.append(group['links'])
                pfe_slot.append(group['pfe'])
                continue
        return ret_dict
""" Schema for:
* show chassis power
"""
class ShowChassisPowerSchema(MetaParser):
    """ Schema for:
            * show chassis power
    """
    # One "power-usage-item" per PSM plus a single "power-usage-system"
    # summary with per-zone capacity details.
    schema = {
        Optional("@xmlns:junos"): str,
        "power-usage-information": {
            "power-usage-item": ListOf({
                Optional("dc-input-detail2"): {
                    Optional("dc-input-status"): str,
                    Optional("str-dc-actual-feed"): str,
                    Optional("str-dc-expect-feed"): str
                },
                Optional("dc-output-detail2"): {
                    "str-dc-current": str,
                    "str-dc-load": str,
                    "str-dc-power": str,
                    "str-dc-voltage": str,
                    "str-zone": str
                },
                "name": str,
                Optional("pem-capacity-detail"): {
                    "capacity-actual": str,
                    "capacity-max": str
                },
                "state": str,
                Optional("input"): str,
            }),
            "power-usage-system": {
                "capacity-sys-actual": str,
                "capacity-sys-max": str,
                "capacity-sys-remaining": str,
                "power-usage-zone-information": ListOf({
                    "capacity-actual": str,
                    "capacity-actual-usage": str,
                    "capacity-allocated": str,
                    "capacity-max": str,
                    "capacity-remaining": str,
                    "str-zone": str
                })
            }
        }
    }
class ShowChassisPower(ShowChassisPowerSchema):
    """ Parser for:
            * show chassis power

    The output has two phases: per-PSM sections first, then per-zone system
    totals introduced by "<zone> Zone:". The power_usage_system_found flag
    switches the "Capacity:" branch between the two phases.
    """
    cli_command = 'show chassis power'
    def cli(self, output=None):
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # PSM 0:
        p1 = re.compile(r'^(?P<name>\S+ +\d+):$')
        # State Online Master
        p2 = re.compile(r'^State: +(?P<state>[\S\s]+)$')
        # Input: Absent
        p2_1 = re.compile(r'^Input: +(?P<input>[\S\s]+)$')
        # DC input: OK (INP0 feed expected, INP0 feed connected)
        p3 = re.compile(r'^DC +input: +(?P<dc_input_status>[\S\s]+)( +\((?P<str_dc_expect_feed>\S+) +'
                        r'feed +expected, +(?P<str_dc_actual_feed>\S+) +feed +connected\))?$')
        # Capacity: 2100 W (maximum 2500 W)
        p4 = re.compile(r'^Capacity: +(?P<capacity_actual>\d+) +\S+ +\(maximum +(?P<capacity_max>\d+) +\S+\)$')
        # DC output: 489.25 W (Lower Zone, 9.50 A at 51.50 V, 23.30% of capacity)
        p5 = re.compile(r'^DC +output: +(?P<str_dc_power>\S+) +\S+ +\((?P<str_zone>\S+) +'
                        r'Zone, +(?P<str_dc_current>\S+) +\S+ +at +(?P<str_dc_voltage>\S+) +\S+, +'
                        r'(?P<str_dc_load>\S+)\% +of +capacity\)$')
        # Total system capacity: 14700 W (maximum 17500 W)
        p6 = re.compile(r'^Total +system +capacity: +(?P<capacity_sys_actual>\S+) +\S+ +'
                        r'\(maximum +(?P<capacity_sys_max>\S+) +\S+\)$')
        # Total remaining power: 5074 W
        p7 = re.compile(r'^Total +remaining +power: +(?P<capacity_sys_remaining>\S+) +\S+$')
        # Upper Zone:
        # Lower Zone:
        p8 = re.compile(r'^(?P<str_zone>\S+) +Zone:$')
        # Allocated power: 3332 W (2968 W remaining)
        p9 = re.compile(r'^Allocated +power: +(?P<capacity_allocated>\S+) +\S+ +\((?P<capacity_remaining>\S+) +\S+ +remaining\)$')
        # Actual usage: 925.50 W
        p10 = re.compile(r'^Actual +usage: +(?P<capacity_actual_usage>\S+) +\S+$')
        ret_dict = {}
        # False while parsing per-PSM sections; True once a zone header seen.
        power_usage_system_found = False
        for line in out.splitlines():
            line = line.strip()
            # PSM 0: -- open a new per-PSM item
            m = p1.match(line)
            if m:
                power_usage_information_list = ret_dict.setdefault("power-usage-information", {})\
                    .setdefault("power-usage-item", [])
                power_usage_item_dict = {}
                group = m.groupdict()
                power_usage_information_list.append(power_usage_item_dict)
                power_usage_item_dict.update({'name' : group['name']})
                continue
            # State Online Master
            m = p2.match(line)
            if m:
                group = m.groupdict()
                power_usage_item_dict.update({'state' : group['state']})
                continue
            # Input: Absent
            m = p2_1.match(line)
            if m:
                group = m.groupdict()
                power_usage_item_dict.update({'input' : group['input']})
                continue
            # DC input: OK (INP0 feed expected, INP0 feed connected)
            m = p3.match(line)
            if m:
                group = m.groupdict()
                dc_input_detail2_dict = power_usage_item_dict.setdefault('dc-input-detail2', {})
                dc_input_detail2_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Capacity: 2100 W (maximum 2500 W)
            # Appears both per-PSM and per-zone; the flag decides where the
            # values belong.
            m = p4.match(line)
            if m:
                group = m.groupdict()
                if power_usage_system_found:
                    power_usage_zone_information_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                else:
                    pem_capacity_detail_dict = power_usage_item_dict.setdefault('pem-capacity-detail', {})
                    pem_capacity_detail_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # DC output: 489.25 W (Lower Zone, 9.50 A at 51.50 V, 23.30% of capacity)
            m = p5.match(line)
            if m:
                group = m.groupdict()
                dc_output_detail2_dict = power_usage_item_dict.setdefault('dc-output-detail2', {})
                dc_output_detail2_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Total system capacity: 14700 W (maximum 17500 W)
            # NOTE(review): assumes a zone header (p8) already created
            # power_usage_system_dict -- confirm against real outputs.
            m = p6.match(line)
            if m:
                group = m.groupdict()
                power_usage_system_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Total remaining power: 5074 W
            m = p7.match(line)
            if m:
                group = m.groupdict()
                power_usage_system_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Upper Zone:
            # Lower Zone: -- switch to system phase, open a zone entry
            m = p8.match(line)
            if m:
                group = m.groupdict()
                power_usage_system_found = True
                power_usage_system_dict = ret_dict.setdefault("power-usage-information", {})\
                    .setdefault("power-usage-system", {})
                power_usage_zone_information_list = power_usage_system_dict.setdefault("power-usage-zone-information", [])
                power_usage_zone_information_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
                power_usage_zone_information_list.append(power_usage_zone_information_dict)
                continue
            # Allocated power: 3332 W (2968 W remaining)
            m = p9.match(line)
            if m:
                group = m.groupdict()
                power_usage_zone_information_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Actual usage: 925.50 W
            m = p10.match(line)
            if m:
                group = m.groupdict()
                power_usage_zone_information_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
        return ret_dict
"""
Schema for:
* show chassis fpc pic-status
"""
class ShowChassisFpcPicStatusSchema(MetaParser):
    """ Schema for:
            * show chassis fpc pic-status

    schema = {
        "fpc-information": {
            "fpc": [
                {
                    "description": str,
                    "slot": str,
                    "state": str,
                    "pic": [
                        {
                            "pic-slot": str,
                            "pic-state": str,
                            "pic-type": str,
                        }
                    ]
                }
            ]
        }
    }
    """
    # One entry per FPC slot, each with its list of PICs.
    schema = {
        "fpc-information": {
            "fpc": ListOf({
                "description": str,
                "slot": str,
                "state": str,
                "pic": ListOf({
                    "pic-slot": str,
                    "pic-state": str,
                    "pic-type": str,
                })
            })
        }
    }
"""
Parser for:
* show chassis fpc pic-status
"""
class ShowChassisFpcPicStatus(ShowChassisFpcPicStatusSchema):
    """ Parser for:
            * show chassis fpc pic-status

    PIC lines are attached to the most recent Slot line above them.
    """

    cli_command = 'show chassis fpc pic-status'

    def cli(self, output=None):
        # Collect the CLI text unless already supplied by the caller.
        out = self.device.execute(self.cli_command) if not output else output

        # Slot 0 Online DPCE 2x 10GE R
        p_fpc = re.compile(r'Slot +(?P<slot>\d+) +(?P<state>\S+)'
                           r' +(?P<description>[\s\S]+)')

        # PIC 0 Online 1x 10GE(LAN/WAN)
        p_pic = re.compile(r'PIC +(?P<pic_slot>\d+) '
                           r'+(?P<pic_state>\S+) +(?P<pic_type>[\s\S]+)')

        parsed = {}

        for line in out.splitlines():
            line = line.strip()

            # Slot 0 Online DPCE 2x 10GE R -- open a new FPC entry
            m = p_fpc.match(line)
            if m:
                fpc_entries = parsed.setdefault(
                    "fpc-information", {}).setdefault("fpc", [])
                # p_fpc group names already match the schema keys.
                fpc_entry = dict(m.groupdict())
                fpc_entries.append(fpc_entry)
                continue

            # PIC 0 Online 1x 10GE(LAN/WAN) -- attach to the current FPC
            m = p_pic.match(line)
            if m:
                pic_entry = {
                    key.replace('_', '-'): value
                    for key, value in m.groupdict().items()
                }
                fpc_entry.setdefault("pic", []).append(pic_entry)
                continue

        return parsed
class ShowChassisEnvironmentComponentSchema(MetaParser):
    """ Schema for:
            * show chassis environment {component}
    """
    schema = {
        Optional("@xmlns:junos"): str,
        "environment-component-information": {
            Optional("@xmlns"):
            str,
            # One entry per hardware item reported by the command.
            "environment-component-item": ListOf({
                "name": str,
                "state": str,
                Optional("bus-revision"): str,
                Optional("fpga-revision"): str,
                # Voltage rails and PSM usage, present on power components.
                Optional("power-information"): {
                    Optional("power-title"): {
                        "power-type": str
                    },
                    Optional("psm-hours-used"): str,
                    Optional("voltage"): ListOf({
                        "actual-voltage": str,
                        "reference-voltage": str
                    })
                },
                # DC feed/output readings, present on DC power components.
                Optional("dc-information"): {
                    "dc-detail": {
                        "str-dc-current": str,
                        "str-dc-load": str,
                        "str-dc-power": str,
                        "str-dc-voltage": str
                    },
                    "dc-feed0-current": str,
                    "dc-feed0-power": str,
                    "dc-feed0-voltage": str,
                    "dc-feed1-current": str,
                    "dc-feed1-power": str,
                    "dc-feed1-voltage": str
                },
                Optional("temperature-reading"): ListOf({
                    "temperature": {
                        "#text": str,
                        Optional("@junos:celsius"): str
                    },
                    "temperature-name": str
                })
            })
    }
    }
class ShowChassisEnvironmentComponent(ShowChassisEnvironmentComponentSchema):
    """ Parser for:
            * show chassis environment {component}
    """
    cli_command = 'show chassis environment {component}'
    def cli(self, component, output=None):
        """Parse `show chassis environment <component>` output.

        Args:
            component: chassis component to query (formatted into the CLI
                command).
            output: pre-collected CLI output; executed on the device when
                falsy.

        Returns:
            dict matching ShowChassisEnvironmentComponentSchema.
        """
        if not output:
            out = self.device.execute(self.cli_command.format(
                component=component
            ))
        else:
            out = output
        ret_dict = {}
        # Parser state shared across line branches.  Initialized up front so
        # unexpected line ordering degrades gracefully instead of raising
        # NameError (previously these names were only bound once their
        # header line had matched).
        environment_component_item_dict = {}
        power_info_dict = {}
        dc_information_dict = {}
        dc_detail_dict = {}
        feed_cnt = 0
        # CB 0 status:
        p1 = re.compile(r'^(?P<name>\S+ +\d+) +status:$')
        # State                      Online Master
        p2 = re.compile(r'^State +(?P<state>[\S\s]+)$')
        # Power 1
        # Power
        # NOTE(review): this pattern also matches any other single-word
        # line; the earlier branches and the known CLI output keep it from
        # misfiring — confirm when adding new platforms.
        p3 = re.compile(r'^\w+( +(?P<power_type>\d+))?$')
        # TCBC-Zone0 Temperature     45 degrees C / 113 degrees F
        p4 = re.compile(r'^(?P<temperature_name>.*) +(?P<text>\d+ +degrees +\w+ +\/ +\d+ +degrees +\w+)$')
        # 1.0 V                     1005 mV
        p5 = re.compile(r'^(?P<reference_voltage>[\d\.]+ +\w+( +\w+)?) +(?P<actual_voltage>\d+) +\w+$')
        # Bus Revision              100
        p6 = re.compile(r'^Bus +Revision +(?P<bus_revision>\d+)$')
        # FPGA Revision             272
        p7 = re.compile(r'^FPGA +Revision +(?P<fpga_revision>\d+)$')
        # DC Input Feed Voltage(V) Current(A) Power(W)
        p8 = re.compile(r'^DC +Input +Feed +Voltage\S+ +Current\S+ +Power\S+$')
        # INP0 50.00 11.20 560.00
        p9 = re.compile(r'^\w+ +(?P<voltage>[\d\.]+) +(?P<current>[\d\.]+) +(?P<power>[\d\.]+)$')
        # DC Output Voltage(V) Current(A) Power(W) Load(%)
        p10 = re.compile(r'^DC +Output +Voltage\S+ +Current\S+ +Power\S+ +Load\S+$')
        # 50.1 50.00 11.20 560.00
        p11 = re.compile(r'^(?P<voltage>[\d\.]+) +(?P<current>[\d\.]+) +(?P<power>[\d\.]+) +(?P<load>[\d\.]+)$')
        # Hours Used 45607
        p12 = re.compile(r'^Hours +Used +(?P<psm_hours_used>\d+)$')
        for line in out.splitlines():
            line = line.strip()
            # CB 0 status:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                environment_component_item_list = ret_dict.setdefault('environment-component-information', {}). \
                    setdefault('environment-component-item', [])
                environment_component_item_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
                environment_component_item_list.append(environment_component_item_dict)
                continue
            # State Online Master
            m = p2.match(line)
            if m:
                group = m.groupdict()
                environment_component_item_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # Power 1
            m = p3.match(line)
            if m:
                group = m.groupdict()
                power_info_dict = environment_component_item_dict.setdefault('power-information', {})
                # Bug fix: only record a power-title when the line carried a
                # numeric power type.  The old code stored None for bare
                # "Power" lines, which violates the schema ("power-type": str).
                if group['power_type'] is not None:
                    power_info_dict.setdefault('power-title', {}). \
                        setdefault('power-type', group['power_type'])
                continue
            # IntakeC-Zone0 Temperature 51 degrees C / 123 degrees F
            m = p4.match(line)
            if m:
                group = m.groupdict()
                temperature_reading_list = environment_component_item_dict.setdefault('temperature-reading', [])
                temperature_name = group['temperature_name']
                text = group['text']
                temperature_reading_dict = {'temperature-name': temperature_name,
                                            'temperature': {'#text': text}}
                temperature_reading_list.append(temperature_reading_dict)
                continue
            # 1.0 V 1005 mV
            m = p5.match(line)
            if m:
                group = m.groupdict()
                voltage_list = power_info_dict.setdefault('voltage', [])
                voltage_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
                voltage_list.append(voltage_dict)
                continue
            # Bus Revision 100
            m = p6.match(line)
            if m:
                group = m.groupdict()
                environment_component_item_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # FPGA Revision 272
            m = p7.match(line)
            if m:
                group = m.groupdict()
                environment_component_item_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
                continue
            # DC Input Feed Voltage(V) Current(A) Power(W)
            m = p8.match(line)
            if m:
                dc_information_dict = environment_component_item_dict.setdefault('dc-information', {})
                # Feed counter restarts for every DC input table.
                feed_cnt = 0
                continue
            # INP0 50.00 11.20 560.00
            m = p9.match(line)
            if m:
                group = m.groupdict()
                voltage = group['voltage']
                current = group['current']
                power = group['power']
                dc_information_dict.update({'dc-feed' + str(feed_cnt) +'-current': current})
                dc_information_dict.update({'dc-feed' + str(feed_cnt) +'-voltage': voltage})
                dc_information_dict.update({'dc-feed' + str(feed_cnt) +'-power': power})
                feed_cnt = feed_cnt + 1
                continue
            # DC Output Voltage(V) Current(A) Power(W) Load(%)
            m = p10.match(line)
            if m:
                dc_information_dict = environment_component_item_dict.setdefault('dc-information', {})
                dc_detail_dict = dc_information_dict.setdefault('dc-detail', {})
                feed_cnt = 0
                continue
            # 50.1 50.00 11.20 560.00
            m = p11.match(line)
            if m:
                group = m.groupdict()
                # Bug fix: voltage and current were previously swapped when
                # written into dc-detail (str-dc-voltage received the
                # current reading and vice versa).
                dc_detail_dict.update({'str-dc-voltage': group['voltage']})
                dc_detail_dict.update({'str-dc-current': group['current']})
                dc_detail_dict.update({'str-dc-power': group['power']})
                dc_detail_dict.update({'str-dc-load': group['load']})
                continue
            # Hours Used 45557
            m = p12.match(line)
            if m:
                group = m.groupdict()
                power_information_dict = environment_component_item_dict.setdefault('power-information', {})
                power_information_dict.update({'psm-hours-used': group['psm_hours_used']})
                continue
        return ret_dict
# Schema for show chassis pic fpc-slot {fpc-slot} pic-slot {pic-slot}
class ShowChassisPicFpcSlotPicSlotSchema(MetaParser):
    '''Schema for 'show chassis pic fpc-slot {fpc-slot} pic-slot {pic-slot}'.

    Illustrative shape (lists shown as [...]):
    schema = {
        "fpc-information": {
            "@junos:style": str,
            "fpc": {
                "pic-detail": {
                    "pic-slot": str,
                    "pic-type": str,
                    "pic-version": str,
                    "port-information": {
                        "port": [
                            {
                                "cable-type": str,
                                "fiber-mode": str,
                                "port-number": str,
                                "sfp-vendor-fw-ver": str,
                                "sfp-vendor-name": str,
                                "sfp-vendor-pno": str,
                                "wavelength": str,
                            },
                        ]
                    },
                    "slot": str,
                    "state": str,
                    "up-time": {
                        "#text": str,
                        "@junos:seconds": str,
                    }
                }
            }
        }
    }
    '''
    # main schema; ListOf validates each per-port entry.
    schema = {
        "fpc-information": {
            "fpc": {
                "pic-detail": {
                    "pic-slot": str,
                    "pic-type": str,
                    "pic-version": str,
                    "port-information": {
                        "port": ListOf({
                            "cable-type": str,
                            "fiber-mode": str,
                            "port-number": str,
                            "sfp-vendor-fw-ver": str,
                            "sfp-vendor-name": str,
                            "sfp-vendor-pno": str,
                            "wavelength": str,
                        }),
                    },
                    "slot": str,
                    "state": str,
                    "up-time": {
                        "#text": str,
                        "@junos:seconds": str,
                    }
                }
            }
        }
    }
# Parser for show chassis pic fpc-slot {fpc-slot} pic-slot {pic-slot}
class ShowChassisPicFpcSlotPicSlot(ShowChassisPicFpcSlotPicSlotSchema):
    """
    Parser for
        * show chassis pic fpc-slot {fpc-slot} pic-slot {pic-slot}
    """
    cli_command = "show chassis pic fpc-slot {fpc_slot} pic-slot {pic_slot}"
    def cli(self, fpc_slot=None, pic_slot=None, output=None):
        """Parse `show chassis pic fpc-slot <f> pic-slot <p>` output.

        Args:
            fpc_slot: FPC slot number formatted into the CLI command.
            pic_slot: PIC slot number formatted into the CLI command.
            output: pre-collected CLI output; executed on device when falsy.

        Returns:
            dict matching ShowChassisPicFpcSlotPicSlotSchema.
        """
        if not output:
            out = self.device.execute(self.cli_command.format(
                fpc_slot=fpc_slot,
                pic_slot=pic_slot
            ))
        else:
            out = output
        # Regular Expressions
        # FPC slot 0, PIC slot 0 information:
        p1 = re.compile(r'^FPC +slot +(?P<slot>\d+), +PIC +slot +(?P<pic_slot>\d+) +information:$')
        # Type                             2X100GE CFP2 OTN
        p2 = re.compile(r'^Type +(?P<pic_type>[\s\S]+)$')
        # State                            Online
        p3 = re.compile(r'^State +(?P<state>\S+)$')
        # PIC version                  1.19
        p4 = re.compile(r'^PIC version +(?P<pic_version>\S+)$')
        # Uptime			 18 minutes, 56 seconds
        # Uptime			 6 hours, 24 minutes, 1 second
        # Uptime			 2 hours, 36 minutes, 32 seconds
        # NOTE(review): "days" are not covered by this pattern; an uptime
        # such as "1 day, 2 hours, ..." would be skipped — confirm against
        # devices with longer uptimes.
        p5 = re.compile(r'^Uptime\s+(?P<up_time>((?P<hours>\d+) +hours, +)?(?P<minutes>\d+) +minutes, +(?P<seconds>\d+) +seconds?)$')
        # PIC port information:
        p6 = re.compile(r'PIC port information:')
        #                          Fiber                    Xcvr vendor       Wave-    Xcvr
        #   Port Cable type        type  Xcvr vendor        part number       length   Firmware
        #   0    100GBASE LR4      SM    FINISAR CORP.      FTLC1121RDNL-J3   1310 nm  1.5
        p7 = re.compile(r'(?P<port_number>\d+) +(?P<cable_type>[0-9A-Z\s]+) '
                        r'+(?P<fiber_mode>[A-Z]{2}) +(?P<sfp_vendor_name>[A-Z\s.]+) '
                        r'+(?P<sfp_vendor_pno>[A-Z0-9\-]+) +(?P<wavelength>\d+ nm) '
                        r'+(?P<sfp_vendor_fw_ver>\S+)')
        def _pic_detail(res):
            # Robustness fix: build the nesting on demand so a detail line
            # arriving before the p1 header cannot raise KeyError/NameError.
            return res.setdefault("fpc-information", {}). \
                setdefault("fpc", {}).setdefault("pic-detail", {})
        # Build output
        res = {}
        for line in out.splitlines():
            line = line.strip()
            # FPC slot 0, PIC slot 0 information:
            m = p1.match(line)
            if m:
                group = m.groupdict()
                # A new header restarts the result dictionary.
                res = {
                    "fpc-information": {
                        "fpc": {
                            "pic-detail": {
                                "pic-slot": group["pic_slot"],
                                "slot": group["slot"],
                            }
                        }
                    }
                }
                continue
            # Type 2X100GE CFP2 OTN
            # State Online
            # PIC version 1.19
            m = p2.match(line) or p3.match(line) or p4.match(line)
            if m:
                pic_detail_dict = _pic_detail(res)
                for k, v in m.groupdict().items():
                    pic_detail_dict[k.replace('_', '-')] = v.strip()
                continue
            # Uptime 18 minutes, 56 seconds
            # Uptime 6 hours, 24 minutes, 1 second
            # Uptime 2 hours, 36 minutes, 32 seconds
            m = p5.match(line)
            if m:
                group = m.groupdict()
                # hours is optional in the pattern; treat a missing group as 0.
                total_seconds = (int(group.get("hours") or 0) * 60 * 60
                                 + int(group["minutes"]) * 60
                                 + int(group["seconds"]))
                _pic_detail(res)["up-time"] = {
                    "#text": group["up_time"],
                    "@junos:seconds": str(total_seconds),
                }
                continue
            # PIC port information:
            m = p6.match(line)
            if m:
                _pic_detail(res).setdefault("port-information", {}). \
                    setdefault("port", [])
                continue
            # 0    100GBASE LR4      SM    FINISAR CORP.     FTLC1121RDNL-J3   1310 nm  1.5
            m = p7.match(line)
            if m:
                # Robustness fix: previously port_list was unbound if a port
                # row appeared before the "PIC port information:" header.
                port_list = _pic_detail(res).setdefault("port-information", {}). \
                    setdefault("port", [])
                port_item = {k.replace('_', '-'): v.strip()
                             for k, v in m.groupdict().items()}
                port_list.append(port_item)
                continue
        return res
| 37.38853 | 141 | 0.421998 |
192ff82d87053cfd201bbbc317211620064a38ca | 1,803 | py | Python | nautobot_device_lifecycle_mgmt/choices.py | networktocode-llc/nautobot-plugin-device-lifecycle-mgmt | b960a61f8169bcc6667b4e648db4616686e188a0 | [
"Apache-2.0"
] | null | null | null | nautobot_device_lifecycle_mgmt/choices.py | networktocode-llc/nautobot-plugin-device-lifecycle-mgmt | b960a61f8169bcc6667b4e648db4616686e188a0 | [
"Apache-2.0"
] | null | null | null | nautobot_device_lifecycle_mgmt/choices.py | networktocode-llc/nautobot-plugin-device-lifecycle-mgmt | b960a61f8169bcc6667b4e648db4616686e188a0 | [
"Apache-2.0"
] | null | null | null | """Static choices uses for the Device Lifecycle plugin."""
from pycountry import countries
from nautobot.utilities.choices import ChoiceSet
class ContractTypeChoices(ChoiceSet):
"""Choices for the types of supported contracts."""
HARDWARE = "Hardware"
SOFTWARE = "Software"
CHOICES = (
(HARDWARE, "Hardware"),
(SOFTWARE, "Software"),
)
class PoCTypeChoices(ChoiceSet):
"""Choices for the types of point-of-contacts."""
PRIMARY = "Primary"
TIER1 = "Tier 1"
TIER2 = "Tier 2"
TIER3 = "Tier 3"
OWNER = "Owner"
UNASSIGNED = "Unassigned"
CHOICES = (
(UNASSIGNED, UNASSIGNED),
(PRIMARY, PRIMARY),
(TIER1, TIER1),
(TIER2, TIER2),
(TIER3, TIER3),
(OWNER, OWNER),
)
class CurrencyChoices(ChoiceSet):
"""List of currencies for representing contract amounts."""
USD = "USD"
EUR = "EUR"
DKK = "DKK"
GBP = "GBP"
CAD = "CAD"
JPY = "JPY"
CHF = "CHF"
ZAR = "ZAR"
AUD = "AUD"
NZD = "NZD"
CHOICES = (
(AUD, f"{AUD} $"),
(CAD, f"{CAD} $"),
(CHF, f"{CHF} Fr."),
(DKK, f"{DKK} kr"),
(EUR, f"{EUR} €"),
(GBP, f"{GBP} £"),
(JPY, f"{JPY} ¥"),
(NZD, f"{NZD} $"),
(USD, f"{USD} $"),
(ZAR, f"{ZAR} R"),
)
class CountryCodes(ChoiceSet):
"""List of support country codes."""
CHOICES = tuple((c.alpha_3, f"{c.name} ({c.alpha_3})") for c in countries)
class ReportRunTypeChoices(ChoiceSet):
"""Choices for the types of report runs."""
REPORT_SINGLE_OBJECT_RUN = "single-object-run"
REPORT_FULL_RUN = "full-report-run"
CHOICES = (
(REPORT_SINGLE_OBJECT_RUN, "Single Object Run"),
(REPORT_FULL_RUN, "Full Report Run"),
)
| 21.722892 | 78 | 0.552967 |
f6d3c3ec9de6504cbc9743a27a65a78bbf5d9931 | 1,259 | py | Python | arakneed/crawler.py | somarlyonks/crawler | 466e64b2e743d98e4dbe7dafdc4a4e0af2b21e42 | [
"MIT"
] | null | null | null | arakneed/crawler.py | somarlyonks/crawler | 466e64b2e743d98e4dbe7dafdc4a4e0af2b21e42 | [
"MIT"
] | null | null | null | arakneed/crawler.py | somarlyonks/crawler | 466e64b2e743d98e4dbe7dafdc4a4e0af2b21e42 | [
"MIT"
] | 1 | 2021-06-02T03:52:49.000Z | 2021-06-02T03:52:49.000Z | import asyncio
from .shared import Config
from .logger import Logger
from .cacher import Cacher
from .visitor import Visitor
from .spider import Spider
from .scheduler import Scheduler
class Crawler:
Logger = Logger
Cacher = Cacher
Scheduler = Scheduler
Visitor = Visitor
Spider = Spider
def __init__(self, config: Config = Config()):
self.config = config
self.logger = self.Logger(config)
self.cacher = self.Cacher(config, logger=self.logger)
self.scheduler = self.Scheduler(config, cacher=self.cacher, logger=self.logger)
self.visitor = self.Visitor(config, cacher=self.cacher, logger=self.logger)
async def run(self, tasks, resolver):
await self.scheduler.run(tasks)
aio_tasks = []
for _ in range(self.config.spider):
spider = self.Spider(self.config, cacher=self.cacher, logger=self.logger)
spider.install(scheduler=self.scheduler, visitor=self.visitor)
aio_tasks.append(asyncio.create_task(spider.run(resolver)))
await self.scheduler.all_done()
for task in aio_tasks:
task.cancel()
self.logger.info('DONE')
return await asyncio.gather(*aio_tasks, return_exceptions=True)
| 29.27907 | 87 | 0.675933 |
d27783f71dc1fe5a1b53d2ff6174c181d61ccae4 | 5,586 | py | Python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_07_01/operations/_nodes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_07_01/operations/_nodes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_07_01/operations/_nodes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NodesOperations(object):
    """NodesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.databoxedge.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: generated by AutoRest; manual edits are typically overwritten on
    # the next code generation.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_data_box_edge_device(
        self,
        device_name, # type: str
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NodeList"]
        """Gets all the nodes currently configured under this Data Box Edge device.
        :param device_name: The device name.
        :type device_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NodeList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2019_07_01.models.NodeList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Builds the GET request for either the first page (full resource
        # URL) or a continuation link returned by the previous page.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
                path_format_arguments = {
                    'deviceName': self._serialize.url("device_name", device_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page into (continuation_token, items); the token
        # is None because NodeList exposes no nextLink here.
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NodeList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        # Executes the request for a page and maps HTTP errors to exceptions.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/nodes'} # type: ignore
| 45.786885 | 207 | 0.656284 |
97f630b84473f2f853d0b3a4b69cde2e5f002fef | 15,874 | py | Python | mmdet/models/detectors/cascade_rcnn.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | 2 | 2020-03-22T14:27:38.000Z | 2020-06-20T02:35:14.000Z | mmdet/models/detectors/cascade_rcnn.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/cascade_rcnn.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import torch
import torch.nn as nn
from .base import BaseDetector
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,
merge_aug_masks)
@DETECTORS.register_module
class CascadeRCNN(BaseDetector, RPNTestMixin):
    """Cascade R-CNN detector: a multi-stage detection head where each
    stage refines the boxes produced by the previous one.

    Components (backbone/neck/rpn/bbox/mask heads) are built from config
    dicts via the ``builder`` module; per-stage heads are held in
    ``nn.ModuleList``s of length ``num_stages``.
    """
    def __init__(self,
                 num_stages,
                 backbone,
                 neck=None,
                 shared_head=None,
                 rpn_head=None,
                 bbox_roi_extractor=None,
                 bbox_head=None,
                 mask_roi_extractor=None,
                 mask_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # bbox branch is mandatory for Cascade R-CNN.
        assert bbox_roi_extractor is not None
        assert bbox_head is not None
        super(CascadeRCNN, self).__init__()
        self.num_stages = num_stages
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        if rpn_head is not None:
            self.rpn_head = builder.build_head(rpn_head)
        if shared_head is not None:
            self.shared_head = builder.build_shared_head(shared_head)
        if bbox_head is not None:
            self.bbox_roi_extractor = nn.ModuleList()
            self.bbox_head = nn.ModuleList()
            # A single config dict is replicated across all stages.
            if not isinstance(bbox_roi_extractor, list):
                bbox_roi_extractor = [
                    bbox_roi_extractor for _ in range(num_stages)
                ]
            if not isinstance(bbox_head, list):
                bbox_head = [bbox_head for _ in range(num_stages)]
            assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
            for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
                self.bbox_roi_extractor.append(
                    builder.build_roi_extractor(roi_extractor))
                self.bbox_head.append(builder.build_head(head))
        if mask_head is not None:
            self.mask_head = nn.ModuleList()
            if not isinstance(mask_head, list):
                mask_head = [mask_head for _ in range(num_stages)]
            assert len(mask_head) == self.num_stages
            for head in mask_head:
                self.mask_head.append(builder.build_head(head))
            if mask_roi_extractor is not None:
                self.share_roi_extractor = False
                self.mask_roi_extractor = nn.ModuleList()
                if not isinstance(mask_roi_extractor, list):
                    mask_roi_extractor = [
                        mask_roi_extractor for _ in range(num_stages)
                    ]
                assert len(mask_roi_extractor) == self.num_stages
                for roi_extractor in mask_roi_extractor:
                    self.mask_roi_extractor.append(
                        builder.build_roi_extractor(roi_extractor))
            else:
                # No dedicated mask extractor: reuse the bbox RoI features.
                self.share_roi_extractor = True
                self.mask_roi_extractor = self.bbox_roi_extractor
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)
    @property
    def with_rpn(self):
        # True when an RPN head was configured (two-stage mode).
        return hasattr(self, 'rpn_head') and self.rpn_head is not None
    def init_weights(self, pretrained=None):
        """Initialize backbone (optionally from *pretrained*) and all heads."""
        super(CascadeRCNN, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_shared_head:
            self.shared_head.init_weights(pretrained=pretrained)
        for i in range(self.num_stages):
            if self.with_bbox:
                self.bbox_roi_extractor[i].init_weights()
                self.bbox_head[i].init_weights()
            if self.with_mask:
                if not self.share_roi_extractor:
                    self.mask_roi_extractor[i].init_weights()
                self.mask_head[i].init_weights()
    def extract_feat(self, img):
        """Run backbone (and neck, when present) on the input batch."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x
    def forward_train(self,
                      img,
                      img_meta,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None):
        """Training forward pass.

        Accumulates RPN losses plus per-stage bbox/mask losses keyed as
        's{stage}.{loss_name}', each stage weighted by
        train_cfg.stage_loss_weights.  Boxes refined by stage i become the
        proposals of stage i+1.
        """
        x = self.extract_feat(img)
        losses = dict()
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
                                          self.train_cfg.rpn)
            rpn_losses = self.rpn_head.loss(
                *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
            losses.update(rpn_losses)
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
        else:
            # Precomputed proposals were supplied by the caller.
            proposal_list = proposals
        for i in range(self.num_stages):
            self.current_stage = i
            rcnn_train_cfg = self.train_cfg.rcnn[i]
            lw = self.train_cfg.stage_loss_weights[i]
            # assign gts and sample proposals
            sampling_results = []
            if self.with_bbox or self.with_mask:
                bbox_assigner = build_assigner(rcnn_train_cfg.assigner)
                bbox_sampler = build_sampler(
                    rcnn_train_cfg.sampler, context=self)
                num_imgs = img.size(0)
                if gt_bboxes_ignore is None:
                    gt_bboxes_ignore = [None for _ in range(num_imgs)]
                for j in range(num_imgs):
                    assign_result = bbox_assigner.assign(
                        proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
                        gt_labels[j])
                    sampling_result = bbox_sampler.sample(
                        assign_result,
                        proposal_list[j],
                        gt_bboxes[j],
                        gt_labels[j],
                        feats=[lvl_feat[j][None] for lvl_feat in x])
                    sampling_results.append(sampling_result)
            # bbox head forward and loss
            bbox_roi_extractor = self.bbox_roi_extractor[i]
            bbox_head = self.bbox_head[i]
            rois = bbox2roi([res.bboxes for res in sampling_results])
            bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
                                            rois)
            if self.with_shared_head:
                bbox_feats = self.shared_head(bbox_feats)
            cls_score, bbox_pred = bbox_head(bbox_feats)
            bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
                                                gt_labels, rcnn_train_cfg)
            loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
            for name, value in loss_bbox.items():
                # Only loss terms get the stage weight; metrics pass through.
                losses['s{}.{}'.format(i, name)] = (
                    value * lw if 'loss' in name else value)
            # mask head forward and loss
            if self.with_mask:
                if not self.share_roi_extractor:
                    mask_roi_extractor = self.mask_roi_extractor[i]
                    pos_rois = bbox2roi(
                        [res.pos_bboxes for res in sampling_results])
                    mask_feats = mask_roi_extractor(
                        x[:mask_roi_extractor.num_inputs], pos_rois)
                    if self.with_shared_head:
                        mask_feats = self.shared_head(mask_feats)
                else:
                    # reuse positive bbox feats: build a boolean index that
                    # selects the positive samples out of bbox_feats.
                    pos_inds = []
                    device = bbox_feats.device
                    for res in sampling_results:
                        pos_inds.append(
                            torch.ones(
                                res.pos_bboxes.shape[0],
                                device=device,
                                dtype=torch.uint8))
                        pos_inds.append(
                            torch.zeros(
                                res.neg_bboxes.shape[0],
                                device=device,
                                dtype=torch.uint8))
                    pos_inds = torch.cat(pos_inds)
                    mask_feats = bbox_feats[pos_inds]
                mask_head = self.mask_head[i]
                mask_pred = mask_head(mask_feats)
                mask_targets = mask_head.get_target(sampling_results, gt_masks,
                                                    rcnn_train_cfg)
                pos_labels = torch.cat(
                    [res.pos_gt_labels for res in sampling_results])
                loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
                for name, value in loss_mask.items():
                    losses['s{}.{}'.format(i, name)] = (
                        value * lw if 'loss' in name else value)
            # refine bboxes
            if i < self.num_stages - 1:
                pos_is_gts = [res.pos_is_gt for res in sampling_results]
                roi_labels = bbox_targets[0] # bbox_targets is a tuple
                with torch.no_grad():
                    proposal_list = bbox_head.refine_bboxes(
                        rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
        return losses
    def simple_test(self, img, img_meta, proposals=None, rescale=False, **kwargs):
        """Single-image (no augmentation) inference.

        Classification scores are averaged over all stages ("ensemble");
        when test_cfg.keep_all_stages is set, per-stage results are also
        returned keyed 'stage{i}'.
        """
        x = self.extract_feat(img)
        proposal_list = self.simple_test_rpn(
            x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
        img_shape = img_meta[0]['img_shape']
        ori_shape = img_meta[0]['ori_shape']
        scale_factor = img_meta[0]['scale_factor']
        # "ms" in variable names means multi-stage
        ms_bbox_result = {}
        ms_segm_result = {}
        ms_scores = []
        rcnn_test_cfg = self.test_cfg.rcnn
        rois = bbox2roi(proposal_list)
        for i in range(self.num_stages):
            bbox_roi_extractor = self.bbox_roi_extractor[i]
            bbox_head = self.bbox_head[i]
            bbox_feats = bbox_roi_extractor(
                x[:len(bbox_roi_extractor.featmap_strides)], rois)
            if self.with_shared_head:
                bbox_feats = self.shared_head(bbox_feats)
            cls_score, bbox_pred = bbox_head(bbox_feats)
            ms_scores.append(cls_score)
            if self.test_cfg.keep_all_stages:
                det_bboxes, det_labels = bbox_head.get_det_bboxes(
                    rois,
                    cls_score,
                    bbox_pred,
                    img_shape,
                    scale_factor,
                    rescale=rescale,
                    cfg=rcnn_test_cfg)
                bbox_result = bbox2result(det_bboxes, det_labels,
                                          bbox_head.num_classes)
                ms_bbox_result['stage{}'.format(i)] = bbox_result
                if self.with_mask:
                    mask_roi_extractor = self.mask_roi_extractor[i]
                    mask_head = self.mask_head[i]
                    if det_bboxes.shape[0] == 0:
                        # No detections: one empty list per foreground class.
                        segm_result = [
                            [] for _ in range(mask_head.num_classes - 1)
                        ]
                    else:
                        _bboxes = (
                            det_bboxes[:, :4] * scale_factor
                            if rescale else det_bboxes)
                        mask_rois = bbox2roi([_bboxes])
                        mask_feats = mask_roi_extractor(
                            x[:len(mask_roi_extractor.featmap_strides)],
                            mask_rois)
                        if self.with_shared_head:
                            mask_feats = self.shared_head(mask_feats, i)
                        mask_pred = mask_head(mask_feats)
                        segm_result = mask_head.get_seg_masks(
                            mask_pred, _bboxes, det_labels, rcnn_test_cfg,
                            ori_shape, scale_factor, rescale)
                    ms_segm_result['stage{}'.format(i)] = segm_result
            if i < self.num_stages - 1:
                # Feed stage-i refined boxes into the next stage.
                bbox_label = cls_score.argmax(dim=1)
                rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,
                                                  img_meta[0])
        cls_score = sum(ms_scores) / self.num_stages
        det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(
            rois,
            cls_score,
            bbox_pred,
            img_shape,
            scale_factor,
            rescale=rescale,
            cfg=rcnn_test_cfg)
        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)
        ms_bbox_result['ensemble'] = bbox_result
        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                segm_result = [
                    [] for _ in range(self.mask_head[-1].num_classes - 1)
                ]
            else:
                _bboxes = (
                    det_bboxes[:, :4] * scale_factor
                    if rescale else det_bboxes)
                mask_rois = bbox2roi([_bboxes])
                aug_masks = []
                # Average mask predictions across all stages.
                for i in range(self.num_stages):
                    mask_roi_extractor = self.mask_roi_extractor[i]
                    mask_feats = mask_roi_extractor(
                        x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
                    if self.with_shared_head:
                        mask_feats = self.shared_head(mask_feats)
                    mask_pred = self.mask_head[i](mask_feats)
                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                merged_masks = merge_aug_masks(aug_masks,
                                               [img_meta] * self.num_stages,
                                               self.test_cfg.rcnn)
                segm_result = self.mask_head[-1].get_seg_masks(
                    merged_masks, _bboxes, det_labels, rcnn_test_cfg,
                    ori_shape, scale_factor, rescale)
            ms_segm_result['ensemble'] = segm_result
        if not self.test_cfg.keep_all_stages:
            if self.with_mask:
                results = (ms_bbox_result['ensemble'],
                           ms_segm_result['ensemble'])
            else:
                results = ms_bbox_result['ensemble']
        else:
            if self.with_mask:
                results = {
                    stage: (ms_bbox_result[stage], ms_segm_result[stage])
                    for stage in ms_bbox_result
                }
            else:
                results = ms_bbox_result
        return results
    def aug_test(self, img, img_meta, proposals=None, rescale=False):
        # Test-time augmentation is not supported for this detector.
        raise NotImplementedError
    def show_result(self, data, result, img_norm_cfg, **kwargs):
        """Visualize the 'ensemble' result via the base-class implementation."""
        if self.with_mask:
            ms_bbox_result, ms_segm_result = result
            if isinstance(ms_bbox_result, dict):
                result = (ms_bbox_result['ensemble'],
                          ms_segm_result['ensemble'])
        else:
            if isinstance(result, dict):
                result = result['ensemble']
        # NOTE(review): img_norm_cfg is accepted but not forwarded to the
        # base implementation — confirm the base show_result signature.
        super(CascadeRCNN, self).show_result(data, result,
                                             **kwargs)
| 41.773684 | 82 | 0.524253 |
5d4d826c217720973de23234b1ca0a75b88ac031 | 4,021 | py | Python | alipay/aop/api/request/MybankCreditCreditriskGuarschemeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/MybankCreditCreditriskGuarschemeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/MybankCreditCreditriskGuarschemeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankCreditCreditriskGuarschemeQueryModel import MybankCreditCreditriskGuarschemeQueryModel
class MybankCreditCreditriskGuarschemeQueryRequest(object):
    """Request wrapper for the Alipay OpenAPI method
    ``mybank.credit.creditrisk.guarscheme.query``.

    Holds the business payload (``biz_content``/``biz_model``) plus the
    common gateway parameters, and serializes everything into the flat
    dict the gateway expects via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        # Business payload; see the biz_content setter for accepted types.
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        # Extra user-defined text parameters merged into the request dict.
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict that is
        # converted into the model type.
        if isinstance(value, MybankCreditCreditriskGuarschemeQueryModel):
            self._biz_content = value
        else:
            self._biz_content = MybankCreditCreditriskGuarschemeQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        ``biz_model`` takes precedence; ``biz_content`` is serialized to
        JSON when it exposes ``to_alipay_dict``, otherwise passed as-is.
        """
        params = dict()
        params[P_METHOD] = 'mybank.credit.creditrisk.guarscheme.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This API carries no file attachments; always returns an empty dict."""
        multipart_params = dict()
        return multipart_params
| 27.731034 | 148 | 0.649092 |
9c7512b4ffa7fcb9caac0620a2f67894d8d8db40 | 2,605 | py | Python | examples/data_augmentation/falsifier.py | shromonag/VerifAI | ace214d1c3282ed5ea63ee3f52457e35f54ebb62 | [
"BSD-3-Clause"
] | 1 | 2020-07-27T13:32:01.000Z | 2020-07-27T13:32:01.000Z | examples/data_augmentation/falsifier.py | shromonag/VerifAI | ace214d1c3282ed5ea63ee3f52457e35f54ebb62 | [
"BSD-3-Clause"
] | null | null | null | examples/data_augmentation/falsifier.py | shromonag/VerifAI | ace214d1c3282ed5ea63ee3f52457e35f54ebb62 | [
"BSD-3-Clause"
] | null | null | null | from verifai.features.features import *
from verifai.samplers.feature_sampler import *
from verifai.falsifier import generic_falsifier
from verifai.monitor import specification_monitor
from dotmap import DotMap
from renderer.generator import genImage
from renderer.kittiLib import getLib
import pickle
# Sampling domain
# --- Sampling domain -------------------------------------------------------
# Each car is placed by normalized (x, y) position plus a model id.
carDomain = Struct({
    'xPos': Box([0, 1]),
    'yPos': Box([0, 1]),
    'carID': Categorical(*np.arange(0,37))
})

# Full scene feature space: background, 1-2 cars, and image post-processing
# knobs (brightness/sharpness/contrast/color).
space = FeatureSpace({
    'backgroundID': Feature(Categorical(*np.arange(0, 35))),
    'cars': Feature(carDomain, lengthDomain=DiscreteBox([1, 2])),
    'brightness': Feature(Box([0.5, 1])),
    'sharpness': Feature(Box([0, 1])),
    'contrast': Feature(Box([0.5, 1.5])),
    'color': Feature(Box([0, 1]))
})

sampler = FeatureSampler.randomSamplerFor(space)


class confidence_spec(specification_monitor):
    """Specification: the classifier's prediction must match the label."""
    def __init__(self):
        def specification(traj):
            # Satisfied (truthy) when prediction equals ground truth.
            return traj['yTrue'] == traj['yPred']
        super().__init__(specification)


# --- Falsifier configuration ----------------------------------------------
MAX_ITERS = 20
PORT = 8888
MAXREQS = 5
BUFSIZE = 4096

falsifier_params = DotMap()
falsifier_params.port = PORT
falsifier_params.n_iters = MAX_ITERS
falsifier_params.maxreqs = MAXREQS
falsifier_params.bufsize = BUFSIZE
falsifier_params.compute_error_table = True
falsifier_params.fal_thres = 0.5

falsifier = generic_falsifier(sampler=sampler,
                              monitor=confidence_spec(), falsifier_params=falsifier_params)
falsifier.run_falsifier()

# --- Error-table analysis ---------------------------------------------------
analysis_params = DotMap()
analysis_params.k_closest_params.k = 4
analysis_params.random_params.count = 4
analysis_params.pca_params.n_components = 2
analysis_params.pca = True
falsifier.analyze_error_table(analysis_params=analysis_params)

# Render and save counterexample images for the selected samples.
lib = getLib()
print("Error table")
print(falsifier.error_table.table)
print("Results of error table analysis")
print("Random samples from error table")
for i, sample in enumerate(falsifier.error_analysis.random_samples):
    print("Random Sample : ", i)
    print(sample)
    img, _ = genImage(lib, sample)
    img.save("counterexample_images/random_"+str(i)+".png")
    img.show()
print("k closest samples from error table")
for i, sample in enumerate(falsifier.error_analysis.k_closest_samples):
    print("Sample : ", i)
    print(sample)
    img, _ = genImage(lib, sample)
    img.save("counterexample_images/kclosest_" + str(i) + ".png")
print("PCA analysis")
print("PCA pivot: ", falsifier.error_analysis.pca['pivot'])
print("Directions: ", falsifier.error_analysis.pca['directions'])
# To save all samples: uncomment this
# pickle.dump(falsifier.samples, open("generated_samples.pickle", "wb"))
| 30.647059 | 90 | 0.732054 |
5ebdad5e696666fd63dc6e24cbfb218898f3c84e | 1,604 | py | Python | sample/python/TankJoystick.py | orikuma/choreonoid-org | d63dff5fa2249a586ffb2dbdbfa0aef0081bad66 | [
"MIT"
] | null | null | null | sample/python/TankJoystick.py | orikuma/choreonoid-org | d63dff5fa2249a586ffb2dbdbfa0aef0081bad66 | [
"MIT"
] | null | null | null | sample/python/TankJoystick.py | orikuma/choreonoid-org | d63dff5fa2249a586ffb2dbdbfa0aef0081bad66 | [
"MIT"
] | null | null | null | from cnoid.Util import *
from cnoid.Base import *
from cnoid.Body import *
from cnoid.BodyPlugin import *
import math;
# --- Scene appearance: dark background, dim world light ---------------------
sceneWidget = SceneView.instance.sceneWidget
sceneWidget.setHeadLightEnabled(False)
sceneWidget.setFloorGrid(False)
sceneWidget.setWorldLightIntensity(0.1)
sceneWidget.setWorldLightAmbient(0.0)
sceneWidget.setBackgroundColor([0, 0, 0])
# Camera pose: position, direction, up vector.
sceneWidget.setCameraPosition(
    [ -2.86824, 6.25331, 2.49127 ],
    [ 0.412288, -0.847325, -0.334751 ],
    [ 0.146464, -0.301009, 0.942307 ])

# On-screen virtual joystick view for driving the tank.
joystickView = ViewManager.getOrCreateView("Base", "VirtualJoystickView")
MainWindow.instance.viewArea.addView(joystickView)
joystickView.bringToFront()

# --- World and bodies -------------------------------------------------------
worldItem = WorldItem()
RootItem.instance.addChildItem(worldItem)

# Static laboratory environment model.
laboItem = BodyItem()
laboItem.load("${SHARE}/model/Labo1/Labo1.body")
laboItem.setChecked(True)
worldItem.addChildItem(laboItem)

# Tank model, spawned facing -90 degrees yaw.
tankItem = BodyItem()
tankItem.load("${SHARE}/model/Tank/Tank.body")
tankItem.setChecked(True)
tank = tankItem.body
tank.rootLink.setTranslation([-0.8, 2.4, 0.1])
tank.rootLink.setRotation(rotFromRpy([0, 0, math.radians(-90.0)]))
tank.calcForwardKinematics()
tankItem.storeInitialState()
worldItem.addChildItem(tankItem)

# Joystick controller attached to the tank.
controllerItem = SimpleControllerItem()
controllerItem.setController("TankJoystickController")
tankItem.addChildItem(controllerItem)

# --- Simulation: 1 ms steps, realtime, unlimited duration --------------------
simulatorItem = AISTSimulatorItem()
simulatorItem.setTimeStep(0.001)
simulatorItem.setRealtimeSyncMode(True)
simulatorItem.setTimeRangeMode(SimulatorItem.TimeRangeMode.TR_UNLIMITED)
worldItem.addChildItem(simulatorItem)
simulatorItem.setSelected(True)
simulatorItem.startSimulation()
| 30.846154 | 73 | 0.801746 |
de291565e6ecffc7990597d8ca48ae36d1315b94 | 2,675 | py | Python | dynamics/DubinsCapture.py | SFU-MARS/optimized_dp | 4a3b33f7da29479a3bfdc61905b61c05c76e6795 | [
"MIT"
] | 41 | 2020-06-23T01:58:03.000Z | 2022-03-28T01:45:12.000Z | dynamics/DubinsCapture.py | molumitu/optimized_dp | ad1135e98cd5fb5ba4365bee37cd88808b3d4039 | [
"MIT"
] | 1 | 2021-08-01T06:58:57.000Z | 2021-08-01T06:58:57.000Z | dynamics/DubinsCapture.py | molumitu/optimized_dp | ad1135e98cd5fb5ba4365bee37cd88808b3d4039 | [
"MIT"
] | 20 | 2020-06-05T20:52:02.000Z | 2022-03-01T03:17:39.000Z | import heterocl as hcl
class DubinsCapture:
    """Dubins-car capture dynamics in relative coordinates, written with
    HeteroCL constructs for use inside a reachability (HJ PDE) solver.

    ``uMode``/``dMode`` choose whether the control / disturbance maximizes
    or minimizes its Hamiltonian term.
    """

    def __init__(self, x=[0,0,0], wMax=1.0, speed=1.0, dMax=1.0, uMode="max", dMode="min"):
        # NOTE(review): mutable default for x is shared across instances;
        # harmless here since it is only stored, never mutated in place.
        self.x = x
        self.wMax = wMax      # turn-rate (control) bound
        self.speed = speed    # forward speed
        self.dMax = dMax      # disturbance bound
        self.uMode = uMode
        self.dMode = dMode

    def opt_ctrl(self, t, state, spat_deriv):
        """
        Optimal turn-rate control given the spatial value-function gradient.

        :param spat_deriv: tuple of spatial derivative in all dimensions
        state: x1, x2, x3
        t: time
        :return: tuple (opt_w, 0, 0) — the solver expects one entry per dim
        """
        opt_w = hcl.scalar(self.wMax, "opt_w")
        # Just create and pass back, even though they're not used
        in3 = hcl.scalar(0, "in3")
        in4 = hcl.scalar(0, "in4")
        #a_term = spat_deriv[0] * self.x[1] - spat_deriv[1]*self.x[0] - spat_deriv[2]
        # Declare a variable
        a_term = hcl.scalar(0, "a_term")
        # use the scalar by indexing 0 everytime
        # a_term is the coefficient multiplying the control in the Hamiltonian.
        a_term[0] = spat_deriv[0] * state[1] - spat_deriv[1] * state[0] - spat_deriv[2]
        # NOTE(review): opt_dstb below compares its scalar via b_term[0] >= 0,
        # but here the un-indexed tensor is compared; confirm that
        # hcl.if_(a_term >= 0) behaves identically to hcl.if_(a_term[0] >= 0).
        with hcl.if_(a_term >= 0):
            with hcl.if_(self.uMode == "min"):
                opt_w[0] = -opt_w[0]
        with hcl.elif_(a_term < 0):
            with hcl.if_(self.uMode == "max"):
                opt_w[0] = -opt_w[0]
        return (opt_w[0], in3[0], in4[0])

    def opt_dstb(self, t, state, spat_deriv):
        """
        Optimal disturbance given the spatial value-function gradient.

        :param spat_deriv: tuple of spatial derivative in all dimensions
        state: x1, x2, x3
        t: time
        :return: a tuple of optimal disturbances
        """
        # Graph takes in 4 possible inputs, by default, for now
        d1 = hcl.scalar(self.dMax, "d1")
        d2 = hcl.scalar(0, "d2")
        # Just create and pass back, even though they're not used
        d3 = hcl.scalar(0, "d3")
        # Declare a variable
        b_term = hcl.scalar(0, "b_term")
        # use the scalar by indexing 0 everytime
        # b_term is the coefficient multiplying the disturbance (theta dim).
        b_term[0] = spat_deriv[2]
        with hcl.if_(b_term[0] >= 0):
            with hcl.if_(self.dMode == "min"):
                d1[0] = -d1[0]
        with hcl.elif_(b_term[0] < 0):
            with hcl.if_(self.dMode == "max"):
                d1[0] = -d1[0]
        return (d1[0], d2[0], d3[0])

    def dynamics(self, t, state, uOpt, dOpt):
        """Relative Dubins dynamics: returns (x_dot, y_dot, theta_dot)
        for the pursuer-fixed frame, driven by uOpt[0] and dOpt[0]."""
        x_dot = hcl.scalar(0, "x_dot")
        y_dot = hcl.scalar(0, "y_dot")
        theta_dot = hcl.scalar(0, "theta_dot")
        x_dot[0] = -self.speed + self.speed*hcl.cos(state[2]) + uOpt[0]*state[1]
        y_dot[0] = self.speed*hcl.sin(state[2]) - uOpt[0]*state[0]
        theta_dot[0] = dOpt[0] - uOpt[0]
        return (x_dot[0], y_dot[0], theta_dot[0])
| 34.74026 | 91 | 0.520748 |
3475e768dda266ee0b855e2f87c2e5fbe8d9d38e | 1,307 | py | Python | notebooks/reloader.py | LeonardoGentile/modular-notebooks-starter | b1769c2973e28a0bff41a46abe1768e791753808 | [
"CC0-1.0"
] | null | null | null | notebooks/reloader.py | LeonardoGentile/modular-notebooks-starter | b1769c2973e28a0bff41a46abe1768e791753808 | [
"CC0-1.0"
] | null | null | null | notebooks/reloader.py | LeonardoGentile/modular-notebooks-starter | b1769c2973e28a0bff41a46abe1768e791753808 | [
"CC0-1.0"
] | null | null | null | import sys
# https://stackoverflow.com/questions/49299033/how-to-force-ipython-deep-reload
def _is_module_deletable(modname, modpath):
if modname.startswith('_cython_inline'):
# Don't return cached inline compiled .PYX files
return False
for path in [sys.prefix]:
if modpath.startswith(path):
return False
else:
return set(modname.split('.'))
def clear():
    """Remove user modules from ``sys.modules`` to force a deep reload.

    Modules under the interpreter prefix, statically linked C modules
    (no ``__file__``) and this helper module itself are left untouched.
    Prints the names of the modules that were dropped.
    """
    reloaded = []
    # Snapshot the items: sys.modules is mutated while we walk it.
    for name, module in list(sys.modules.items()):
        path = getattr(module, '__file__', None)
        # Statically linked C modules expose no __file__, so their path is
        # unknowable — skip them. Also never unload this module itself.
        if path is None or name == 'reloader':
            continue
        if _is_module_deletable(name, path):
            reloaded.append(name)
            del sys.modules[name]
    print("Reloaded modules:\n\n%s" % ", ".join(reloaded))
c1e407299ef48e7dfcc78984ff3337fecc3b88cf | 926 | py | Python | scripts/who.py | georgthegreat/psutil | f716afc81d4c9ded08b23d376612e1491d3ea5da | [
"BSD-3-Clause"
] | 8,285 | 2015-01-03T15:47:51.000Z | 2022-03-31T19:55:30.000Z | scripts/who.py | georgthegreat/psutil | f716afc81d4c9ded08b23d376612e1491d3ea5da | [
"BSD-3-Clause"
] | 1,360 | 2015-01-01T12:52:25.000Z | 2022-03-29T06:06:30.000Z | scripts/who.py | georgthegreat/psutil | f716afc81d4c9ded08b23d376612e1491d3ea5da | [
"BSD-3-Clause"
] | 1,496 | 2015-01-01T12:24:40.000Z | 2022-03-31T13:54:35.000Z | #!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'who' command; print information about users who are
currently logged in.
$ python3 scripts/who.py
giampaolo console 2017-03-25 22:24 loginwindow
giampaolo ttys000 2017-03-25 23:28 (10.0.2.2) sshd
"""
from datetime import datetime
import psutil
def main():
    """Print one line per logged-in session: user, tty, login time, host, process."""
    for user in psutil.users():
        # user.pid may be None on platforms that don't report it.
        proc_name = psutil.Process(user.pid).name() if user.pid else ""
        started = datetime.fromtimestamp(user.started).strftime("%Y-%m-%d %H:%M")
        host = "(%s)" % user.host if user.host else ""
        print("%-12s %-10s %-10s %-14s %s" % (
            user.name, user.terminal or '-', started, host, proc_name))


if __name__ == '__main__':
    main()
| 25.722222 | 76 | 0.605832 |
a3e7c317d993fffc752dbe3fec4651f6bf37e0b2 | 4,949 | py | Python | venv/lib/python3.6/site-packages/celery/backends/cache.py | kuldeep24680/recurring_payments | 79e589c3d3f4fb1a0791725065e2c068750ef6b2 | [
"MIT"
] | 13 | 2018-03-28T23:07:01.000Z | 2022-03-12T06:01:21.000Z | newenv/lib/python3.8/site-packages/celery/backends/cache.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 11 | 2018-06-18T15:49:07.000Z | 2021-11-25T01:45:33.000Z | newenv/lib/python3.8/site-packages/celery/backends/cache.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 5 | 2018-03-28T23:07:05.000Z | 2021-12-09T19:02:00.000Z | # -*- coding: utf-8 -*-
"""Memcached and in-memory cache result backend."""
from __future__ import absolute_import, unicode_literals
from kombu.utils.encoding import bytes_to_str, ensure_bytes
from kombu.utils.objects import cached_property
from celery.exceptions import ImproperlyConfigured
from celery.five import PY3
from celery.utils.functional import LRUCache
from .base import KeyValueStoreBackend
__all__ = ('CacheBackend',)
# Memoized (is_pylibmc, memcache_module, key_type) triple; a one-element
# list is used as a mutable module-level cache slot.
_imp = [None]

REQUIRES_BACKEND = """\
The Memcached backend requires either pylibmc or python-memcached.\
"""

UNKNOWN_BACKEND = """\
The cache backend {0!r} is unknown,
Please use one of the following backends instead: {1}\
"""
def import_best_memcache():
    """Import the best available memcached client library, preferring
    ``pylibmc`` over ``python-memcached``.

    The result ``(is_pylibmc, module, key_type)`` is memoized in ``_imp``
    so the import dance only happens once per process.

    Raises:
        ImproperlyConfigured: if neither client library is installed.
    """
    if _imp[0] is None:
        is_pylibmc, memcache_key_t = False, ensure_bytes
        try:
            import pylibmc as memcache
            is_pylibmc = True
        except ImportError:
            try:
                import memcache  # noqa
            except ImportError:
                raise ImproperlyConfigured(REQUIRES_BACKEND)
        if PY3:  # pragma: no cover
            # python 3 memcached clients take str keys, not bytes.
            memcache_key_t = bytes_to_str
        _imp[0] = (is_pylibmc, memcache, memcache_key_t)
    return _imp[0]
def get_best_memcache(*args, **kwargs):
    """Return ``(Client, key_type)`` for the best available client.

    When the fallback ``python-memcached`` library is used, its client is
    wrapped so the pylibmc-only ``behaviors`` keyword is silently dropped.
    """
    # pylint: disable=unpacking-non-sequence
    # This is most definitely a sequence, but pylint thinks it's not.
    is_pylibmc, memcache, key_t = import_best_memcache()
    Client = _Client = memcache.Client

    if not is_pylibmc:
        def Client(*args, **kwargs):  # noqa
            kwargs.pop('behaviors', None)
            return _Client(*args, **kwargs)

    return Client, key_t
class DummyClient(object):
    """In-process stand-in for a memcached client, backed by a bounded
    LRU map. Mirrors the subset of the memcached client API the cache
    backend uses; expiry arguments are accepted but ignored."""

    def __init__(self, *args, **kwargs):
        # Same constructor signature as the real clients; args are ignored.
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        store = self.cache
        return {key: store[key] for key in keys if key in store}

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)

    def touch(self, key, expire):
        # Expiry is meaningless for the in-memory store; accept and ignore.
        pass
# Maps backend-URL scheme -> zero-arg factory returning (Client, key_type).
backends = {
    'memcache': get_best_memcache,
    'memcached': get_best_memcache,
    'pylibmc': get_best_memcache,
    'memory': lambda: (DummyClient, ensure_bytes),
}
class CacheBackend(KeyValueStoreBackend):
    """Cache result backend.

    Stores task results in memcached (pylibmc / python-memcached) or an
    in-memory dummy client, selected by the backend URL scheme.
    """

    # List of "host;host" servers parsed from the backend URL.
    servers = None
    supports_autoexpire = True
    supports_native_join = True
    implements_incr = True

    def __init__(self, app, expires=None, backend=None,
                 options=None, url=None, **kwargs):
        options = {} if not options else options
        super(CacheBackend, self).__init__(app, **kwargs)
        self.url = url
        # Per-app options with call-site options layered on top.
        self.options = dict(self.app.conf.cache_backend_options,
                            **options)

        self.backend = url or backend or self.app.conf.cache_backend
        if self.backend:
            # Split "scheme://host1;host2/" into scheme + server list.
            self.backend, _, servers = self.backend.partition('://')
            self.servers = servers.rstrip('/').split(';')
        self.expires = self.prepare_expires(expires, type=int)
        try:
            self.Client, self.key_t = backends[self.backend]()
        except KeyError:
            raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
                self.backend, ', '.join(backends)))
        self._encode_prefixes()  # rencode the keyprefixes

    def get(self, key):
        return self.client.get(key)

    def mget(self, keys):
        return self.client.get_multi(keys)

    def set(self, key, value):
        # Entries auto-expire after self.expires seconds.
        return self.client.set(key, value, self.expires)

    def delete(self, key):
        return self.client.delete(key)

    def _apply_chord_incr(self, header_result, body, **kwargs):
        # Initialize the chord counter key before the base implementation
        # starts incrementing it.
        chord_key = self.get_key_for_chord(header_result.id)
        self.client.set(chord_key, 0, time=self.expires)
        return super(CacheBackend, self)._apply_chord_incr(
            header_result, body, **kwargs)

    def incr(self, key):
        return self.client.incr(key)

    def expire(self, key, value):
        return self.client.touch(key, value)

    @cached_property
    def client(self):
        # Lazily create one client per backend instance.
        return self.Client(self.servers, **self.options)

    def __reduce__(self, args=(), kwargs=None):
        # Support pickling: rebuild from the scheme://servers/ URI plus
        # expiry and options.
        kwargs = {} if not kwargs else kwargs
        servers = ';'.join(self.servers)
        backend = '{0}://{1}/'.format(self.backend, servers)
        kwargs.update(
            {'backend': backend,
             'expires': self.expires,
             'options': self.options})
        return super(CacheBackend, self).__reduce__(args, kwargs)

    def as_uri(self, *args, **kwargs):
        """Return the backend as an URI.

        This properly handles the case of multiple servers.
        """
        servers = ';'.join(self.servers)
        return '{0}://{1}/'.format(self.backend, servers)
| 29.813253 | 71 | 0.632047 |
6f1e68e8d4fe6f5f07936b0a03a69fe0f88021a6 | 2,588 | py | Python | igcollect/linux_disklatency.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 15 | 2016-04-13T11:13:41.000Z | 2020-12-04T17:25:43.000Z | igcollect/linux_disklatency.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 10 | 2016-12-01T15:15:31.000Z | 2020-05-07T13:54:57.000Z | igcollect/linux_disklatency.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 18 | 2016-03-16T11:06:10.000Z | 2022-03-14T14:56:05.000Z | #!/usr/bin/env python3
"""igcollect - Linux Disk I/O latency
Copyright (c) 2019 InnoGames GmbH
"""
from argparse import ArgumentParser
from subprocess import check_output
from time import time
import sys
from libvirt import (
open as libvirt_open,
libvirtError,
)
def parse_args():
    """Parse command-line options for the disk-latency collector.

    Returns an argparse.Namespace with ``prefix`` (defaults to the
    literal string 'None'), plus the required ``storagepool`` and
    ``storagevol`` names.
    """
    parser = ArgumentParser()
    parser.add_argument('--prefix', dest='prefix', type=str,
                        default='None', help='Graphite Metric Prefix')
    parser.add_argument('--storagepool', dest='storagepool', type=str,
                        required=True, help='Libvirt Storage Pool Name')
    parser.add_argument('--storagevol', dest='storagevol', type=str,
                        required=True, help='Libvirt Storage Volume Name')
    return parser.parse_args()
def main():
    """Measure read/write latency of a libvirt storage volume with ioping
    and print Graphite-formatted metrics (min/avg/max per mode)."""
    args = parse_args()
    # Graphite line template: "<prefix>.<mode>.<stat> <value> <timestamp>"
    template = args.prefix + '.{}.{} {} ' + str(int(time()))

    try:
        conn = libvirt_open('qemu:///system')
    except libvirtError as e:
        print(
            'An Exception has occured while openning a connection to '
            'qemu:///system: {}'.format(e)
        )
        exit(1)

    try:
        storage_pool = conn.storagePoolLookupByName(args.storagepool)
    except libvirtError as e:
        print(
            'An Exception has occured while finding {} pool: {}'
            .format(args.storagepool, e)
        )
        exit(1)

    try:
        storage_volume = storage_pool.storageVolLookupByName(args.storagevol)
    except libvirtError as e:
        print(
            'An exception has occured while opening the volume: {} {}'
            .format(args.storagevol, e)
        )
        exit(1)

    storage_volume_path = storage_volume.path()

    # ioping: -B raw batch output, -D direct I/O, 10 requests at 0.1 s
    # intervals; -WWW additionally makes it a (destructive-safe) write test.
    output_read = check_output(
        [
            '/usr/bin/sudo',
            '/usr/bin/ioping',
            '-BD',
            '-c10',
            '-i0.1',
            storage_volume_path
        ],
        universal_newlines=True
    )
    output_write = check_output(
        [
            '/usr/bin/sudo',
            '/usr/bin/ioping',
            '-BDWWW',
            '-c10',
            '-i0.1',
            storage_volume_path
        ],
        universal_newlines=True
    )
    output = {
        'read': output_read,
        'write': output_write,
    }
    for mode, data in output.items():
        measurement = data.split()
        # Fields 4-6 of ioping -B output are min/avg/max request time
        # (nanoseconds) — NOTE(review): confirm against the installed
        # ioping version's batch-mode field order.
        results = {
            'min': measurement[4],
            'avg': measurement[5],
            'max': measurement[6],
        }
        for k, v in results.items():
            print(template.format(mode, k, int(v)))


if __name__ == '__main__':
    main()
| 23.107143 | 77 | 0.551777 |
5bd5d81bc6a3449840f6692adccd4f32c4c7500d | 9,056 | py | Python | eFELunit/tests/test_MultipleCurrentStepTest.py | appukuttan-shailesh/eFELunit | 055385254875249293da72c1daf2d489033cb9da | [
"BSD-3-Clause"
] | null | null | null | eFELunit/tests/test_MultipleCurrentStepTest.py | appukuttan-shailesh/eFELunit | 055385254875249293da72c1daf2d489033cb9da | [
"BSD-3-Clause"
] | null | null | null | eFELunit/tests/test_MultipleCurrentStepTest.py | appukuttan-shailesh/eFELunit | 055385254875249293da72c1daf2d489033cb9da | [
"BSD-3-Clause"
] | null | null | null | """
Tests of the neuron responses to current steps of different amplitudes match experimental data.
The responses are quantified by extracting features from the voltage traces using eFEL.
Reference data (features extracted from experimental recordings) and experimental protocol configurations
are extracted from .zip files produced by BluePyOpt.
Andrew Davison and Shailesh Appukuttan, UNIC, CNRS.
March 2017
"""
import os.path
from datetime import datetime
import json
import sciunit
from neuronunit.capabilities import ProducesMembranePotential, ReceivesSquareCurrent
import neo
import efel
import matplotlib.pyplot as plt
from quantities import ms
from eFELunit.scores import RMS_ZScore
class MultipleCurrentStepTest(sciunit.Test):
    """
    Tests of the neuron responses to current steps of different amplitudes match
    experimental data.

    The responses are quantified by extracting features from the voltage traces
    using eFEL.
    """

    required_capabilities = (ProducesMembranePotential, ReceivesSquareCurrent)
    score_type = RMS_ZScore

    def __init__(self, observation=None, name=None, protocol=None, plot_figure=False):
        sciunit.Test.__init__(self, observation, name)
        self.plot_figure = plot_figure
        if protocol is None:
            raise ValueError("Must provide a stimulation protocol")
        self.protocol = protocol
        # Timestamp used to make output figure filenames unique.
        self.timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        self.figures = []

    def validate_observation(self, observation):
        """
        Checks that the observation has the correct format, i.e.
        - a top-level dict with one entry per current step.
            - the key should be a label for the step
            - the value should be a dict containing one entry per feature of the voltage trace
                - the key of the feature dict should be a label for the feature
                - the value should be a dict with keys 'mean' and 'value'
        """
        pass  # todo

    def generate_prediction(self, model, verbose=False):
        """Run (or reload cached) simulations and extract eFEL features."""
        use_cache = True
        self.test_base_dir = os.path.join(model.base_path, "validations", self.name)
        if not os.path.isdir(self.test_base_dir):
            os.makedirs(self.test_base_dir)
        cache_filename = os.path.join(self.test_base_dir, "results.pkl")
        # Reuse pickled recordings from a previous run when available.
        if use_cache and os.path.exists(cache_filename):
            print("***** Using cache to retrieve relevant model data *****")
            io = neo.io.get_io(cache_filename)
            self.recordings = io.read_block()
        else:
            print("Note: no cached data for this model specification!")
            self.recordings = self._run_simulations(model)
            io = neo.io.PickleIO(cache_filename)
            io.write_block(self.recordings)
        if self.plot_figure:
            # Stack traces vertically (110 mV offset per step) in one plot.
            for i, seg in enumerate(self.recordings.segments):
                plt.plot(seg.analogsignals[0].times.rescale('ms'),
                         seg.analogsignals[0].rescale('mV').magnitude + i * 110.0,
                         label=seg.name)
            plt.legend()
            self.figure_path = os.path.join(self.test_base_dir, "{}_{}.png".format(model.name, self.timestamp))
            plt.savefig(self.figure_path)
            self.figures.append(self.figure_path)
        return self._calculate_features(self.recordings)

    def _run_simulations(self, model):
        """For each step in the protocol, run simulation and store recordings"""
        recordings = neo.Block()
        print("Total protocols: {}".format(len(self.protocol)))
        for idx, item in enumerate(self.protocol.items()):
            step_name = item[0]
            step = item[1]
            segment = neo.Segment(name=step_name)
            recordings.segments.append(segment)
            segment.block = recordings
            print("{}. Current protocol: {}".format(idx+1, step_name))
            model.inject_current(step["stimuli"])
            model.run(tstop=step["total_duration"])
            signal = model.get_membrane_potential()
            # Mark the stimulation window so feature extraction knows
            # stim_start/stim_end.
            stimulus_on = neo.Epoch(times=step["stimuli"]["delay"]*ms,
                                    durations=step["stimuli"]["duration"]*ms,
                                    labels="stimulus")
            segment.analogsignals.append(signal)
            segment.epochs.append(stimulus_on)
        return recordings

    def _calculate_features(self, recordings):
        """For each recorded step, calculate the features."""
        features_from_simulation = {}
        for segment in recordings.segments:
            step_name = segment.name
            # Only extract the features present in the reference data.
            feature_names = self.observation[step_name].keys()
            trace = {
                'T': segment.analogsignals[0].times.rescale('ms').magnitude,
                'V': segment.analogsignals[0].rescale('mV').magnitude,
                'stim_start': [segment.epochs[0].times],
                'stim_end': [segment.epochs[0].times + segment.epochs[0].durations]
            }
            features = efel.getFeatureValues([trace], feature_names)[0]
            features_from_simulation[step_name] = dict([(k, {'value': v[0]})
                                                        for k, v in features.items()])
        return features_from_simulation

    def compute_score(self, observation, prediction, verbose=False):
        """
        Generates a score given the observations provided in the constructor
        and the prediction generated by generate_prediction.
        """
        # reformat the observations and predictions into the form needed by RMS_ZScore
        # i.e. from dict of dicts into a flat list of dicts
        observation_list = []
        prediction_list = []
        for step_name in observation:
            for feature_name in observation[step_name]:
                key = "{}_{}".format(step_name, feature_name)
                observation_list.append({key: observation[step_name][feature_name]})
                prediction_list.append({key: prediction[step_name][feature_name]})
        return self.score_type.compute(observation_list, prediction_list)

    def bind_score(self, score, model, observation,
                   prediction):
        """
        For the user to bind additional features to the score.
        """
        if hasattr(self, "figure_path"):
            score.related_data["figures"] = self.figures
        # Strip un-picklable NEURON HocObject attributes from the model
        # held by the score before it gets serialized.
        for key, val in list(score.__dict__["model"].__dict__.items()):
            if val.__class__.__name__ == "HocObject":
                score.__dict__["model"].__dict__.pop(key)
        return score
class BluePyOpt_MultipleCurrentStepTest(MultipleCurrentStepTest):
    """
    Tests if the neuron responses to current steps of different amplitudes match
    experimental data.

    The responses are quantified by extracting features from the voltage traces
    using eFEL.

    Experimental protocol definitions and experimental features obtained from
    zip files produced by BluePyOpt
    """

    def __init__(self, observation_dir=None, name=None, plot_figure=False, **test_kwargs):
        # This test does not employ external observation data, but is
        # rather located within each model's directory. So test results for
        # various models are not necessarily judged against the same data.

        # extract model specific observation data
        # load the protocol definition and the reference data
        with open(os.path.join(observation_dir, "config", "protocols.json")) as fp:
            protocols = json.load(fp)
        assert len(protocols) == 1
        template_name = list(protocols.keys())[0]

        with open(os.path.join(observation_dir, "config", "features.json")) as fp:
            reference_features = json.load(fp)
        assert list(reference_features.keys())[0] == template_name

        # reformat the reference_features dict into the necessary form:
        # {step: {feature: {"mean": m, "std": s}}}
        observations = {}
        protocol = {}
        for step_name, value in reference_features[template_name].items():
            if "soma" in value.keys():
                observations[step_name] = {}
                for feature_name, (mean, std) in value["soma"].items():
                    observations[step_name][feature_name] = {"mean": mean, "std": std}
                # reformat the protocol definition into the form requested by NeuronUnit
                # (rename "amp" -> "amplitude", lift "totduration" out of the stimulus)
                content = protocols[template_name][step_name]
                stim = content["stimuli"][0]
                stim["amplitude"] = stim["amp"]
                protocol[step_name] = {
                    "stimuli": stim,
                    "total_duration": stim["totduration"]
                }
                del stim["amp"]
                del stim["totduration"]
        MultipleCurrentStepTest.__init__(self,
                                         observation=observations,
                                         name=name,
                                         protocol=protocol,
                                         plot_figure=plot_figure)
| 43.330144 | 111 | 0.622019 |
0ea2a2ed0f3a07b0fe172561d53b1f8aec157587 | 580 | py | Python | djauth/users/migrations/0002_auto_20190516_0755.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | djauth/users/migrations/0002_auto_20190516_0755.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | djauth/users/migrations/0002_auto_20190516_0755.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-16 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the 'condo_name' and 'unit'
    # CharFields (with a 'Not Provided' default) to the custom user model.

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='customuser',
            name='condo_name',
            field=models.CharField(default='Not Provided', max_length=64),
        ),
        migrations.AddField(
            model_name='customuser',
            name='unit',
            field=models.CharField(default='Not Provided', max_length=64),
        ),
    ]
| 24.166667 | 74 | 0.582759 |
00d6f13e59b9965a3039d0c87470b6fe8a4945f9 | 726 | py | Python | airbyte-integrations/bases/source-acceptance-test/setup.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 22 | 2020-08-27T00:47:20.000Z | 2020-09-17T15:39:39.000Z | airbyte-integrations/bases/source-acceptance-test/setup.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 116 | 2020-08-27T01:11:27.000Z | 2020-09-19T02:47:52.000Z | airbyte-integrations/bases/source-acceptance-test/setup.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 1 | 2020-09-15T06:10:01.000Z | 2020-09-15T06:10:01.000Z | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import setuptools
# Runtime dependencies of the acceptance-test suite.
MAIN_REQUIREMENTS = [
    "airbyte-cdk~=0.1.56",
    "docker~=5.0.3",
    "PyYAML~=5.4",
    "icdiff~=1.9",
    "inflection~=0.5",
    "pdbpp~=0.10",
    "pydantic~=1.6",
    "pytest~=6.1",
    "pytest-sugar~=0.9",
    "pytest-timeout~=1.4",
    "pprintpp~=0.4",
    "dpath~=2.0.1",
    "jsonschema~=3.2.0",
    "jsonref==0.2",
]

setuptools.setup(
    name="source-acceptance-test",
    description="Contains acceptance tests for source connectors.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    url="https://github.com/airbytehq/airbyte",
    packages=setuptools.find_packages(),
    install_requires=MAIN_REQUIREMENTS,
)
| 21.352941 | 67 | 0.618457 |
1a060d6d04b2234d5cb26fdacf919ba7ca21edd5 | 609 | py | Python | apps/__manifest__.py | trojikman/access-addons | 5b056b4d0928e83f687ea7978de6f96f826c28a6 | [
"MIT"
] | null | null | null | apps/__manifest__.py | trojikman/access-addons | 5b056b4d0928e83f687ea7978de6f96f826c28a6 | [
"MIT"
] | null | null | null | apps/__manifest__.py | trojikman/access-addons | 5b056b4d0928e83f687ea7978de6f96f826c28a6 | [
"MIT"
] | 1 | 2020-11-24T01:29:44.000Z | 2020-11-24T01:29:44.000Z | {
"name": "Custom Apps",
"summary": """Simplify Apps Interface""",
"images": [],
"vesion": "13.0.1.0.0",
"application": False,
"author": "IT-Projects LLC, Dinar Gabbasov",
"support": "apps@itpp.dev",
"website": "https://twitter.com/gabbasov_dinar",
"category": "Access",
"license": "Other OSI approved licence", # MIT
"depends": ["access_apps"],
"data": ["views/apps_view.xml", "security.xml", "data/ir_config_parameter.xml"],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": False,
}
| 30.45 | 84 | 0.600985 |
5765a44e19e0eb1c59a99a42de28124c363e9907 | 484 | py | Python | tests/test_stats_test_correl_diff.py | ikanago/statistics.py | 3bbe3fcdd4169a56547227d5e31a61de1be68446 | [
"MIT"
] | null | null | null | tests/test_stats_test_correl_diff.py | ikanago/statistics.py | 3bbe3fcdd4169a56547227d5e31a61de1be68446 | [
"MIT"
] | null | null | null | tests/test_stats_test_correl_diff.py | ikanago/statistics.py | 3bbe3fcdd4169a56547227d5e31a61de1be68446 | [
"MIT"
] | null | null | null | from unittest import TestCase
import math
import stats_test
import stats_test_correl_diff
class TestStatsTestCorrelDiff(TestCase):
def test_stats_test_correl_diff(self):
n1 = 80
n2 = 65
r1 = 0.538
r2 = 0.743
sigificance = 0.05
(is_reject, test_stat) = stats_test_correl_diff.test_correl_diff(
n1, n2, r1, r2, sigificance)
self.assertTrue(is_reject)
self.assertAlmostEqual(test_stat, -2.085, places=3)
| 25.473684 | 73 | 0.667355 |
762ec81374cabea707cc1ddea7501dc7cc76677e | 409 | py | Python | answers/leetcode/Remove Duplicates from Sorted Array II/Remove Duplicates from Sorted Array II.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | 3 | 2015-09-04T21:32:31.000Z | 2020-12-06T00:37:32.000Z | answers/leetcode/Remove Duplicates from Sorted Array II/Remove Duplicates from Sorted Array II.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | null | null | null | answers/leetcode/Remove Duplicates from Sorted Array II/Remove Duplicates from Sorted Array II.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | null | null | null | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
prev0 = 0
prev1 = 1
for i in range(2, len(nums)):
if nums[i] != nums[prev0] or nums[i] != nums[prev1]:
prev0 += 1
prev1 += 1
nums[prev1] = nums[i]
return min(len(nums), prev1 + 1)
| 25.5625 | 64 | 0.440098 |
e2f12606ffbef2c60f1eeff3fadb853d68c9bdc5 | 527 | py | Python | test/nni_test/setup.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | test/nni_test/setup.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | test/nni_test/setup.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | from setuptools import setup, find_packages
setup(
name="nnitest",
version="0.0.1",
author = 'Microsoft NNI team',
author_email = 'nni@microsoft.com',
description = 'Neural Network Intelligence package',
license = 'MIT',
url = 'https://github.com/Microsoft/nni',
packages=find_packages('nnitest'),
long_description="",
classifiers = [
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent"
],
)
| 27.736842 | 56 | 0.629981 |
d4abf18a62136110b3c0b661139f7908d1cb9ac3 | 1,073 | py | Python | trivial data analysis/txt_file_recombine.py | Nemo-Cartman/trivial_data_analysis | d3494d1dd301dc11b6c752011e4661d01c43ff14 | [
"MIT"
] | 1 | 2018-07-31T15:18:37.000Z | 2018-07-31T15:18:37.000Z | trivial data analysis/txt_file_recombine.py | Nemo-Cartman/trivial_data_analysis | d3494d1dd301dc11b6c752011e4661d01c43ff14 | [
"MIT"
] | null | null | null | trivial data analysis/txt_file_recombine.py | Nemo-Cartman/trivial_data_analysis | d3494d1dd301dc11b6c752011e4661d01c43ff14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 20:21:18 2018
@author: GX
.txt file recombined
"""
import os
import re
def process(path):
    """Merge the recording fragments in ``path`` for every participant/trial
    pair (reads the module-level ``participants`` and ``trials`` lists)."""
    for participant in participants:
        for trial_number in trials:
            trial = "{}_{}".format(participant, trial_number)
            pattern = trial + r'_\ds_recordData_1'
            matcher(pattern, path, trial)
def matcher(pattern, path, trial):
    """Concatenate every file in ``path`` whose name matches ``pattern``
    into a single ``<trial>_recordData_1.txt`` file (appending).

    :param pattern: regex (without the ``.txt`` suffix) selecting fragments
    :param path: directory containing the fragment files
    :param trial: prefix used for the merged output file name
    """
    compiled = re.compile(pattern)
    files = os.listdir(path)
    # NOTE: matching against the str() of the whole listing is fragile (a
    # match may cover only part of a name); kept for compatibility.
    selected_files = re.findall(compiled, str(files))
    new_file_name = trial + r'_recordData_1' + r'.txt'
    # os.path.join instead of a hard-coded backslash so the merge also works
    # on non-Windows systems; context managers guarantee handles are closed
    # even if a read fails part-way through.
    with open(os.path.join(path, new_file_name), 'a') as new_file:
        for name in selected_files:
            with open(os.path.join(path, name + r'.txt'), 'r', encoding='UTF-8') as fragment:
                for element in fragment.readlines():
                    new_file.write(element)
if __name__=='__main__':
    # Hard-coded Windows data directory (Chinese path components) holding the
    # EMG recording fragments; adjust for your machine.
    path=r'D:\C盘备份\Tencent Files\391059727\FileRecv\EMG信号强度与速度关系\new_folder\新建文件夹'
    # Participant IDs and trial numbers used to build the file-name patterns.
    participants=['S1','S2','S3','S4','S5']
    trials=['1','2','3','4']
    process(path)
| 26.825 | 83 | 0.61137 |
ce34f8ae3ad5378b0df860d78a33473c550ea0a1 | 7,021 | py | Python | hops/distribute/parameter_server.py | tabularaza27/hops-util-py | 3e2733ebe4783f88ecf22cbeb4d8f1f8d56c65cd | [
"Apache-2.0"
] | null | null | null | hops/distribute/parameter_server.py | tabularaza27/hops-util-py | 3e2733ebe4783f88ecf22cbeb4d8f1f8d56c65cd | [
"Apache-2.0"
] | null | null | null | hops/distribute/parameter_server.py | tabularaza27/hops-util-py | 3e2733ebe4783f88ecf22cbeb4d8f1f8d56c65cd | [
"Apache-2.0"
] | null | null | null | """
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import datetime
import socket
import json
from . import parameter_server_reservation
run_id = 0
def _launch(sc, map_fun, local_logdir=False, name="no-name"):
    """
    Run one distributed-training experiment: executes ``map_fun`` once per
    Spark executor, coordinated through a parameter-server reservation server.

    Args:
        sc: the SparkContext of the running application
        map_fun: user training function executed on every executor
        local_logdir: if True, TensorBoard logs are written to the executor's
            local filesystem before being copied to HDFS
        name: display name used for the Spark job group in the UI

    Returns:
        tuple ``(metric, logdir)``; ``metric`` is None when the run did not
        write a metric file
    """
    global run_id
    app_id = str(sc.applicationId)

    num_executions = util.num_executors()

    #Each TF task should be run on 1 executor
    nodeRDD = sc.parallelize(range(num_executions), num_executions)

    #Make SparkUI intuitive by grouping jobs
    sc.setJobGroup("ParameterServerStrategy", "{} | Distributed Training".format(name))

    # Reservation server lets executors exchange host:port info before training.
    server = parameter_server_reservation.Server(num_executions)
    server_addr = server.start()

    num_ps = util.num_param_servers()

    #Force execution on executor, since GPU is located on executor
    nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps))

    logdir = _get_logdir(app_id)

    # The chief writes the run's metric (if any) to <logdir>/metric.
    path_to_metric = logdir + '/metric'
    if pydoop.hdfs.path.exists(path_to_metric):
        with pydoop.hdfs.open(path_to_metric, "r") as fi:
            metric = float(fi.read())
            fi.close()
        return metric, logdir

    print('Finished Experiment \n')

    return None, logdir
def _get_logdir(app_id):
    """Return the HDFS log directory for this application's current run.

    Args:
        app_id: Spark/YARN application id

    Returns:
        path ``<experiments_dir>/<app_id>/parameter_server/run.<run_id>``
    """
    global run_id
    return "{}/{}/parameter_server/run.{}".format(
        hopshdfs._get_experiments_dir(), app_id, run_id)
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps):
    """
    Build the closure executed on each Spark executor partition.

    Args:
        app_id: Spark application id (used to build HDFS log paths)
        run_id: sequence number of this experiment run
        map_fun: the user's training function
        local_logdir: whether TensorBoard logs go to the local fs first
        server_addr: address of the reservation server coordinating executors
        num_ps: number of executors assigned the parameter-server role

    Returns:
        the wrapper function to pass to ``foreachPartition``
    """
    def _wrapper_fun(iter):
        """
        Executor-side body: registers with the reservation server, builds
        TF_CONFIG for this executor's role, runs ``map_fun`` and cleans up.

        Args:
            iter: partition iterator containing this executor's index
        """
        for i in iter:
            executor_num = i

        tb_hdfs_path = ''
        hdfs_exec_logdir = ''

        # Background thread that periodically prints GPU utilization.
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()

        role = None

        client = parameter_server_reservation.Client(server_addr)

        try:
            host = util._get_ip_address()

            # Bind to an ephemeral port just to reserve it for this task.
            tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tmp_socket.bind(('', 0))
            port = tmp_socket.getsockname()[1]
            host_port = host + ":" + str(port)

            # The first num_ps executors become parameter servers.
            exec_spec = {}
            if executor_num < num_ps:
                exec_spec["task_type"] = "ps"
            else:
                exec_spec["task_type"] = "worker"
            exec_spec["host_port"] = host_port
            exec_spec["gpus_present"] = devices.get_num_gpus() > 0

            client.register(exec_spec)

            # Blocks until every executor has registered its address.
            cluster = client.await_reservations()

            tmp_socket.close()

            role, index = _find_task_and_index(host_port, cluster)

            cluster_spec = {}
            cluster_spec["cluster"] = cluster
            cluster_spec["task"] = {"type": role, "index": index}

            print(cluster_spec)

            os.environ["TF_CONFIG"] = json.dumps(cluster_spec)

            # Only the chief sets up HDFS log dirs, logging and TensorBoard.
            if role == "chief":
                hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, None, 'parameter_server')
                pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
                hopshdfs._init_logger()
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)

            gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
            if role == "chief":
                hopshdfs.log(gpu_str)
            print(gpu_str)
            print('-------------------------------------------------------')
            print('Started running task \n')
            if role == "chief":
                hopshdfs.log('Started running task')
            task_start = datetime.datetime.now()

            retval=None
            if role == "ps":
                # A PS serves in a daemon-ish thread until all workers finish.
                ps_thread = threading.Thread(target=lambda: map_fun())
                ps_thread.start()
                print("waiting for workers")
                client.await_all_workers_finished()
                print("waiting finished")
            else:
                retval = map_fun()

            if role == "chief":
                if retval:
                    _handle_return(retval, hdfs_exec_logdir)

            task_end = datetime.datetime.now()
            time_str = 'Finished task - took ' + util._time_diff(task_start, task_end)
            print('\n' + time_str)
            print('-------------------------------------------------------')
            if role == "chief":
                hopshdfs.log(time_str)
        except:
            raise
        finally:
            # Always report completion so the PS executors can shut down.
            if role == "worker" or role == "chief":
                client.register_worker_finished()
            client.close()
            if role == "chief":
                if local_logdir:
                    local_tb = tensorboard.local_logdir_path
                    util._store_local_tensorboard(local_tb, hdfs_exec_logdir)

        if devices.get_num_gpus() > 0:
            t.do_run = False
            t.join(20)

        _cleanup(tb_hdfs_path)

    return _wrapper_fun
def _cleanup(tb_hdfs_path):
    """Delete the TensorBoard endpoint file on HDFS (if any) and stop the logger.

    Args:
        tb_hdfs_path: HDFS path of the TensorBoard endpoint file; may be None
            or empty when no TensorBoard was started
    """
    handle = hopshdfs.get()
    if tb_hdfs_path not in (None, '') and handle.exists(tb_hdfs_path):
        handle.delete(tb_hdfs_path)
    hopshdfs._kill_logger()
def _find_task_and_index(host_port, cluster_spec):
"""
Args:
host_port:
cluster_spec:
Returns:
"""
index = 0
for entry in cluster_spec["worker"]:
if entry == host_port:
return "worker", index
index = index + 1
index = 0
for entry in cluster_spec["ps"]:
if entry == host_port:
return "ps", index
index = index + 1
if cluster_spec["chief"][0] == host_port:
return "chief", 0
def _handle_return(val, hdfs_exec_logdir):
    """Validate the user function's return value and persist it as the metric.

    Args:
        val: value returned by the training function; must be a number (or a
            string convertible to float)
        hdfs_exec_logdir: HDFS log directory of this run

    Raises:
        ValueError: if ``val`` cannot be interpreted as a number
    """
    try:
        # Validate with float(), matching the conversion used when writing
        # the metric below; the previous int() check wrongly rejected valid
        # metrics such as the string "0.95".
        metric = float(val)
    except (TypeError, ValueError):
        raise ValueError('Your function should return a metric (number).')

    metric_file = hdfs_exec_logdir + '/metric'
    fs_handle = hopshdfs.get_fs()
    # Older/newer pydoop versions disagree on the keyword name for the mode.
    try:
        fd = fs_handle.open_file(metric_file, mode='w')
    except:
        fd = fs_handle.open_file(metric_file, flags='w')

    fd.write(str(metric).encode())
    fd.flush()
    fd.close()
| 26.100372 | 138 | 0.574704 |
94e406f19327fa19084faf51f6523c3abe068772 | 1,914 | py | Python | flask/lib/python3.6/site-packages/stem/response/authchallenge.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
] | null | null | null | flask/lib/python3.6/site-packages/stem/response/authchallenge.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
] | 3 | 2019-07-29T09:47:34.000Z | 2019-07-29T09:47:35.000Z | flask/lib/python3.6/site-packages/stem/response/authchallenge.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
] | null | null | null | # Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import binascii
import stem.response
import stem.socket
import stem.util.str_tools
import stem.util.tor_tools
class AuthChallengeResponse(stem.response.ControlMessage):
  """
  AUTHCHALLENGE query response.

  :var str server_hash: server hash provided by tor
  :var str server_nonce: server nonce provided by tor
  """

  def _parse_message(self):
    # Parses a single-line reply of the form...
    #
    #   250 AUTHCHALLENGE SERVERHASH=680A73C9836C4F557314EA1C4EDE54C285DB9DC89C83627401AEF9D7D27A95D5 SERVERNONCE=F8EA4B1F2C8B40EF1AF68860171605B910E3BBCABADF6FC3DB1FA064F4690E85
    #
    # ... populating server_hash and server_nonce as raw bytes, and raising
    # stem.ProtocolError for any malformed reply.

    self.server_hash = None
    self.server_nonce = None

    if not self.is_ok():
      raise stem.ProtocolError("AUTHCHALLENGE response didn't have an OK status:\n%s" % self)
    elif len(self) > 1:
      raise stem.ProtocolError('Received multiline AUTHCHALLENGE response:\n%s' % self)

    line = self[0]

    # sanity check that we're a AUTHCHALLENGE response
    if not line.pop() == 'AUTHCHALLENGE':
      raise stem.ProtocolError('Message is not an AUTHCHALLENGE response (%s)' % self)

    if line.is_next_mapping('SERVERHASH'):
      value = line.pop_mapping()[1]

      # 64 hex digits == a 32-byte digest
      if not stem.util.tor_tools.is_hex_digits(value, 64):
        raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value)

      self.server_hash = binascii.unhexlify(stem.util.str_tools._to_bytes(value))
    else:
      raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line)

    if line.is_next_mapping('SERVERNONCE'):
      value = line.pop_mapping()[1]

      if not stem.util.tor_tools.is_hex_digits(value, 64):
        raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value)

      self.server_nonce = binascii.unhexlify(stem.util.str_tools._to_bytes(value))
    else:
      raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line)
538dee041aa7ae6257fb237b79dc3801c3c6b73a | 2,562 | py | Python | game.py | pedropaulo91/shmup-game | ebf51ffbc36c7ebc0088fcca18ab0bd9da57ee5c | [
"MIT"
] | null | null | null | game.py | pedropaulo91/shmup-game | ebf51ffbc36c7ebc0088fcca18ab0bd9da57ee5c | [
"MIT"
] | null | null | null | game.py | pedropaulo91/shmup-game | ebf51ffbc36c7ebc0088fcca18ab0bd9da57ee5c | [
"MIT"
] | null | null | null | import pygame
import game_objects
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
FPS = 60
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption("Shmup Game")
game_objects.init()
background_image = game_objects.all_images["background"]
meteor_explotion = game_objects.all_sounds["meteor_explosion"]
# Sprite Groups
all_sprites = pygame.sprite.Group()
laser_group = pygame.sprite.Group()
meteors_group = pygame.sprite.Group()
spaceship = game_objects.SpaceShip(SCREEN_WIDTH, SCREEN_HEIGHT)
player_life_num = game_objects.Player_Life_Num(spaceship)
player_life_img = game_objects.Player_Life_Img()
all_sprites.add(player_life_img)
all_sprites.add(player_life_num)
all_sprites.add(spaceship)
# Number of meteors that will be drawn
for i in range(10):
meteor = game_objects.Meteor(SCREEN_WIDTH, SCREEN_HEIGHT)
all_sprites.add(meteor)
meteors_group.add(meteor)
def new_meteor():
    """Spawn a replacement meteor and add it to the update/collision groups."""
    meteor = game_objects.Meteor(SCREEN_WIDTH, SCREEN_HEIGHT)
    all_sprites.add(meteor)
    meteors_group.add(meteor)
music_game = game_objects.all_sounds["music_game"]
music_game.play(-1)
def main() :
    """Run the game loop: input, update, collisions, render — until quit
    or until the spaceship runs out of lives."""
    # Main loop
    running = True
    while running:
        # Keep loop running at the right speed
        clock.tick(FPS)
        # Setting timer
        pygame.time.set_timer(1, 15000)
        # Process input
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    spaceship.shoot(all_sprites, laser_group)

        # update
        all_sprites.update()

        # Check if a laser hit a meteor (both sprites are removed on hit)
        for hit in pygame.sprite.groupcollide(laser_group, meteors_group, True, True):
            # For each meteor that is destroyed, a new one is created and a sound is played
            meteor_explotion.play()
            new_meteor()

        # Check if a meteor hit the spaceship
        for hit in pygame.sprite.spritecollide(spaceship, meteors_group, True):
            spaceship.lives -= 1
            player_life_num.image = game_objects.all_images["player_life_num"][spaceship.lives]
            if spaceship.lives == 0:
                running = False

        # draw / render
        screen.blit(background_image, (0,0))
        all_sprites.draw(screen)
        # flip the display
        pygame.display.flip()

    pygame.quit()
pygame.quit()
if __name__ == "__main__":
main()
| 24.873786 | 95 | 0.676425 |
b18180cd19a73d9e087a4a75ef084673190c4f94 | 6,288 | py | Python | S1/SI/S4/main.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | S1/SI/S4/main.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | S1/SI/S4/main.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | # Fichier main de gestion des ressources du robot
from micropython import const
from machine import *
from DRV8833 import *
from BME280 import *
from VL6180X import *
import pycom
import time
import os
# Variables globales pour moteurs et pont en H
DRV8833_Sleep_pin = "P20" # Pin SLEEP
DRV8833_AIN1 = "P22" # Entrée PWM moteur A : AIN1
DRV8833_AIN2 = "P21" # Entrée PWM moteur A : AIN2
DRV8833_BIN1 = "P19" # Entrée PWM moteur B : BIN1
DRV8833_BIN2 = "P12" # Entrée PWM moteur B : BIN2
# Vitesse de rotation des roues
V_MAX = 1.0
V_MOYEN = 0.5
V_MIN = 0.25
# ---------------------------------------------------------------------------
# Routines de déplacements du robot
def Avancer(vitesse):
    """Drive forward at ``vitesse`` (duty cycle, 0.0-1.0).

    The left motor is commanded 0.2 slower — presumably a calibration offset
    to keep the robot tracking straight; TODO confirm on hardware.
    """
    Moteur_Droit.Cmde_moteur(SENS_HORAIRE, vitesse)
    Moteur_Gauche.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse - 0.2)
def Reculer(vitesse):
    """Drive backward at ``vitesse`` (duty cycle, 0.0-1.0)."""
    Moteur_Droit.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
    Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, vitesse)
def Pivoter_droite(vitesse):
    """Pivot in place to the right at ``vitesse`` (both wheels anticlockwise)."""
    Moteur_Droit.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
    Moteur_Gauche.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
def Pivoter_gauche(vitesse):
    """Pivot in place to the left at ``vitesse`` (both wheels clockwise)."""
    Moteur_Droit.Cmde_moteur(SENS_HORAIRE, vitesse)
    Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, vitesse)
def Arret():
    """Stop both motors (zero duty cycle)."""
    Moteur_Droit.Cmde_moteur(SENS_HORAIRE, 0)
    Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, 0)
# ------------------------------------------------------------------------
# Initialisation des moteurs
# IN1_pin : entrée PWM 1 DRV8833
# IN2_pin : entrée PWM 2 DRV8833
# sleep_pin : SLP pin pour désactiver les ponts en H du DRV8833
# timer_number : dans [0,1,2,3]. Choix du timer utilisé pour générer le signal pwm
# freq : fréquence du signal pwm
# num_channel_pwm_In1 : numéro de l'Id du canal PWM associé à la broche In1_pin
# num_channel_pwm_In2 : numéro de l'Id du canal PWM associé à la broche In2_pin
# DRV8833 (In1_pin, In2_pin, sleep_pin, timer_number, freq, num_channel_pwm_In1, num_channel_pwm_In2)
Moteur_Gauche = DRV8833(
DRV8833_AIN1, DRV8833_AIN2, DRV8833_Sleep_pin, 1, 500, 0, 1
) # Sur connecteur Encoder1
Moteur_Droit = DRV8833(
DRV8833_BIN1, DRV8833_BIN2, DRV8833_Sleep_pin, 1, 500, 2, 3
) # Sur connecteur Encoder2
Arret()
bus_i2c = I2C()
bus_i2c.init(I2C.MASTER, baudrate=400000)
adr = bus_i2c.scan()
Id_BME280 = bus_i2c.readfrom_mem(BME280_I2C_ADR, BME280_CHIP_ID_ADDR, 1)
capteur_BME280 = BME280(BME280_I2C_ADR, bus_i2c) # --Calibrage du capteur
capteur_BME280.Calibration_Param_Load()
rtc = RTC()
rtc.init((2020, 10, 26, 0, 0, 0, 0, 0))
jour = rtc.now()
# CAPTEUR DE DISTANCE
# Variables globales pour les trois capteurs VL6180X
# tableaux de 3 cases initialisees a -1
DISTANCE = [-1, -1]
LUMINOSITE = [-1.0, -1.0]
# Nombre de capteurs VL6180X utilisés
N_VL6180X = const(2)
# Ressource GIPo de la carte WiPy3.0 affectée au contrôle
# du capteur VL6180X
VL6180X_CE_Pin = ("P3", "P5")
# Adressage I2C des capteurs VL6180X : par défaut 0x29 soit 41
VL6180X_I2C_adr_defaut = const(0x29)
VL6180X_I2C_Adr = (const(0x2A), const(0x2B))
print("Configuration des broches CE des capteurs VL6180X")
# Liste des variables Pin correspondant aux broches CE
VL6180X_GPIO_CE_Pin = []
for pin in VL6180X_CE_Pin:
VL6180X_GPIO_CE_Pin.append(Pin(pin, mode=Pin.OUT))
# inhiber chacun des capteurs de distances
VL6180X_GPIO_CE_Pin[-1].value(0)
print("Fin de la configuration des broches CE des capteurs VL6180X")
print("Initialisation des capteurs de distance")
capteur_VL6180X = []
for i in range(N_VL6180X):
VL6180X_GPIO_CE_Pin[i].value(1)
time.sleep(0.002)
capteur_VL6180X.append(VL6180X(VL6180X_I2C_adr_defaut, bus_i2c))
# init nouvelle adr I2C
capteur_VL6180X[i].Modif_Adr_I2C(
VL6180X_GPIO_CE_Pin[i], VL6180X_I2C_Adr[i], VL6180X_I2C_adr_defaut
)
print("Fin de l'initialisation des capteurs de distance")
# configuration de la broche dédiée au contrôle du capteur :
# VL6180X_GPIO_CE_Pin = Pin(VL6180X_CE_Pin, mode=Pin.OUT)
# VL6180X_GPIO_CE_Pin.value(1) # Activer le capteur de distance
# capteur_d_l_VL6180X = VL6180X(VL6180X_I2C_adr_defaut, bus_i2c)
date = "Date : " + str(jour[0]) + "/" + str(jour[1]) + "/" + str(jour[2])
print("L'adresse du périphérique I2C est :", adr)
print("Valeur ID BME280 :", hex(Id_BME280[0]))
while True:
# pression et température
jour = rtc.now()
temps = str(jour[3]) + "h " + str(jour[4]) + "m " + str(jour[5]) + "s"
temp = capteur_BME280.read_temp()
humi = capteur_BME280.read_humidity()
pres = capteur_BME280.read_pression()
for i in range(N_VL6180X):
DISTANCE[i] = capteur_VL6180X[i].range_mesure()
LUMINOSITE[i] = capteur_VL6180X[i].ambiant_light_mesure()
print("-------------------------------------------------------------------")
print(
"Temps passé :",
temps,
"- Température :",
"%.2f" % temp,
"- Humidité :",
"%.2f" % humi,
"- Préssion :",
"%.2f" % pres,
)
print("Distance", DISTANCE)
print("Luminosité", LUMINOSITE)
print("--------------")
print(LUMINOSITE)
nombreAlea = int(str(LUMINOSITE[0] * LUMINOSITE[1])[-1])
print("nombreAlea", nombreAlea)
if nombreAlea % 4 == 0:
print("CHANGEMENT DE DIRECTION ALEATOIRE")
direction = "gauche" if nombreAlea % 2 == 0 else "droite"
if direction == "gauche":
Pivoter_gauche(V_MOYEN)
print("PIVOTER GAUCHE")
time.sleep(2)
else:
Pivoter_droite(V_MOYEN)
print("PIVOTER DROITE")
time.sleep(2)
else:
if DISTANCE[0] >= 100:
Avancer(V_MOYEN)
print("AVANCER")
time.sleep(1)
else:
print("OULALAH ON VA SE COGNER")
direction = "gauche" if nombreAlea % 2 == 0 else "droite"
if direction == "gauche":
Pivoter_gauche(V_MOYEN)
print("ON EVITE PAR LA GAUCHE")
time.sleep(2)
else:
Pivoter_droite(V_MOYEN)
print("ON EVITE PAR LA DROITE")
time.sleep(2)
| 32.921466 | 102 | 0.637087 |
85af36c007794fa4a407a3bcbcf23dffc16f7217 | 1,685 | py | Python | scripts/papyrus_compile.py | clayne/ConsoleUtilF4 | 0939e09bbb7ff6b06f2e076d1cf12f97f24dcb44 | [
"MIT"
] | 3 | 2021-05-09T00:44:46.000Z | 2021-11-12T14:35:48.000Z | scripts/papyrus_compile.py | clayne/ConsoleUtilF4 | 0939e09bbb7ff6b06f2e076d1cf12f97f24dcb44 | [
"MIT"
] | null | null | null | scripts/papyrus_compile.py | clayne/ConsoleUtilF4 | 0939e09bbb7ff6b06f2e076d1cf12f97f24dcb44 | [
"MIT"
] | 1 | 2021-11-10T17:16:25.000Z | 2021-11-10T17:16:25.000Z | import argparse
import os
import shutil
import subprocess
def compile(a_args):
    """Compile every Papyrus script under ``<src_dir>/src/papyrus`` and
    optionally copy the resulting ``.pex`` files into the Fallout 4 data dir.

    Args:
        a_args: parsed CLI namespace with ``f4dir``, ``src_dir``, ``debug``
            and ``copy_build`` attributes (the last two are "true"/"false"
            strings)
    """
    compiler = os.path.join(a_args.f4dir, "Papyrus Compiler", "PapyrusCompiler.exe")
    vanilla = os.path.join(a_args.f4dir, "Data", "Scripts", "Source", "Base")
    output = os.getcwd()
    flags = "Institute_Papyrus_Flags.flg"

    for root, dirs, files in os.walk(os.path.join(a_args.src_dir, "src", "papyrus")):
        for file in files:
            if file.endswith(".psc"):
                pargs = [
                    compiler,
                    os.path.join(root, file),
                    "-import={}".format(vanilla),
                    "-output={}".format(output),
                    "-flags={}".format(flags),
                ]
                # Release builds get full optimization.
                if a_args.debug == "false":
                    pargs.append("-optimize")
                    pargs.append("-release")
                    pargs.append("-final")
                subprocess.run(pargs, check=True)

    if a_args.copy_build == "true":
        for root, dirs, files in os.walk(output):
            for file in files:
                if file.endswith(".pex"):
                    # Join with the walk root: the bare filename only resolved
                    # correctly for files sitting directly in the cwd, so .pex
                    # files in subdirectories previously failed to copy.
                    shutil.copy(os.path.join(root, file),
                                os.path.join(a_args.f4dir, "Data", "Scripts"))
def parse_arguments():
    """Parse the required command-line options for the Papyrus build."""
    parser = argparse.ArgumentParser(description="compile papyrus source scripts")
    options = (
        ("--copy-build", "copy the compiled artifacts into the fallout 4 directory"),
        ("--debug", "enable papyrus debugging commands"),
        ("--f4dir", "the fallout 4 directory"),
        ("--src-dir", "the project root source directory"),
    )
    for flag, description in options:
        parser.add_argument(flag, type=str, help=description, required=True)
    return parser.parse_args()
def main():
    """Create (if needed) and enter the ``artifacts`` output directory, then
    parse the CLI arguments and run the Papyrus compilation."""
    out = "artifacts"
    try:
        os.mkdir(out)
    except FileExistsError:
        pass
    # NOTE: the process cwd is changed so the compiler writes its output here.
    os.chdir(out)
    args = parse_arguments()
    compile(args)
if __name__ == "__main__":
main()
| 30.636364 | 126 | 0.689614 |
1a60ddd8d9b55d3215efed4a22a0981f656db1ec | 1,482 | py | Python | ioc/extra/locator/di.py | rande/python-simple-ioc | 36ddf667c1213a07a53cd4cdd708d02494e5190b | [
"Apache-2.0"
] | 13 | 2015-04-11T13:35:07.000Z | 2020-10-30T18:36:19.000Z | ioc/extra/locator/di.py | pytheons/ioc | 21aa08dfbe4596f50b5e24032426310e7904db07 | [
"Apache-2.0"
] | null | null | null | ioc/extra/locator/di.py | pytheons/ioc | 21aa08dfbe4596f50b5e24032426310e7904db07 | [
"Apache-2.0"
] | 3 | 2015-06-02T09:20:12.000Z | 2020-09-06T14:50:02.000Z | #
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ioc.loader, ioc.component, ioc.exceptions
from ioc.component import Definition
class Extension(ioc.component.Extension):
    """IoC extension that registers an ``ioc.locator`` service: a
    prefix-based resource locator covering every loaded extension."""

    def load(self, config, container_builder):
        """Build, for each extension, a ChoiceLocator that looks first in
        ``<project root>/resources/<extension>`` on disk and then inside the
        extension package's ``resources`` directory, and expose them all
        through a single PrefixLocator keyed as ``<extension>:<path>``."""
        extensions = container_builder.parameters.get('ioc.extensions')

        locator_map = {}
        for extension in extensions:
            locator_map[extension] = Definition('ioc.locator.ChoiceLocator',
                arguments=[[
                    Definition('ioc.locator.FileSystemLocator', arguments=["%s/resources/%s" % (container_builder.parameters.get('project.root_folder'), extension)]),
                    Definition('ioc.locator.PackageLocator', arguments=[extension], kwargs={'package_path': 'resources'})
                ]]
            )

        container_builder.add('ioc.locator', Definition('ioc.locator.PrefixLocator', arguments=[locator_map], kwargs={'delimiter': ':'}))
| 43.588235 | 166 | 0.703779 |
11533bfcb275f59a9304bb31ddcf0aa9254673b3 | 5,023 | py | Python | finrl_meta/data_processors/processor_ricequant.py | Laezerus/FinRL-Meta | 33781fee191836ecc2d5f5bb0ade968002b8de30 | [
"MIT"
] | null | null | null | finrl_meta/data_processors/processor_ricequant.py | Laezerus/FinRL-Meta | 33781fee191836ecc2d5f5bb0ade968002b8de30 | [
"MIT"
] | null | null | null | finrl_meta/data_processors/processor_ricequant.py | Laezerus/FinRL-Meta | 33781fee191836ecc2d5f5bb0ade968002b8de30 | [
"MIT"
] | 1 | 2022-01-07T15:11:43.000Z | 2022-01-07T15:11:43.000Z | from typing import List
import rqdatac as ricequant
from finrl_meta.data_processors.basic_processor import BaseProcessor
class RicequantProcessor(BaseProcessor):
    """Data processor backed by the RiceQuant (``rqdatac``) market-data API."""

    def __init__(self, data_source: str, start_date: str, end_date: str, time_interval: str, **kwargs):
        """Initialize the base processor and the rqdatac session.

        ``kwargs`` must contain ``username`` and ``password`` keys; pass None
        for both to rely on an already-configured rqdatac licence.
        """
        super().__init__(data_source, start_date, end_date, time_interval, **kwargs)
        if kwargs['username'] is None or kwargs['password'] is None:
            ricequant.init()  # if the licence is already set, init works without credentials
        else:
            ricequant.init(kwargs['username'], kwargs['password'])  # init with username and password

    def download_data(self, ticker_list: List[str]):
        """Fetch price bars for ``ticker_list`` over the configured date range
        and store the result on ``self.dataframe``."""
        # download data by calling RiceQuant API
        dataframe = ricequant.get_price(ticker_list, frequency=self.time_interval,
                                        start_date=self.start_date, end_date=self.end_date)
        self.dataframe = dataframe
# def clean_data(self, df) -> pd.DataFrame:
# ''' RiceQuant data is already cleaned, we only need to transform data format here.
# No need for filling NaN data'''
# df = df.copy()
# # raw df uses multi-index (tic,time), reset it to single index (time)
# df = df.reset_index(level=[0,1])
# # rename column order_book_id to tic
# df = df.rename(columns={'order_book_id':'tic', 'datetime':'time'})
# # reserve columns needed
# df = df[['tic','time','open','high','low','close','volume']]
# # check if there is NaN values
# assert not df.isnull().values.any()
# return df
# def add_vix(self, data):
# print('VIX is NOT applicable to China A-shares')
# return data
# def calculate_turbulence(self, data, time_period=252):
# # can add other market assets
# df = data.copy()
# df_price_pivot = df.pivot(index="date", columns="tic", values="close")
# # use returns to calculate turbulence
# df_price_pivot = df_price_pivot.pct_change()
#
# unique_date = df.date.unique()
# # start after a fixed time period
# start = time_period
# turbulence_index = [0] * start
# # turbulence_index = [0]
# count = 0
# for i in range(start, len(unique_date)):
# current_price = df_price_pivot[df_price_pivot.index == unique_date[i]]
# # use one year rolling window to calcualte covariance
# hist_price = df_price_pivot[
# (df_price_pivot.index < unique_date[i])
# & (df_price_pivot.index >= unique_date[i - time_period])
# ]
# # Drop tickers which has number missing values more than the "oldest" ticker
# filtered_hist_price = hist_price.iloc[hist_price.isna().sum().min():].dropna(axis=1)
#
# cov_temp = filtered_hist_price.cov()
# current_temp = current_price[[x for x in filtered_hist_price]] - np.mean(filtered_hist_price, axis=0)
# temp = current_temp.values.dot(np.linalg.pinv(cov_temp)).dot(
# current_temp.values.T
# )
# if temp > 0:
# count += 1
# if count > 2:
# turbulence_temp = temp[0][0]
# else:
# # avoid large outlier because of the calculation just begins
# turbulence_temp = 0
# else:
# turbulence_temp = 0
# turbulence_index.append(turbulence_temp)
#
# turbulence_index = pd.DataFrame(
# {"date": df_price_pivot.index, "turbulence": turbulence_index}
# )
# return turbulence_index
#
# def add_turbulence(self, data, time_period=252):
# """
# add turbulence index from a precalcualted dataframe
# :param data: (df) pandas dataframe
# :return: (df) pandas dataframe
# """
# df = data.copy()
# turbulence_index = self.calculate_turbulence(df, time_period=time_period)
# df = df.merge(turbulence_index, on="date")
# df = df.sort_values(["date", "tic"]).reset_index(drop=True)
# return df
# def df_to_array(self, df, tech_indicator_list, if_vix):
# df = df.copy()
# unique_ticker = df.tic.unique()
# if_first_time = True
# for tic in unique_ticker:
# if if_first_time:
# price_array = df[df.tic==tic][['close']].values
# tech_array = df[df.tic==tic][tech_indicator_list].values
# #risk_array = df[df.tic==tic]['turbulence'].values
# if_first_time = False
# else:
# price_array = np.hstack([price_array, df[df.tic==tic][['close']].values])
# tech_array = np.hstack([tech_array, df[df.tic==tic][tech_indicator_list].values])
# print('Successfully transformed into array')
# return price_array, tech_array, None
| 45.252252 | 115 | 0.588692 |
426bf9c18ee54247f3a7f3f460324a1cd3a549bd | 5,043 | py | Python | test/orm/test_defaults.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | 1 | 2018-04-02T18:41:52.000Z | 2018-04-02T18:41:52.000Z | test/orm/test_defaults.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | null | null | null | test/orm/test_defaults.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | 3 | 2017-09-26T13:59:24.000Z | 2020-12-04T17:51:54.000Z | import sqlalchemy as sa
from sqlalchemy import Integer, String, ForeignKey, event
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import eq_
class TriggerDefaultsTest(fixtures.MappedTest):
    """Verify that column values set by database row triggers are fetched
    back into mapped instances via ``FetchedValue`` server defaults."""

    __requires__ = ('row_triggers',)

    @classmethod
    def define_tables(cls, metadata):
        # col2: populated by the INSERT trigger; col3: by the UPDATE trigger;
        # col4: by both. FetchedValue tells the ORM to re-fetch them.
        dt = Table('dt', metadata,
                   Column('id', Integer, primary_key=True,
                          test_needs_autoincrement=True),
                   Column('col1', String(20)),
                   Column('col2', String(20),
                          server_default=sa.schema.FetchedValue()),
                   Column('col3', String(20),
                          sa.schema.FetchedValue(for_update=True)),
                   Column('col4', String(20),
                          sa.schema.FetchedValue(),
                          sa.schema.FetchedValue(for_update=True)))
        # Per-dialect INSERT triggers; the last DDL covers every other backend.
        for ins in (
            sa.DDL("CREATE TRIGGER dt_ins AFTER INSERT ON dt "
                   "FOR EACH ROW BEGIN "
                   "UPDATE dt SET col2='ins', col4='ins' "
                   "WHERE dt.id = NEW.id; END",
                   on='sqlite'),
            sa.DDL("CREATE TRIGGER dt_ins ON dt AFTER INSERT AS "
                   "UPDATE dt SET col2='ins', col4='ins' "
                   "WHERE dt.id IN (SELECT id FROM inserted);",
                   on='mssql'),
            sa.DDL("CREATE TRIGGER dt_ins BEFORE INSERT "
                   "ON dt "
                   "FOR EACH ROW "
                   "BEGIN "
                   ":NEW.col2 := 'ins'; :NEW.col4 := 'ins'; END;",
                   on='oracle'),
            sa.DDL("CREATE TRIGGER dt_ins BEFORE INSERT ON dt "
                   "FOR EACH ROW BEGIN "
                   "SET NEW.col2='ins'; SET NEW.col4='ins'; END",
                   on=lambda ddl, event, target, bind, **kw:
                   bind.engine.name not in ('oracle', 'mssql', 'sqlite')
                   ),
        ):
            event.listen(dt, 'after_create', ins)

        event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_ins"))

        # Per-dialect UPDATE triggers, same pattern as above.
        for up in (
            sa.DDL("CREATE TRIGGER dt_up AFTER UPDATE ON dt "
                   "FOR EACH ROW BEGIN "
                   "UPDATE dt SET col3='up', col4='up' "
                   "WHERE dt.id = OLD.id; END",
                   on='sqlite'),
            sa.DDL("CREATE TRIGGER dt_up ON dt AFTER UPDATE AS "
                   "UPDATE dt SET col3='up', col4='up' "
                   "WHERE dt.id IN (SELECT id FROM deleted);",
                   on='mssql'),
            sa.DDL("CREATE TRIGGER dt_up BEFORE UPDATE ON dt "
                   "FOR EACH ROW BEGIN "
                   ":NEW.col3 := 'up'; :NEW.col4 := 'up'; END;",
                   on='oracle'),
            sa.DDL("CREATE TRIGGER dt_up BEFORE UPDATE ON dt "
                   "FOR EACH ROW BEGIN "
                   "SET NEW.col3='up'; SET NEW.col4='up'; END",
                   on=lambda ddl, event, target, bind, **kw:
                   bind.engine.name not in ('oracle', 'mssql', 'sqlite')
                   ),
        ):
            event.listen(dt, 'after_create', up)
        event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_up"))

    @classmethod
    def setup_classes(cls):
        class Default(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        Default, dt = cls.classes.Default, cls.tables.dt

        mapper(Default, dt)

    def test_insert(self):
        """col2/col4 are filled by the INSERT trigger; col1/col3 stay None."""
        Default = self.classes.Default

        d1 = Default(id=1)

        eq_(d1.col1, None)
        eq_(d1.col2, None)
        eq_(d1.col3, None)
        eq_(d1.col4, None)

        session = create_session()
        session.add(d1)
        session.flush()

        eq_(d1.col1, None)
        eq_(d1.col2, 'ins')
        eq_(d1.col3, None)
        # don't care which trigger fired
        assert d1.col4 in ('ins', 'up')

    def test_update(self):
        """col3/col4 are filled by the UPDATE trigger after a flush of a change."""
        Default = self.classes.Default

        d1 = Default(id=1)
        session = create_session()
        session.add(d1)
        session.flush()
        d1.col1 = 'set'
        session.flush()
        eq_(d1.col1, 'set')
        eq_(d1.col2, 'ins')
        eq_(d1.col3, 'up')
        eq_(d1.col4, 'up')
class ExcludedDefaultsTest(fixtures.MappedTest):
    """A column excluded from the mapping still receives its
    client-side default when the row is inserted."""
    @classmethod
    def define_tables(cls, metadata):
        # The Table registers itself on `metadata` as a side effect;
        # the local name is unused beyond that.
        dt = Table('dt', metadata,
                   Column('id', Integer, primary_key=True,
                          test_needs_autoincrement=True),
                   Column('col1', String(20), default="hello"))
    def test_exclude(self):
        dt = self.tables.dt
        class Foo(fixtures.BasicEntity):
            pass
        # col1 is not mapped, yet the INSERT should still apply its
        # default value.
        mapper(Foo, dt, exclude_properties=('col1',))
        f1 = Foo()
        sess = create_session()
        sess.add(f1)
        sess.flush()
        eq_(dt.select().execute().fetchall(), [(1, "hello")])
| 34.541096 | 72 | 0.508031 |
7162b0eacd5f2976082da93ae7ec87ab5d6ea112 | 6,802 | py | Python | segmentation_rt/dl/dataloader/dataloader.py | BrouBoni/segmentation_RT | e44f4fafe23652f3122a5e65bd8515283dcfdbe0 | [
"MIT"
] | 6 | 2021-02-11T15:59:56.000Z | 2021-12-17T20:15:35.000Z | segmentation_rt/dl/dataloader/dataloader.py | liuhd073/segmentation_RT | e44f4fafe23652f3122a5e65bd8515283dcfdbe0 | [
"MIT"
] | null | null | null | segmentation_rt/dl/dataloader/dataloader.py | liuhd073/segmentation_RT | e44f4fafe23652f3122a5e65bd8515283dcfdbe0 | [
"MIT"
] | 3 | 2021-04-09T17:08:02.000Z | 2021-08-03T07:20:20.000Z | """
Implementation of two dataloader, one for the training and the other one for inference.
Both are based on 3D patch samplers allowing a less greedy memory consumption.
"""
import torch
import torch.utils.data
import torchio as tio
from segmentation_rt.util.util import get_subjects
class DatasetSingle:
    """
    Initialize a dataset suited for inference, extracting patches across a whole volume.

    :param str root:
        root folder containing CT dicom files or a path to a nii file.
    :param list[str] structures:
        list of structures to label the output.
    :param (int, int, int) patch_size:
        Tuple of integers (width, height, depth).
    """
    def __init__(self, root, structures, patch_size=(512, 512, 6)):
        self.root = root
        self.structures = structures
        self.n_structures = len(structures)
        # Reorient to canonical axes and rescale intensities to [0, 1]
        # over the 1st-99th percentile range.
        self.transform = tio.Compose([
            tio.ToCanonical(),
            tio.RescaleIntensity(1, (1, 99.0))
        ])
        self.subject = tio.Subject(ct=tio.ScalarImage(root))
        self.patch_size = patch_size
        # Overlap (in voxels) between adjacent patches for aggregation.
        self.patch_overlap = 4
        # Dense grid sampler covering the whole transformed volume.
        grid_sampler = tio.inference.GridSampler(
            self.transform(self.subject),
            self.patch_size,
            self.patch_overlap,
        )
        self.loader = torch.utils.data.DataLoader(
            grid_sampler, batch_size=4, drop_last=True)
        # Aggregator reassembles per-patch predictions into a volume.
        self.aggregator = tio.inference.GridAggregator(grid_sampler)
class DatasetPatch:
    """
    Initialize a dataset suited for patch-based training. Patches are sampled with labeled voxels at their center.

    :param str root:
        root folder.
    :param list[str] structures:
        list of structures.
    :param float ratio:
        splitting ratio.
    :param (int, int, int) patch_size:
        Tuple of integers (width, height, depth).
    :param int batch_size:
        batch size.
    :param int num_worker:
        number of subprocesses to use for data loading.
    :param int samples_per_volume:
        number of patches to extract from each volume.
    :param int max_length:
        maximum number of patches that can be stored in the queue.
    """
    def __init__(self, root, structures, ratio=0.9, patch_size=(384, 384, 6),
                 batch_size=1, num_worker=2, samples_per_volume=20, max_length=300):
        self.root = root
        self.structures = structures
        self.n_structures = len(structures)
        self.batch_size = batch_size
        self.num_worker = num_worker
        # Same preprocessing as inference: canonical axes + [0, 1] rescale.
        self.transform = tio.Compose([
            tio.ToCanonical(),
            tio.RescaleIntensity(1, (1, 99.0))
        ])
        self.subjects = get_subjects(self.root, self.structures, self.transform)
        # Split subjects, then wrap each split in a patch queue.
        # n labels = structures + 1 (background).
        self.training_subjects, self.validation_subjects = random_split(self.subjects, ratio)
        self.patches_training_set, self.patches_validation_set = queuing(self.training_subjects,
                                                                         self.validation_subjects,
                                                                         patch_size, len(structures)+1,
                                                                         samples_per_volume,
                                                                         max_length, num_worker)
    def get_loaders(self):
        """
        Return training and validation :class:`data.DataLoader`.

        :return: training and validation DataLoader.
        :rtype: (:class:`data.DataLoader`, :class:`data.DataLoader`)
        """
        training_loader_patches = torch.utils.data.DataLoader(
            self.patches_training_set, batch_size=self.batch_size,
            drop_last=True)
        validation_loader_patches = torch.utils.data.DataLoader(
            self.patches_validation_set, batch_size=self.batch_size,
            drop_last=True)
        print('Training set:', len(self.training_subjects), 'subjects')
        print('Validation set:', len(self.validation_subjects), 'subjects')
        return training_loader_patches, validation_loader_patches
def random_split(subjects, ratio=0.8):
    """
    Randomly partition a dataset into two non-overlapping datasets.

    :param subjects: dataset to be split.
    :type subjects: :class:`tio.SubjectsDataset`
    :param ratio: fraction of subjects assigned to the first (training) split.
    :type ratio: float
    :return: training and validation datasets.
    :rtype: (:class:`tio.SubjectsDataset`, :class:`tio.SubjectsDataset`)
    """
    total = len(subjects)
    n_train = int(ratio * total)
    # Remaining subjects form the validation split.
    lengths = (n_train, total - n_train)
    return torch.utils.data.random_split(subjects, lengths)
def queuing(training_subjects, validation_subjects, patch_size, n, samples_per_volume=10,
            max_length=200, num_workers=2):
    """
    Build the patch queues used for stochastic patch-based training.

    See :class:`tio.data.Queue`.

    :param n: number of label classes (structures plus background —
        callers pass ``len(structures) + 1``).
    :type n: int
    :param training_subjects: train dataset.
    :type training_subjects: :class:`tio.SubjectsDataset`
    :param validation_subjects: validation dataset.
    :type validation_subjects: :class:`tio.SubjectsDataset`
    :param patch_size: Tuple of integers (width, height, depth).
    :type patch_size: (int, int, int)
    :param samples_per_volume: number of patches to extract from each volume.
    :type samples_per_volume: int
    :param max_length: maximum number of patches that can be stored in the queue.
    :type max_length: int
    :param num_workers: number of subprocesses to use for data loading.
    :type num_workers: int
    :return: training and validation queue.
    :rtype: (:class:`tio.data.Queue`, :class:`tio.data.Queue`)
    """
    # Uniform sampling probability for every label class (idiomatic
    # dict comprehension instead of dict(zip(listcomp, listcomp))).
    probabilities = {label: 1 for label in range(n)}
    # Patches are centered on voxels drawn from 'label_map'.
    sampler = tio.data.LabelSampler(
        patch_size=patch_size,
        label_name='label_map',
        label_probabilities=probabilities,
    )
    # Training queue shuffles both subjects and patches.
    patches_training_set = tio.Queue(
        subjects_dataset=training_subjects,
        max_length=max_length,
        samples_per_volume=samples_per_volume,
        sampler=sampler,
        num_workers=num_workers,
        shuffle_subjects=True,
        shuffle_patches=True,
    )
    # Validation queue is deterministic (no shuffling).
    patches_validation_set = tio.Queue(
        subjects_dataset=validation_subjects,
        max_length=max_length,
        samples_per_volume=samples_per_volume,
        sampler=sampler,
        num_workers=num_workers,
        shuffle_subjects=False,
        shuffle_patches=False,
    )
    return patches_training_set, patches_validation_set
| 34.704082 | 114 | 0.639812 |
5370c8cfadc7e9fe8ae9d663db8d2859d2340b2e | 246 | py | Python | generate.py | lawreka/montagarticlernn | f3b7829f36fcf711891601a879d66136db8ada74 | [
"MIT"
] | 1 | 2020-05-25T16:25:17.000Z | 2020-05-25T16:25:17.000Z | generate.py | lawreka/montagarticlernn | f3b7829f36fcf711891601a879d66136db8ada74 | [
"MIT"
] | null | null | null | generate.py | lawreka/montagarticlernn | f3b7829f36fcf711891601a879d66136db8ada74 | [
"MIT"
] | null | null | null | import gpt_2_simple as gpt2
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name="montagmodel")
gpt2.generate_to_file(sess, destination_path='montagarticlernn-temp0-5-100samples.txt', nsamples=100, temperature=0.5, run_name="montagmodel")
| 35.142857 | 142 | 0.808943 |
02787fc720519795820a304a33a699951eaf6640 | 32,742 | py | Python | libs/Theano/theano/sandbox/cuda/tests/test_opt.py | dendisuhubdy/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 295 | 2015-09-25T21:15:04.000Z | 2022-01-13T01:16:18.000Z | libs/Theano/theano/sandbox/cuda/tests/test_opt.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 21 | 2015-10-28T19:06:32.000Z | 2022-03-11T23:13:05.000Z | libs/Theano/theano/sandbox/cuda/tests/test_opt.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 114 | 2015-09-26T21:23:02.000Z | 2021-11-19T02:36:41.000Z | from __future__ import print_function
import operator
import sys
import unittest
import numpy
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import theano
from six.moves import reduce
from theano.compile.pfunc import pfunc
from theano import config, tensor
import theano.tensor.tests.test_nlinalg
import theano.tensor.tests.test_opt as test_opt
from theano.tests.breakpoint import PdbBreakpoint
from theano.tests import unittest_tools as utt
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled')
import theano.sandbox.cuda.cula as cula
from theano.sandbox.cuda import basic_ops
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.scalar.basic_scipy import erfinv
from theano.tensor.nnet.blocksparse import sparse_block_dot
from theano.sandbox.cuda.blocksparse import GpuSparseBlockGemv, GpuSparseBlockOuter
# Build the GPU-including / GPU-excluding compilation modes used by all
# tests below.  Under FAST_COMPILE we still optimize with FAST_RUN so
# that the GPU optimizations under test are actually applied.
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_no_shared_var_graph():
    """Test that the InputToGpuOptimizer optimizer also transfers graphs
    that contain no shared variables to the GPU.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    f = theano.function([a, b], [a + b], mode=mode_with_gpu)
    l = f.maker.fgraph.toposort()
    assert len(l) == 4
    # Use the builtin any(): numpy.any() applied to a generator wraps
    # the generator object itself in a 0-d object array, which is always
    # truthy, so the previous asserts passed unconditionally.
    assert any(isinstance(x.op, cuda.GpuElemwise) for x in l)
    assert any(isinstance(x.op, cuda.GpuFromHost) for x in l)
    assert any(isinstance(x.op, cuda.HostFromGpu) for x in l)
def test_local_assert():
    """The Assert op survives GPU optimization and ends up operating
    directly on a GPU-typed input."""
    inp = theano.tensor.fmatrix()
    checked = theano.tensor.opt.assert_op(inp, theano.tensor.eq(inp, 0).any())
    fn = theano.function([inp], checked, mode=mode_with_gpu)
    assert_nodes = [node for node in fn.maker.fgraph.toposort()
                    if isinstance(node.op, theano.tensor.opt.Assert)]
    assert len(assert_nodes) == 1
    assert isinstance(assert_nodes[0].inputs[0].type, CudaNdarrayType)
def test_local_remove_all_assert():
    """The `unsafe` optimization tag controls whether Assert ops are
    stripped from the compiled graph."""
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())

    def count_asserts(mode):
        # Compile under `mode` and count surviving Assert nodes.
        fn = theano.function([x], a, mode=mode)
        return len([node for node in fn.maker.fgraph.toposort()
                    if isinstance(node.op, theano.tensor.opt.Assert)])

    # By default `unsafe` is not included, so the Assert remains.
    assert count_asserts(mode_with_gpu) == 1
    # Including `unsafe` removes the Assert.
    assert count_asserts(mode_with_gpu.including('unsafe')) == 0
    # Explicitly excluding `unsafe` keeps the Assert.
    assert count_asserts(mode_with_gpu.excluding('unsafe')) == 1
def test_local_gpu_contiguous_gpu_contiguous():
    """Nested gpu_contiguous calls collapse to a single GpuContiguous
    node in the compiled graph."""
    inp = tensor.fmatrix()
    once = basic_ops.gpu_contiguous(inp)
    twice = basic_ops.gpu_contiguous(once)
    for out in (once, twice):
        fn = theano.function([inp], out, mode=mode_with_gpu)
        n_contiguous = sum(isinstance(node.op, basic_ops.GpuContiguous)
                           for node in fn.maker.fgraph.toposort())
        assert n_contiguous == 1
def test_local_assert_no_cpu_op():
    """The assert_no_cpu_op config flag raises (or is ignored) when a
    compiled graph still contains CPU-only ops."""
    numpy.random.seed(1)
    m = numpy.random.uniform(-1, 1, (10, 10)).astype("float32")
    ms = cuda.shared_constructor(m, name="m_shared")
    out = theano.tensor.tanh(ms).dot(ms.T)
    # Exclude the elemwise-lifting opts so tanh stays on the CPU and
    # the assert_no_cpu_op check has something to trip on.
    mode_local_assert = mode_with_gpu.including("assert_no_cpu_op")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_0")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_1")
    # Save the global config so it can be restored in the finally blocks
    # even when an assertion fails.
    old = config.assert_no_cpu_op
    old2 = config.on_opt_error
    # If the flag is raise
    try:
        config.assert_no_cpu_op = 'raise'
        config.on_opt_error = 'ignore'
        assert_raises(AssertionError, theano.function,
                      [], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
        config.on_opt_error = old2
    # If the flag is ignore
    try:
        config.assert_no_cpu_op = 'ignore'
        theano.function([], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
def test_int_pow():
    """Integer powers of a GPU vector compile to the expected GPU node
    sequences for both `a*4` and `pow(a, 4)`."""
    vec = CudaNdarrayType([False])()

    def op_names_of(out):
        # Compile `out` and return the class names of the graph nodes.
        fn = theano.function([vec], out, mode=mode_with_gpu)
        return [node.op.__class__.__name__
                for node in fn.maker.fgraph.toposort()]

    assert op_names_of((vec * 4).sum()) == ['GpuCAReduce', 'GpuElemwise',
                                            'HostFromGpu']
    assert op_names_of(tensor.pow(vec, 4).sum()) == ['GpuElemwise',
                                                     'GpuCAReduce',
                                                     'HostFromGpu']
def test_gpualloc():
    '''
    This test tries to catch the scenario when, due to infer_shape,
    the input of the alloc changes from tensor scalar to a constant
    1. In this case the originally constructed broadcastable pattern will
    have a False for that dimension, but the new broadcastable pattern
    that will be inserted by gpualloc will have a True since it knows the
    dimension is 1 and therefore broadcastable.
    '''
    x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
    m = (x).dimshuffle(['x', 0])
    v = tensor.alloc(1., *m.shape)
    # Exclude local_elemwise_alloc so the GpuAlloc node survives and can
    # be asserted on.
    f = theano.function([], v + x,
                        mode=mode_with_gpu.excluding("local_elemwise_alloc"))
    l = f.maker.fgraph.toposort()
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l])
def test_gpuallocempty():
    """AllocEmpty moves to GpuAllocEmpty for float32 outputs; an int32
    AllocEmpty compiled without the GPU stays on the CPU."""
    fn_gpu = theano.function([], tensor.AllocEmpty('float32')(2,3),
                             mode=mode_with_gpu)
    gpu_nodes = fn_gpu.maker.fgraph.toposort()
    assert any(isinstance(node.op, basic_ops.GpuAllocEmpty)
               for node in gpu_nodes)
    fn_cpu = theano.function([], tensor.AllocEmpty('int32')(2,3))
    cpu_nodes = fn_cpu.maker.fgraph.toposort()
    assert not any(isinstance(node.op, basic_ops.GpuAllocEmpty)
                   for node in cpu_nodes)
class Test_local_elemwise_alloc(test_opt.Test_local_elemwise_alloc):
    """Run the CPU local_elemwise_alloc test suite against the GPU
    alloc ops by rebuilding the fixtures with gpu_alloc."""
    dtype = 'float32'
    def setUp(self):
        # Rebuild the base-class fixtures, replacing tensor.alloc with
        # basic_ops.gpu_alloc so the GPU optimization path is exercised.
        super(Test_local_elemwise_alloc, self).setUp()
        self.fast_run_mode = mode_with_gpu
        # self.vec = tensor.vector('vec', dtype=dtype)
        # self.mat = tensor.matrix('mat', dtype=dtype)
        # self.tens = tensor.tensor3('tens', dtype=dtype)
        # self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
        # self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
        self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
        self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
        self.alloc_w_dep_tens = basic_ops.gpu_alloc(
            self.vec,
            self.tens.shape[0],
            self.tens.shape[1]
        )
        self.tv_wo_dep = basic_ops.gpu_alloc(self.vec, 5, 5)
        self.tm_wo_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
        self.s = tensor.iscalar('s')
        self.tv_w_dep = basic_ops.gpu_alloc(self.vec, self.s, self.s)
        self.tm_w_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
        self.row = tensor.row(dtype=self.dtype)
        self.o = basic_ops.gpu_alloc(self.row, 5, 5)
    def _verify_alloc_count(self, f, count):
        # Count GpuAlloc nodes (instead of the base class's Alloc).
        assert(
            sum([isinstance(elem.op, basic_ops.GpuAlloc)
                 for elem in f.maker.fgraph.toposort()
                 if elem.op is not None]) == count
        )
def test_alloc_memset_0():
    """GpuAlloc filled with a constant 0 gets the memset_0 fast path;
    nonzero constants do not."""
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')
    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()
    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()
    # Test with 1, 1 (a length-2 fill value, still not a memset)
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
def test_gpuspecifyshape():
    """SpecifyShape on a GPU shared variable does not force a transfer
    back to the host."""
    gpu_x = cuda.shared_constructor(numpy.ones(3, dtype='float32'), 'x')
    specified = theano.tensor.specify_shape(gpu_x + numpy.float32(1), (3,))
    fn = theano.function([], updates=[(gpu_x, specified * numpy.float32(2))],
                         mode=mode_with_gpu)
    topo = fn.maker.fgraph.toposort()
    assert not any(isinstance(node.op, cuda.HostFromGpu) for node in topo)
def test_softmax():
    """Softmax is replaced by GpuSoftmax and matches the CPU result."""
    inp = tensor.fmatrix()
    gpu_fn = theano.function([inp], tensor.nnet.nnet.Softmax()(inp),
                             mode=mode_with_gpu.excluding('cudnn'))
    cpu_fn = theano.function([inp], tensor.nnet.nnet.Softmax()(inp),
                             mode=mode_without_gpu)
    assert isinstance(gpu_fn.maker.fgraph.toposort()[1].op,
                      cuda.nnet.GpuSoftmax)
    data = numpy.random.rand(7, 8).astype('float32')
    assert numpy.allclose(gpu_fn(data), cpu_fn(data))
def test_softmax_with_bias():
    """SoftmaxWithBias is replaced by GpuSoftmaxWithBias and matches
    the CPU result."""
    inp = tensor.fmatrix()
    bias = tensor.fvector()
    gpu_fn = theano.function([inp, bias],
                             tensor.nnet.nnet.SoftmaxWithBias()(inp, bias),
                             mode=mode_with_gpu)
    cpu_fn = theano.function([inp, bias],
                             tensor.nnet.nnet.SoftmaxWithBias()(inp, bias),
                             mode=mode_without_gpu)
    assert isinstance(gpu_fn.maker.fgraph.toposort()[2].op,
                      cuda.nnet.GpuSoftmaxWithBias)
    data = numpy.random.rand(7, 8).astype('float32')
    bias_val = numpy.random.rand(8).astype('float32')
    assert numpy.allclose(gpu_fn(data, bias_val), cpu_fn(data, bias_val))
def test_opt_gpujoin_onlyajoin():
    """A join of GPU shared variables becomes a GpuJoin; with a float64
    operand the join stays on the CPU."""
    # from a bug in normal sampling
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)
    c = tensor.join(1, a, b)
    f = theano.function([], c, mode=mode_with_gpu)
    f()
    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
    # test mixed dtype
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
    b = theano.tensor.constant(_b)
    c = tensor.join(1, a, b)
    f = theano.function([], c, mode=mode_with_gpu)
    f()
    graph_nodes = f.maker.fgraph.toposort()
    # float64 cannot live on this GPU backend, so the Join stays on CPU.
    assert isinstance(graph_nodes[-1].op, theano.tensor.Join)
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    """GpuJoin followed by a subtensor slice compiles to the expected
    node sequence and computes the right values."""
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)
    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)
    c = tensor.join(0, a_prime, b_prime)
    # Drop the last element so a GpuSubtensor follows the GpuJoin.
    d = c[:-1]
    f = theano.function([], d, mode=mode_with_gpu)
    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)
    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]
    assert numpy.allclose(numpy.asarray(f()), concat)
def test_opt_gpujoin_joinvectors_negativeaxes():
    """
    Test that negative axis concatenation works as expected.
    """
    # Test case for one-dimensional vectors
    rng = numpy.random.RandomState(22)
    x1 = rng.rand(5)
    x2 = rng.rand(10)
    t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
    t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
    t = tensor.concatenate([t1, t2], axis=-1)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-1))

    # Test case for two-dimensional vectors
    x1 = rng.rand(5, 10)
    x2 = rng.rand(10, 10)
    t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
    t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
    t = tensor.concatenate([t1, t2], axis=-2)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-2))

    def build_and_run(axis):
        # Graph construction, compilation or execution may raise.
        fn = theano.function(
            inputs=[],
            outputs=tensor.concatenate([t1, t2], axis=axis))
        fn()

    # A value error is raised when the (5, 10) and (10, 10) matrices
    # don't match along the negative concatenation axis.  Use
    # assert_raises (imported at module level) instead of the old
    # try/except + assert(False)/assert(True) pattern, which was noisy
    # and whose assert(True) branches asserted nothing.
    assert_raises(ValueError, build_and_run, -1)

    # An index error is raised when the negative axis is larger in
    # absolute value than the smallest number of dimensions.
    assert_raises(IndexError, build_and_run, -3)
def test_local_gpu_subtensor():
    """Subtensor stays on the CPU for CPU-forced shared variables and
    plain graph inputs, but moves to GpuSubtensor when the sliced input
    is reused on the GPU (to avoid transferring the full tensor)."""
    # Test shared forced on CPU.
    t = tensor._shared(numpy.zeros(20, "float32"))
    f = theano.function([], t[3:4], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert any([type(node.op) is tensor.Subtensor for node in topo])
    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
    # Test graph input.
    t = tensor.fmatrix()
    f = theano.function([t], t[3:4], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert any([type(node.op) is tensor.Subtensor for node in topo])
    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
    # Test multiple use of the input
    # We want the subtensor to be on the GPU to prevent multiple transfer.
    t = tensor.fmatrix()
    f = theano.function([t], [t[3:4], t+1], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert not any([type(node.op) is tensor.Subtensor for node in topo])
    assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
    # Test multiple use of the input + input as output
    # We want the subtensor to be on the GPU to prevent multiple transfer.
    t = tensor.fmatrix()
    f = theano.function([t], [t[3:4], t+1, t], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert not any([type(node.op) is tensor.Subtensor for node in topo])
    assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
    # Test shared forced on CPU and we do computation on the output of
    # the subtensor.
    t = tensor._shared(numpy.zeros(20, "float32"))
    f = theano.function([], t[3:4]+1, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert any([type(node.op) is tensor.Subtensor for node in topo])
    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
def test_local_gpu_split():
    """ Test that the GpuSplit op is being applied and works """
    # Construct symbolic split
    x = tensor.fvector()
    splits = tensor.lvector()
    ra, rb, rc = tensor.split(x, splits, n_splits=3, axis=0)
    # Compile function to use CPU
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_without_gpu)
    # Get values for CPU version
    cpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    # Ensure that one op is theano.tensor.Split
    assert any([isinstance(o.op, theano.tensor.Split) for o in l])
    # GPU version
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
    # Test the other path of the optimizer, when it is the output that
    # is moved to the GPU.
    ra = cuda.gpu_from_host(ra)
    f = theano.function([x, splits], [ra, rb, rc],
                        mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
    # Test that split with only 1 output work
    ra = tensor.split(x, splits, n_splits=1, axis=0)
    f = theano.function([x, splits], [ra], mode=mode_without_gpu)
    cpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    # Ensure that no op is theano.tensor.Split or GpuSplit, they get
    # optimized away.
    assert not any([isinstance(o.op, (theano.tensor.Split,
                                      cuda.GpuSplit)) for o in l])
    # GPU version
    f = theano.function([x, splits], [ra], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    assert not any([isinstance(o.op, (theano.tensor.Split,
                                      cuda.GpuSplit)) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
def test_print_op():
    """ Test that print ops don't block gpu optimization"""
    b = tensor.fmatrix()
    f = theano.function([b], theano.printing.Print()(b)*2, mode=mode_with_gpu)
    # theano.printing.debugprint(f)
    # print f.maker.fgraph.toposort()
    #[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]
    # The Print node should sit between the host->GPU transfer and the
    # GPU elemwise, i.e. the multiplication still runs on the GPU.
    topo = f.maker.fgraph.toposort()
    assert topo[0].op == cuda.gpu_from_host
    assert isinstance(topo[1].op, theano.printing.Print)
    assert isinstance(topo[2].op, cuda.GpuElemwise)
    assert topo[3].op == cuda.host_from_gpu
    f(numpy.random.random((5, 5)).astype('float32'))
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()
    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2
    f = theano.function([b], output, mode=mode_with_gpu)
    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
def test_local_gpu_elemwise_careduce():
    """An elemwise square feeding a sum is fused into the GpuCAReduce
    node as its pre_scalar_op, for both full and axis reductions."""
    x = theano.tensor.fmatrix()
    o = (x * x).sum()
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    # The square is absorbed into the reduction, not a separate node.
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    data = numpy.random.rand(3, 4).astype('float32')
    utt.assert_allclose(f(data), (data * data).sum())
    o = (x * x).sum(axis=1)
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    utt.assert_allclose(f(data), (data * data).sum(axis=1))
def test_huge_elemwise_fusion():
    """ Test that the GpuElemwise fusion works correctly.

    We check that we fuse one node with part of its input
    in case there are too many inputs, which would otherwise bust the 256
    bytes kernel-parameter limit.
    """
    shape = (2, 3, 4, 5, 6)
    ttype = tensor.tensor(dtype='float32', broadcastable=(False,) * len(shape))
    # Pointer size decides how many inputs fit in the parameter buffer.
    gpu_ptr_size = theano.sandbox.cuda.opt.get_device_type_sizes()['gpu_ptr_size']
    if gpu_ptr_size == 8:
        nb_in = 7
        len_topo = 10
    elif gpu_ptr_size == 4:
        nb_in = 8
        len_topo = 11
    else:
        raise Exception("Unexpected value for gpu_ptr_size", gpu_ptr_size)
    vars = [tensor.tanh(ttype) for x in range(nb_in)]
    f = pfunc(vars, [reduce(operator.sub, vars)], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == len_topo
    # The chain of subtractions is split into exactly two GpuElemwise
    # nodes: a plain Sub and a fused Composite.
    assert sum([isinstance(node.op, cuda.GpuElemwise) for node in topo]) == 2
    assert isinstance(topo[-3].op.scalar_op, theano.scalar.basic.Sub)
    assert isinstance(topo[-2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
    f(*[gen() for i in range(nb_in)])
    # Test the case where we can't put the computation on the gpu! There are
    # too many dimensions to the input to have 2 inputs to the op!
    shape = (1, 2, 3, 4, 5, 6, 7, 2, 2, 3, 2, 1, 2, 2, 2,)
    ttype = tensor.tensor(dtype='float32', broadcastable=(False,) * len(shape))
    vars = [tensor.tanh(ttype) for x in range(7)]
    f = pfunc(vars, [vars[0] - vars[1] - vars[2] - vars[3] - vars[4] -
                     vars[5] - vars[6]], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert sum([isinstance(node.op, cuda.GpuElemwise) for node in topo]) == 0
    assert sum([isinstance(node.op, tensor.Elemwise) for node in topo]) == 1
    # let debugmode catch errors
    gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
    f(gen(), gen(), gen(), gen(), gen(), gen(), gen())
    def gen(shape):
        return theano._asarray(numpy.random.rand(*shape), dtype='float32')
    max_var = 16  # excluded
    # Exhaustively check that sums of up to 15 shared GPU variables, at
    # every rank from 1-d to 6-d, fuse entirely into GpuElemwise nodes.
    for shape in [(2,),
                  (2, 2),
                  (2, 2, 2),
                  (2, 2, 2, 2),
                  (2, 2, 2, 2, 2),  # 5d
                  (2, 2, 2, 2, 2, 2),
                  # (2, 2, 2, 2, 2, 2, 2),
                  # (2, 2, 2, 2, 2, 2, 2, 2),
                  # (2, 2, 2, 1, 1, 1, 1, 2, 2),  # 9d
                  ]:
        vals = [cuda.shared_constructor(gen(shape)) for x in range(max_var)]
        for use_tan in [True, False]:
            if use_tan:
                vars = [tensor.tanh(x) for x in vals]
            else:
                vars = vals
            for nb_var in range(1, max_var):
                out = reduce(lambda x, y: x + y, vars[:nb_var])
                if not isinstance(out.type, CudaNdarrayType):
                    out = cuda.gpu_from_host(out)
                f = pfunc([], [out], mode=mode_with_gpu)
                topo = f.maker.fgraph.toposort()
                # print shape, nb_var, use_tan, len(topo)
                assert (sum([isinstance(node.op, cuda.GpuElemwise)
                             for node in topo]) == len(topo) or
                        (nb_var == 1 and use_tan is False))
                assert sum([isinstance(node.op, tensor.Elemwise)
                            for node in topo]) == 0
                # let debugmode catch errors
                f()
def test_local_gpu_elemwise_0():
    """
    Test local_gpu_elemwise_0 when there is a dtype upcastable to float32
    """
    a = tensor.bmatrix()
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
    b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
    c_v = (numpy.random.rand(4, 5) * 10).astype("float32")
    # Due to optimization order, this composite is created when all
    # the op are on the gpu.
    f = theano.function([a, b, c], a + b + c, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
    # Now test with the composite already on the cpu before we move it
    # to the gpu
    a_s = theano.scalar.int8()
    b_s = theano.scalar.float32()
    c_s = theano.scalar.float32()
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
    out_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
    # Test multiple output
    a_s = theano.scalar.float32()
    a = tensor.fmatrix()
    from theano.scalar.basic import identity
    out_s = theano.scalar.Composite([a_s, b_s, c_s],
                                    [identity(a_s), identity(c_s), identity(b_s)])
    outs_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 0
    out = f(a_v, b_v, c_v)
    utt.assert_allclose(out[0], a_v)
    utt.assert_allclose(out[1], c_v)
    utt.assert_allclose(out[2], b_v)
    # Test multiple output
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * c_s])
    outs_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 0
    out = f(a_v, b_v, c_v)
    utt.assert_allclose(out[0], a_v + b_v)
    utt.assert_allclose(out[1], a_v * c_v)
    # Test non-contiguous input
    c = cuda.shared_constructor(c_v)
    f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]),
                        mode=mode_with_gpu)
    out = f(a_v, b_v)
    utt.assert_allclose(out[0], a_v[::2] + b_v[::2])
    utt.assert_allclose(out[1], a_v[::2] * c_v[::2])
def test_elemwise_fusion():
    """ Test that GpuElemwise fusion works correctly.

    Builds a + b + c with one GPU shared variable and two inputs, and
    checks that the elemwise additions are fused into a single Composite
    scalar op in the optimized graph.
    """
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                                dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    for i, node in enumerate(topo):
        print(i, node, file=sys.stdout)
    # Expected: transfers + one fused elemwise + output transfer == 4 nodes,
    # with the fused scalar op being a Composite.
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
import theano.tests.test_ifelse
class TestIfElse(theano.tests.test_ifelse.test_ifelse):
    """Re-run the generic IfElse test suite with GPU-specific settings.

    Only the class attributes differ from the base suite: float32 dtype,
    the GPU optimization mode, and CUDA constructors for shared variables
    and output casting.
    """
    dtype = "float32"
    mode = mode_with_gpu
    cast_output = staticmethod(basic_ops.as_cuda_ndarray_variable)
    shared = staticmethod(cuda.shared_constructor)
    def get_ifelse(self, n):
        # The GPU IfElse uses views to avoid copies.
        return theano.ifelse.IfElse(n, gpu=True, as_view=True)
def test_incsubtensor_mixed():
    """Regression test: increment a float32 tensor by a float64 tensor.

    The result dtype is float32, so downcasting the float64 increment in
    order to transfer it to the GPU is acceptable.  The original bug was
    that the optimization applied GpuFromHost without casting first,
    which made the optimization fail.
    """
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    # The float64 input must first feed into an elemwise Cast node...
    (cast_node, _), = f.maker.fgraph.inputs[1].clients
    print(cast_node)
    assert isinstance(cast_node.op, tensor.Elemwise)
    assert isinstance(cast_node.op.scalar_op, theano.scalar.Cast)
    # ...whose output is then what gets transferred to the GPU.
    (transfer_node, _), = cast_node.outputs[0].clients
    assert isinstance(transfer_node.op, cuda.GpuFromHost)
def test_erfinvgpu():
    """Check that local_gpu_elemwise_0 swaps Erfinv for ErfinvGPU."""
    x = tensor.fmatrix()
    f_gpu = theano.function([x], tensor.Elemwise(erfinv)(x),
                            mode=mode_with_gpu)
    f_cpu = theano.function([x], tensor.Elemwise(erfinv)(x),
                            mode=mode_without_gpu)
    # The second node of the optimized GPU graph must be the GPU elemwise
    # carrying the ErfinvGPU scalar op.
    node = f_gpu.maker.fgraph.toposort()[1]
    assert isinstance(node.op, cuda.GpuElemwise)
    assert isinstance(node.op.scalar_op, cuda.elemwise.ErfinvGPU)
    # The GPU and CPU versions must agree numerically.
    xv = numpy.random.rand(7, 8).astype('float32')
    assert numpy.allclose(f_gpu(xv), f_cpu(xv))
def test_local_gpu_solve():
    """Check that slinalg.solve on GPU shared variables is replaced by
    cula.GpuSolve, and that the result is numerically correct.
    Requires the optional CULA dependency."""
    if not cula.cula_available:
        raise SkipTest('Optional dependency CULA not available')
    numpy.random.seed(1)
    def cmp(a_shp, b_shp):
        # Build A (square) and b on the GPU, solve A x = b.
        a0 = numpy.random.uniform(-0.4, 0.4,
                                  a_shp).astype('float32')
        a = cuda.shared_constructor(a0, 'a')
        b0 = numpy.random.uniform(-0.4, 0.4,
                                  b_shp).astype('float32')
        b = cuda.shared_constructor(b0, 'b')
        f = pfunc([], tensor.slinalg.solve(a, b), mode=mode_with_gpu)
        # The optimized graph must contain the GPU solver...
        assert isinstance(f.maker.fgraph.toposort()[1].inputs[0].owner.op,
                          cuda.cula.GpuSolve)
        # ...and the local optimizer must fire on a fresh apply node.
        assert cuda.opt.local_gpu_solve.transform(
            tensor.slinalg.solve(a, b).owner)
        out = f()
        # Verify the solution: A @ out == b.
        assert numpy.allclose(numpy.dot(a0, out), b0)
    cmp((6, 6), (6, 1))
    cmp((5, 5), (5, 1))
def test_local_gpu_dot_to_dot22dot():
    """Check that tensor.dot on GPU shared variables is rewritten by the
    local_gpu_dot_to_dot22 optimization, for vector/matrix and
    matrix/vector cases, including non-contiguous (reversed-stride) input."""
    def cmp(a_shp, b_shp):
        a0 = numpy.random.rand(*a_shp).astype('float32')
        a = cuda.shared_constructor(a0, 'a')
        b0 = numpy.random.rand(*b_shp).astype('float32')
        b = cuda.shared_constructor(b0, 'b')
        f = pfunc([], tensor.dot(a, b), mode=mode_with_gpu)
        # The local optimizer must fire on a fresh dot apply node.
        assert cuda.opt.local_gpu_dot_to_dot22.transform(
            tensor.dot(a, b).owner)
        out = f()
        assert numpy.allclose(numpy.dot(a0, b0), out)
        # Try with a matrix equal to a0, but with strides in both dims
        a.set_value(a0)
        a.set_value(
            a.get_value(borrow=True,
                        return_internal_type=True)[::-1],
            borrow=True)
        f()
    cmp((4,), (4, 5))
    cmp((3, 4), (4,))
def test_blocksparse_gpu_gemv_opt():
    """The forward sparse block dot must be optimized to GpuSparseBlockGemv."""
    bias = tensor.fmatrix()
    weights = tensor.ftensor4()
    hidden = tensor.ftensor3()
    in_idx = tensor.lmatrix()
    out_idx = tensor.lmatrix()
    result = sparse_block_dot(weights, hidden, in_idx, bias, out_idx)
    fn = theano.function([weights, hidden, in_idx, bias, out_idx], result,
                         mode=mode_with_gpu)
    # Second-to-last node (just before the host transfer) is the GPU gemv.
    assert isinstance(fn.maker.fgraph.toposort()[-2].op, GpuSparseBlockGemv)
def test_blocksparse_gpu_outer_opt():
    """The gradient of sparse_block_dot wrt W must use GpuSparseBlockOuter."""
    bias = tensor.fmatrix()
    weights = tensor.ftensor4()
    hidden = tensor.ftensor3()
    in_idx = tensor.lmatrix()
    out_idx = tensor.lmatrix()
    result = sparse_block_dot(weights, hidden, in_idx, bias, out_idx)
    grad_w = tensor.grad(result.sum(), wrt=weights)
    fn = theano.function([weights, hidden, in_idx, bias, out_idx],
                         [result, grad_w],
                         mode=mode_with_gpu)
    # Second-to-last node (just before the host transfer) is the GPU outer op.
    assert isinstance(fn.maker.fgraph.toposort()[-2].op, GpuSparseBlockOuter)
class test_diag(theano.tensor.tests.test_nlinalg.test_diag):
    """Re-run the generic diag test suite on the GPU (float32 / CudaNdarray)."""
    mode = mode_with_gpu
    shared = staticmethod(cuda.shared_constructor)
    floatX = 'float32'
    type = CudaNdarrayType
    def __init__(self, name):
        # NOTE(review): super() is given the *base* class, so the base
        # suite's own __init__ is skipped and its parent's runs instead --
        # looks deliberate (bypasses base setup), but confirm.
        super(theano.tensor.tests.test_nlinalg.test_diag,
              self).__init__(name)
class Test_GpuReshape(test_opt.Test_Reshape):
    """Re-run the generic Reshape test suite against the GpuReshape op."""
    def setUp(self):
        self.mode = mode_with_gpu
        self.op = basic_ops.GpuReshape
def test_local_abstractconv_gemm():
    """Regression test for gh-4036: the abstract conv2d with 'half' border
    mode must compile and run under the GPU optimizer.  Only the
    optimization is exercised here (no numerical check)."""
    image = tensor.ftensor4()
    W = tensor.ftensor4()
    conv = tensor.nnet.conv2d(image,
                              W,
                              input_shape=(1, 32, 32, 32),
                              filter_shape=(32, 32, 3, 3),
                              border_mode='half')
    f = theano.function([image, W], [conv], mode=mode_with_gpu)
    # Executing the function is the test: compilation/optimization must
    # not raise, and the call must succeed.
    f(numpy.random.rand(1, 32, 32, 32).astype('float32'),
      numpy.random.rand(32, 32, 3, 3).astype('float32'))
# Ad-hoc entry point: runs only a hand-picked subset of the tests in this
# module (the functions are defined earlier in the file); use a test runner
# for full coverage.
if __name__ == '__main__':
    test_gpualloc()
    test_opt_gpujoin_onlyajoin()
    test_opt_gpujoin_joinvectors_elemwise_then_minusone()
    test_opt_gpujoin_joinvectors_negativeaxes()
| 37.164586 | 225 | 0.636033 |
1bd78e46affdd1b3871e371ba147694a446cd797 | 8,587 | py | Python | third_party/oauth2client/tools.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 39 | 2015-06-10T23:18:07.000Z | 2021-10-21T04:29:06.000Z | third_party/oauth2client/tools.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 2 | 2016-08-22T12:38:10.000Z | 2017-01-26T18:37:33.000Z | third_party/oauth2client/tools.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 26 | 2015-06-10T22:09:15.000Z | 2021-06-27T15:45:15.000Z | # Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import BaseHTTPServer
import argparse
import httplib2
import logging
import os
import socket
import sys
import webbrowser
from oauth2client import client
from oauth2client import file
from oauth2client import util
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
# argparser is an ArgumentParser that contains the command-line options
# expected by tools.run_flow().  Pass it in via the 'parents' argument of
# your own ArgumentParser so your app inherits these flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--auth_host_name', default='localhost',
                       help='Hostname when running a local web server.')
argparser.add_argument('--noauth_local_webserver', action='store_true',
                       default=False, help='Do not run a local web server.')
argparser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
                       nargs='*', help='Port web server should listen on.')
argparser.add_argument('--logging_level', default='ERROR',
                       choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
                                'CRITICAL'],
                       help='Set the logging level of detail.')
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Filled in by ClientRedirectHandler.do_GET with the redirect's
  # parsed query string (e.g. {'code': ...} or {'error': ...}).
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """

  # Note: the handler uses 's' instead of the conventional 'self'.
  def do_GET(s):
    """Handle a GET request.

    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    s.send_response(200)
    s.send_header("Content-type", "text/html")
    s.end_headers()
    # Everything after the first '?' is the OAuth redirect's query string.
    query = s.path.split('?', 1)[-1]
    query = dict(parse_qsl(query))
    # Hand the parsed parameters back to the server for run_flow() to read.
    s.server.query_params = query
    s.wfile.write("<html><head><title>Authentication Status</title></head>")
    s.wfile.write("<body><p>The authentication flow has completed.</p>")
    s.wfile.write("</body></html>")

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization.;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
The tools module defines an ArgumentParser the already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.run_parser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
def message_if_missing(filename):
  """Build the warning shown when the CLIENT_SECRETS file is absent.

  Args:
    filename: path that was expected to contain the client secrets.

  Returns:
    A human-readable warning message embedding the filename.
  """
  return _CLIENT_SECRETS_MESSAGE % (filename,)
# The legacy gflags-based tools.run() lives in old_run; when that module
# (and therefore gflags) is unavailable, expose a stub that points callers
# at the supported run_flow() API instead.
try:
  from old_run import run
  from old_run import FLAGS
except ImportError:
  def run(*args, **kwargs):
    # BUG FIX: "preferrably" -> "preferably" in the user-facing message.
    raise NotImplementedError(
        'The gflags library must be installed to use tools.run(). '
        'Please install gflags or preferably switch to using '
        'tools.run_flow().')
| 35.192623 | 81 | 0.691278 |
92abea78f3dbfae0bf6417d051884922b349c432 | 3,503 | py | Python | keras_retinanet/backend/tensorflow_backend.py | pauldes/keras-retinanet | 097600af1d4720f491582cfa1ed12d9a2d2d8096 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/backend/tensorflow_backend.py | pauldes/keras-retinanet | 097600af1d4720f491582cfa1ed12d9a2d2d8096 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/backend/tensorflow_backend.py | pauldes/keras-retinanet | 097600af1d4720f491582cfa1ed12d9a2d2d8096 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow
def ones(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.ones``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/ones .
    """
    return tensorflow.ones(*args, **kwargs)
def transpose(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.transpose``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/transpose .
    """
    return tensorflow.transpose(*args, **kwargs)
def map_fn(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.map_fn``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/map_fn .
    """
    return tensorflow.map_fn(*args, **kwargs)
def pad(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.pad``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/pad .
    """
    return tensorflow.pad(*args, **kwargs)
def top_k(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.nn.top_k``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/nn/top_k .
    """
    return tensorflow.nn.top_k(*args, **kwargs)
def clip_by_value(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.clip_by_value``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/clip_by_value .
    """
    return tensorflow.clip_by_value(*args, **kwargs)
def resize_images(images, size, method='bilinear', align_corners=False):
    """ Resize *images* to *size* with the named interpolation *method*.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/image/resize_images .

    Args
        method: The method used for interpolation. One of ('bilinear', 'nearest', 'bicubic', 'area').
    """
    # Translate the string name to the TensorFlow ResizeMethod enum value;
    # an unknown name raises KeyError, same as the original mapping.
    interpolation = {
        'bilinear': tensorflow.image.ResizeMethod.BILINEAR,
        'nearest' : tensorflow.image.ResizeMethod.NEAREST_NEIGHBOR,
        'bicubic' : tensorflow.image.ResizeMethod.BICUBIC,
        'area'    : tensorflow.image.ResizeMethod.AREA,
    }[method]
    return tensorflow.image.resize_images(images, size, interpolation, align_corners)
def non_max_suppression(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.image.non_max_suppression``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/image/non_max_suppression .
    """
    return tensorflow.image.non_max_suppression(*args, **kwargs)
def range(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.range``.
    Note: deliberately shadows the builtin ``range`` within this module,
    matching the backend's naming of the wrapped TF op.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/range .
    """
    return tensorflow.range(*args, **kwargs)
def scatter_nd(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.scatter_nd``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/scatter_nd .
    """
    return tensorflow.scatter_nd(*args, **kwargs)
def gather_nd(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.gather_nd``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/gather_nd .
    """
    return tensorflow.gather_nd(*args, **kwargs)
def meshgrid(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.meshgrid``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/meshgrid .
    """
    return tensorflow.meshgrid(*args, **kwargs)
def where(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.where``.
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/where .
    """
    return tensorflow.where(*args, **kwargs)
def unstack(*args, **kwargs):
    """ Thin backend passthrough to ``tensorflow.unstack``.
    See https://www.tensorflow.org/api_docs/python/tf/unstack .
    """
    return tensorflow.unstack(*args, **kwargs)
| 31.558559 | 101 | 0.693977 |
5592f9552a6485110da7f9759464210699a1ff2c | 978 | py | Python | rules/php/CVI_1014.py | kingsec666/Cobra-W | 322e7f04a929f48bd67242eb557ca23b176ffdbb | [
"MIT"
] | 17 | 2018-12-09T01:57:53.000Z | 2021-07-14T07:25:17.000Z | rules/php/CVI_1014.py | zer0yu/Cobra-W | 7e8b5b99eac3bbfb6a8f8a6c822689498c7381ea | [
"MIT"
] | 1 | 2021-11-15T17:52:13.000Z | 2021-11-15T17:52:13.000Z | rules/php/CVI_1014.py | devsecops-SRC/Cobra-W | 322e7f04a929f48bd67242eb557ca23b176ffdbb | [
"MIT"
] | 3 | 2018-11-07T14:39:16.000Z | 2021-03-18T01:39:26.000Z | # -*- coding: utf-8 -*-
"""
CVI-1014
~~~~
variable shadowing
:author: LoRexxar <LoRexxar@gmail.com>
:homepage: https://github.com/LoRexxar/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from cobra.file import file_grep
class CVI_1014():
    """Cobra rule CVI-1014: PHP variable shadowing.

    Flags calls to PHP functions that can inject request-controlled
    variables into the current scope (variable shadowing).
    """
    def __init__(self):
        # Matching configuration: flag any of these PHP functions when a
        # tainted parameter reaches them.
        self.match_mode = "function-param-regex"
        self.match = "import_request_variables|parse_str|mb_parse_str|extract"
        self.vul_function = None
        # Rule identity and metadata.
        self.svid = 1014
        self.language = "PHP"
        self.author = "LoRexxar/wufeifei"
        self.vulnerability = "variable shadowing"
        self.description = "variable shadowing"
        # Whether the rule is enabled.
        self.status = True
    def main(self, regex_string):
        """Hook invoked with the regex-matched string; this rule needs no
        extra per-match logic, so it is a no-op.
        """
        pass
dcaff7c7758dba88235974a62bfe01268572e3e0 | 3,753 | py | Python | avm_client/exceptions.py | ENBISYS/avmPython | daa3686ea431e752687c915e2f5b6f65b6c77130 | [
"Unlicense"
] | null | null | null | avm_client/exceptions.py | ENBISYS/avmPython | daa3686ea431e752687c915e2f5b6f65b6c77130 | [
"Unlicense"
] | null | null | null | avm_client/exceptions.py | ENBISYS/avmPython | daa3686ea431e752687c915e2f5b6f65b6c77130 | [
"Unlicense"
] | null | null | null | # coding: utf-8
"""
AVM
This is api for AVM (automated valuation machine) # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: info@enbisys.com
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
    """Base class for every exception raised by this generated API client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """ Raises an exception for TypeErrors

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): a list of keys an indices to get to the
                                 current_item
                                 None if unset
            valid_classes (tuple): the primitive classes that current item
                                   should be an instance of
                                   None if unset
            key_type (bool): False if our value is a value in a dict
                             True if it is a key in a dict
                             False if our item is an item in a list
                             None if unset
        """
        # Keep the context around so callers can inspect where the type
        # mismatch occurred and what was expected.
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        full_msg = msg
        if path_to_item:
            # Append a rendered '[0]["key"]'-style path to the message.
            full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """ValueError that remembers where in the received data it arose.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict. None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            # Append a rendered '[0]["key"]'-style path to the message.
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """KeyError that remembers where in the received data it arose.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            # Append a rendered '[0]["key"]'-style path to the message.
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(OpenApiException):
    """HTTP-level failure, either built from a response object or from an
    explicit status/reason pair."""

    def __init__(self, status=None, reason=None, http_resp=None):
        if http_resp:
            # Pull all details off the HTTP response object.
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Render a multi-line description of the HTTP error."""
        parts = ["({0})\n".format(self.status),
                 "Reason: {0}\n".format(self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
def render_path(path_to_item):
    """Render *path_to_item* as a subscript chain, e.g. ``[0]['name']``.

    Integer components become ``[i]``; everything else is quoted as a
    string key ``['key']``.
    """
    rendered = []
    for part in path_to_item:
        if isinstance(part, six.integer_types):
            rendered.append("[{0}]".format(part))
        else:
            rendered.append("['{0}']".format(part))
    return "".join(rendered)
9f73291db98b0f088dde14e0040f83879e60fc89 | 3,655 | py | Python | LocalDataStore/main_logic.py | VamsiMudaliar/File-Based-Key-Valued-DataStore- | 36f39476e521c1a75970a32465ff829c7e218920 | [
"MIT"
] | 1 | 2020-12-31T17:14:56.000Z | 2020-12-31T17:14:56.000Z | LocalDataStore/main_logic.py | VamsiMudaliar/File-Based-Key-Valued-DataStore- | 36f39476e521c1a75970a32465ff829c7e218920 | [
"MIT"
] | null | null | null | LocalDataStore/main_logic.py | VamsiMudaliar/File-Based-Key-Valued-DataStore- | 36f39476e521c1a75970a32465ff829c7e218920 | [
"MIT"
] | 1 | 2021-01-21T08:33:47.000Z | 2021-01-21T08:33:47.000Z | import time
from threading import Thread
import json
from json.decoder import JSONDecodeError
FILE_LIMIT = 1024*1024*1024 # 1GB
KEY_LIMIT = 32 # max 32 characters
VALUE_LIMIT = 16*1024*1024 #16KB
MAX_CONNECTIONS = 1 # no of connections
DEFAULT_LOCATION = "./"
cache={} # our local DB
PATH_FORMED = DEFAULT_LOCATION + "new_obj.json"
class LocalStorageDB():
    """A tiny file-backed key/value store.

    Every operation re-reads the JSON file at the module-level PATH_FORMED,
    works on the decoded dict, and (for mutating operations) writes it back.
    A record is stored as ``key: [value, expiry]`` where ``expiry`` is an
    absolute ``time.time()`` timestamp, or 0 meaning "never expires".

    NOTE(review): every method silently swallows JSONDecodeError (e.g. an
    empty or corrupt store file); kept as-is to preserve the original
    best-effort behaviour.
    """
    def __init__(self):
        pass

    def create(self, key, value, time_to_live=0):
        """Insert *key* -> *value*, optionally expiring after
        *time_to_live* seconds (0 = never expires)."""
        try:
            with open(PATH_FORMED) as f:
                new_cache = json.load(f)
            if key in new_cache:
                print(f"ERROR : Key {key} Already Present...")
            elif key.isalpha():
                # NOTE(review): len(cache) counts keys of the (never
                # populated) module-level `cache` dict rather than the file
                # size in bytes, and `value <= VALUE_LIMIT` assumes a
                # numeric value -- both kept as-is to preserve behaviour;
                # confirm the intended limit semantics.
                if len(cache) < FILE_LIMIT and value <= VALUE_LIMIT:
                    if time_to_live == 0:
                        # 0 is the sentinel for "never expires".
                        curr_record = [value, time_to_live]
                    else:
                        # Store the absolute expiry timestamp.
                        curr_record = [value, time.time() + time_to_live]
                    # Enforce the maximum key length (32 characters).
                    if len(key) <= KEY_LIMIT:
                        new_cache[key] = curr_record
                        with open(PATH_FORMED, "w") as fl:
                            json.dump(new_cache, fl, indent=2)
                        print(f"Key {key} Inserted Successfully.")
                else:
                    print("ERROR : Memory Limit Exceed ! \n Note : File size and inserting value should be no more than 1GB and 16KB ")
            else:
                print(f"ERROR : Key Name not Valid ! \n Note : Key Name should only contain alphabets and can be at max {KEY_LIMIT} size")
        except JSONDecodeError:
            pass

    def read(self, key):
        """Return "{ key: value }" if *key* exists and has not expired;
        otherwise print an error and return None."""
        try:
            with open(PATH_FORMED) as f:
                new_cache = json.load(f)
            if key not in new_cache:
                print("ERROR : Key Not Found in Database ! ")
            else:
                value, expiry = new_cache[key]
                pattern = "{ " + str(key) + ": " + str(value) + " }"
                if expiry != 0:
                    if time.time() < expiry:
                        return pattern
                    else:
                        print("ERROR : Timer Expired Key No Longer Present in Database")
                else:
                    return pattern
        except JSONDecodeError:
            pass

    def delete(self, key):
        """Remove *key* from the store; expired keys are reported instead
        of deleted (matching the original behaviour)."""
        try:
            with open(PATH_FORMED) as f:
                new_cache = json.load(f)
            if key not in new_cache:
                print(f"ERROR : Key {key} Not Found in Database \n")
            else:
                value, expiry = new_cache[key]
                if expiry != 0:
                    if time.time() < expiry:
                        del new_cache[key]
                        print(f"Key {key} Deleted... \n")
                    else:
                        print(f"ERROR : Couldn't Delete, Key {key} has Already Expired ... \n")
                else:
                    del new_cache[key]
                    print(f"Key {key} Deleted... \n")
            # The file is rewritten even when nothing changed (original
            # behaviour preserved).
            with open(PATH_FORMED, 'w') as f:
                json.dump(new_cache, f, indent=2)
        except JSONDecodeError:
            pass

    def update(self, key, value, timer=0):
        """Replace the value of *key* before it expires.

        With ``timer > 0`` the key's lifetime is reset to *timer* seconds
        from now; otherwise the existing expiry is kept.
        """
        try:
            with open(PATH_FORMED) as ft:
                new_cache = json.load(ft)
            if key not in new_cache:
                print("ERROR : Key Not Found in Database")
            else:
                old_value, expiry = new_cache[key]
                if time.time() < expiry or expiry == 0:
                    if timer > 0:
                        # BUG FIX: store an *absolute* expiry timestamp, as
                        # create() does.  The original stored the raw
                        # duration (e.g. 30), which is always far in the
                        # past compared to time.time(), so every updated
                        # key expired immediately.
                        new_value = [value, time.time() + timer]
                    else:
                        new_value = [value, expiry]
                    new_cache[key] = new_value
                    with open(PATH_FORMED, "w") as fte:
                        json.dump(new_cache, fte, indent=2)
                    print("Values Updated..")
                else:
                    print("ERROR : Couldn't Update, Key has Already Expired \n")
        except JSONDecodeError:
            pass
eb96981c5ab5df6f3a806738df818f322c9df94f | 15,441 | py | Python | live/RpycHost/rpyc/core/stream.py | faroit/midihack | 5ce927576c4967499e164d95262a8391173c23ab | [
"CC0-1.0"
] | 4 | 2016-03-13T14:14:21.000Z | 2016-09-11T11:54:36.000Z | live/RpycHost/rpyc/core/stream.py | faroit/midihack | 5ce927576c4967499e164d95262a8391173c23ab | [
"CC0-1.0"
] | null | null | null | live/RpycHost/rpyc/core/stream.py | faroit/midihack | 5ce927576c4967499e164d95262a8391173c23ab | [
"CC0-1.0"
] | 1 | 2018-09-09T08:37:44.000Z | 2018-09-09T08:37:44.000Z | """
An abstraction layer over OS-dependent file-like objects, that provides a
consistent view of a *duplex byte stream*.
"""
import sys
import os
import socket
import time
import errno
from rpyc.lib import safe_import
from rpyc.lib.compat import select, select_error, BYTES_LITERAL, get_exc_errno, maxint
win32file = safe_import("win32file")
win32pipe = safe_import("win32pipe")
msvcrt = safe_import("msvcrt")
ssl = safe_import("ssl")
retry_errnos = (errno.EAGAIN, errno.EWOULDBLOCK)
class Stream(object):
    """Base Stream: the abstract interface of a duplex byte stream.
    Subclasses must implement close/closed/fileno/read/write."""
    __slots__ = ()
    def close(self):
        """closes the stream, releasing any system resources associated with it"""
        raise NotImplementedError()
    @property
    def closed(self):
        """tests whether the stream is closed or not"""
        raise NotImplementedError()
    def fileno(self):
        """returns the stream's file descriptor"""
        raise NotImplementedError()
    def poll(self, timeout):
        """indicates whether the stream has data to read (within *timeout*
        seconds)"""
        try:
            # select() accepts the stream object itself because it exposes
            # fileno().
            rl, _, _ = select([self], [], [], timeout)
        except ValueError:
            # i get this some times: "ValueError: file descriptor cannot be a negative integer (-1)"
            # let's translate it to select.error
            ex = sys.exc_info()[1]
            raise select_error(str(ex))
        return bool(rl)
class ClosedFile(object):
    """Represents a closed file object (singleton).  Streams swap their
    underlying file for this object on close, so any later use raises
    EOFError instead of operating on a dead descriptor."""
    __slots__ = ()
    def __getattr__(self, name):
        # Dunder lookups (e.g. by pickling or introspection) must raise
        # AttributeError, not EOFError (issue 71).
        if name.startswith("__"): # issue 71
            raise AttributeError("stream has been closed")
        raise EOFError("stream has been closed")
    def close(self):
        # Closing an already-closed stream is a no-op.
        pass
    @property
    def closed(self):
        return True
    def fileno(self):
        raise EOFError("stream has been closed")
# Replace the class with its single instance -- only the singleton is used.
ClosedFile = ClosedFile()
class SocketStream(Stream):
    """A stream over a socket"""
    __slots__ = ("sock",)
    # Largest chunk handed to a single recv()/send() call.
    MAX_IO_CHUNK = 8000
    def __init__(self, sock):
        self.sock = sock
    @classmethod
    def _connect(cls, host, port, family = socket.AF_INET, socktype = socket.SOCK_STREAM,
            proto = 0, timeout = 3, nodelay = False):
        # Internal helper: create and connect a raw socket with the given
        # options; returns the socket (not a stream).
        s = socket.socket(family, socktype, proto)
        s.settimeout(timeout)
        s.connect((host, port))
        if nodelay:
            # Disable Nagle's algorithm to reduce latency of small messages.
            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return s
    @classmethod
    def connect(cls, host, port, **kwargs):
        """factory method that creates a ``SocketStream`` over a socket connected
        to *host* and *port*

        :param host: the host name
        :param port: the TCP port
        :param kwargs: additional keyword arguments: ``family``, ``socktype``,
                       ``proto``, ``timeout``, ``nodelay``, passed directly to
                       the ``socket`` constructor, or ``ipv6``.
        :param ipv6: if True, creates an IPv6 socket (``AF_INET6``); otherwise
                     an IPv4 (``AF_INET``) socket is created

        :returns: a :class:`SocketStream`
        """
        if kwargs.pop("ipv6", False):
            kwargs["family"] = socket.AF_INET6
        return cls(cls._connect(host, port, **kwargs))
    @classmethod
    def unix_connect(cls, path, timeout = 3):
        """factory method that creates a ``SocketStream `` over a unix domain socket
        located in *path*

        :param path: the path to the unix domain socket
        :param timeout: socket timeout
        """
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.settimeout(timeout)
        s.connect(path)
        return cls(s)
    @classmethod
    def ssl_connect(cls, host, port, ssl_kwargs, **kwargs):
        """factory method that creates a ``SocketStream`` over an SSL-wrapped
        socket, connected to *host* and *port* with the given credentials.

        :param host: the host name
        :param port: the TCP port
        :param ssl_kwargs: a dictionary of keyword arguments to be passed
                           directly to ``ssl.wrap_socket``
        :param kwargs: additional keyword arguments: ``family``, ``socktype``,
                       ``proto``, ``timeout``, ``nodelay``, passed directly to
                       the ``socket`` constructor, or ``ipv6``.
        :param ipv6: if True, creates an IPv6 socket (``AF_INET6``); otherwise
                     an IPv4 (``AF_INET``) socket is created

        :returns: a :class:`SocketStream`
        """
        if kwargs.pop("ipv6", False):
            kwargs["family"] = socket.AF_INET6
        # Connect first, then wrap the plain socket with SSL.
        s = cls._connect(host, port, **kwargs)
        s2 = ssl.wrap_socket(s, **ssl_kwargs)
        return cls(s2)
    @property
    def closed(self):
        return self.sock is ClosedFile
    def close(self):
        if not self.closed:
            try:
                # Politely signal EOF to the peer; ignore failures (e.g. the
                # peer already dropped the connection).
                self.sock.shutdown(socket.SHUT_RDWR)
            except Exception:
                pass
            self.sock.close()
            # From now on, any use of the stream raises EOFError.
            self.sock = ClosedFile
    def fileno(self):
        try:
            return self.sock.fileno()
        except socket.error:
            self.close()
            ex = sys.exc_info()[1]
            if get_exc_errno(ex) == errno.EBADF:
                # Descriptor already invalid -- treat it as end-of-stream.
                raise EOFError()
            else:
                raise
    def read(self, count):
        # Accumulate chunks until exactly *count* bytes were received.
        data = []
        while count > 0:
            try:
                buf = self.sock.recv(min(self.MAX_IO_CHUNK, count))
            except socket.timeout:
                continue
            except socket.error:
                ex = sys.exc_info()[1]
                if get_exc_errno(ex) in retry_errnos:
                    # windows just has to be a bitch
                    continue
                self.close()
                raise EOFError(ex)
            if not buf:
                self.close()
                raise EOFError("connection closed by peer")
            data.append(buf)
            count -= len(buf)
        return BYTES_LITERAL("").join(data)
    def write(self, data):
        try:
            # send() may write fewer bytes than requested; loop until done.
            while data:
                count = self.sock.send(data[:self.MAX_IO_CHUNK])
                data = data[count:]
        except socket.error:
            ex = sys.exc_info()[1]
            self.close()
            raise EOFError(ex)
class TunneledSocketStream(SocketStream):
    """A :class:`SocketStream` whose socket runs through an
    :class:`rpyc.utils.ssh.SshTunnel`; closing the stream also tears the
    tunnel down."""
    __slots__ = ("tun",)
    def __init__(self, sock):
        # The tunnel is attached by the caller after construction.
        self.tun = None
        self.sock = sock
    def close(self):
        # Shut the socket first, then the tunnel that carried it.
        SocketStream.close(self)
        tunnel = self.tun
        if tunnel:
            tunnel.close()
class PipeStream(Stream):
    """A stream over two simplex pipes (one used to input, another for output)"""
    __slots__ = ("incoming", "outgoing")
    # Largest chunk handed to a single os.read()/os.write() call.
    MAX_IO_CHUNK = 32000
    def __init__(self, incoming, outgoing):
        # Flush anything already buffered on the outgoing side so the two
        # ends start synchronized.
        outgoing.flush()
        self.incoming = incoming
        self.outgoing = outgoing
    @classmethod
    def from_std(cls):
        """factory method that creates a PipeStream over the standard pipes
        (``stdin`` and ``stdout``)

        :returns: a :class:`PipeStream` instance
        """
        return cls(sys.stdin, sys.stdout)
    @classmethod
    def create_pair(cls):
        """factory method that creates two pairs of anonymous pipes, and
        creates two PipeStreams over them. Useful for ``fork()``.

        :returns: a tuple of two :class:`PipeStream` instances
        """
        # Each side reads from one pipe and writes to the other.
        r1, w1 = os.pipe()
        r2, w2 = os.pipe()
        side1 = cls(os.fdopen(r1, "rb"), os.fdopen(w2, "wb"))
        side2 = cls(os.fdopen(r2, "rb"), os.fdopen(w1, "wb"))
        return side1, side2
    @property
    def closed(self):
        return self.incoming is ClosedFile
    def close(self):
        self.incoming.close()
        self.outgoing.close()
        # Swap in the ClosedFile singleton so later use raises EOFError.
        self.incoming = ClosedFile
        self.outgoing = ClosedFile
    def fileno(self):
        # Poll on the read end of the pair.
        return self.incoming.fileno()
    def read(self, count):
        # Accumulate chunks until exactly *count* bytes were received.
        data = []
        try:
            while count > 0:
                buf = os.read(self.incoming.fileno(), min(self.MAX_IO_CHUNK, count))
                if not buf:
                    raise EOFError("connection closed by peer")
                data.append(buf)
                count -= len(buf)
        except EOFError:
            self.close()
            raise
        except EnvironmentError:
            # sys.exc_info() is used instead of 'except ... as' for py2/py3
            # compatibility of this codebase.
            ex = sys.exc_info()[1]
            self.close()
            raise EOFError(ex)
        return BYTES_LITERAL("").join(data)
    def write(self, data):
        try:
            # os.write() may write fewer bytes than requested; loop until done.
            while data:
                chunk = data[:self.MAX_IO_CHUNK]
                written = os.write(self.outgoing.fileno(), chunk)
                data = data[written:]
        except EnvironmentError:
            ex = sys.exc_info()[1]
            self.close()
            raise EOFError(ex)
class Win32PipeStream(Stream):
    """A stream over two simplex pipes (one used to input, another for output).
    This is an implementation for Windows pipes (which suck)"""

    __slots__ = ("incoming", "outgoing", "_fileno", "_keepalive")
    PIPE_BUFFER_SIZE = 130000   # size passed to CreatePipe in create_pair()
    MAX_IO_CHUNK = 32000        # max bytes per ReadFile/WriteFile call

    def __init__(self, incoming, outgoing):
        # keep the original file objects alive so their OS handles are not
        # released by garbage collection while we only hold raw handles
        self._keepalive = (incoming, outgoing)
        if hasattr(incoming, "fileno"):
            # NOTE(review): _fileno is only assigned on this branch; when
            # `incoming` is already a raw win32 handle, fileno() would raise
            # AttributeError -- presumably callers never poll such streams
            self._fileno = incoming.fileno()
            incoming = msvcrt.get_osfhandle(incoming.fileno())
        if hasattr(outgoing, "fileno"):
            outgoing = msvcrt.get_osfhandle(outgoing.fileno())
        self.incoming = incoming
        self.outgoing = outgoing

    @classmethod
    def from_std(cls):
        """Create a stream over the process's stdin/stdout."""
        return cls(sys.stdin, sys.stdout)

    @classmethod
    def create_pair(cls):
        """Create two connected streams over two anonymous win32 pipes."""
        r1, w1 = win32pipe.CreatePipe(None, cls.PIPE_BUFFER_SIZE)
        r2, w2 = win32pipe.CreatePipe(None, cls.PIPE_BUFFER_SIZE)
        return cls(r1, w2), cls(r2, w1)

    def fileno(self):
        return self._fileno

    @property
    def closed(self):
        return self.incoming is ClosedFile

    def close(self):
        # close both handles best-effort, then replace them with the
        # ClosedFile sentinel so later I/O fails fast
        if self.closed:
            return
        try:
            win32file.CloseHandle(self.incoming)
        except Exception:
            pass
        self.incoming = ClosedFile
        try:
            win32file.CloseHandle(self.outgoing)
        except Exception:
            pass
        self.outgoing = ClosedFile

    def read(self, count):
        """Read exactly ``count`` bytes; raise EOFError when the stream dies."""
        try:
            data = []
            while count > 0:
                dummy, buf = win32file.ReadFile(self.incoming, int(min(self.MAX_IO_CHUNK, count)))
                count -= len(buf)
                data.append(buf)
        except TypeError:
            # TypeError while the stream is closed -- presumably because the
            # handle was replaced by the ClosedFile sentinel -- is treated as
            # EOF; while still open it is a genuine bug and must propagate
            ex = sys.exc_info()[1]
            if not self.closed:
                raise
            raise EOFError(ex)
        except win32file.error:
            ex = sys.exc_info()[1]
            self.close()
            raise EOFError(ex)
        return BYTES_LITERAL("").join(data)

    def write(self, data):
        """Write all of ``data``; raise EOFError when the stream dies."""
        try:
            while data:
                dummy, count = win32file.WriteFile(self.outgoing, data[:self.MAX_IO_CHUNK])
                data = data[count:]
        except TypeError:
            # same ClosedFile-sentinel translation as in read()
            ex = sys.exc_info()[1]
            if not self.closed:
                raise
            raise EOFError(ex)
        except win32file.error:
            ex = sys.exc_info()[1]
            self.close()
            raise EOFError(ex)

    def poll(self, timeout, interval = 0.1):
        """a poor man's version of select()"""
        if timeout is None:
            timeout = maxint    # effectively wait forever
        length = 0
        tmax = time.time() + timeout
        try:
            # busy-wait: PeekNamedPipe reports how many bytes are available
            # without consuming them; sleep `interval` between probes
            while length == 0:
                length = win32pipe.PeekNamedPipe(self.incoming, 0)[1]
                if time.time() >= tmax:
                    break
                time.sleep(interval)
        except TypeError:
            # same ClosedFile-sentinel translation as in read()
            ex = sys.exc_info()[1]
            if not self.closed:
                raise
            raise EOFError(ex)
        return length != 0
class NamedPipeStream(Win32PipeStream):
    """A stream over two named pipes (one used to input, another for output).
    Windows implementation."""

    NAMED_PIPE_PREFIX = r'\\.\pipe\rpyc_'
    PIPE_IO_TIMEOUT = 3     # seconds; converted to ms for CreateNamedPipe
    CONNECT_TIMEOUT = 3     # seconds; not referenced in the code shown here

    __slots__ = ("is_server_side",)

    def __init__(self, handle, is_server_side):
        # a named pipe handle is duplex: the same handle serves both
        # directions, hence it is passed as incoming AND outgoing
        Win32PipeStream.__init__(self, handle, handle)
        self.is_server_side = is_server_side

    @classmethod
    def from_std(cls):
        # stdin/stdout are not named pipes
        raise NotImplementedError()

    @classmethod
    def create_pair(cls):
        # use create_server()/create_client() instead
        raise NotImplementedError()

    @classmethod
    def create_server(cls, pipename, connect = True):
        """factory method that creates a server-side ``NamedPipeStream``, over
        a newly-created *named pipe* of the given name.

        :param pipename: the name of the pipe. It will be considered absolute if
                         it starts with ``\\\\.``; otherwise ``\\\\.\\pipe\\rpyc``
                         will be prepended.
        :param connect: whether to connect on creation or not

        :returns: a :class:`NamedPipeStream` instance
        """
        if not pipename.startswith("\\\\."):
            pipename = cls.NAMED_PIPE_PREFIX + pipename
        handle = win32pipe.CreateNamedPipe(
            pipename,
            win32pipe.PIPE_ACCESS_DUPLEX,
            win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE | win32pipe.PIPE_WAIT,
            1,                            # at most one instance of this pipe
            cls.PIPE_BUFFER_SIZE,         # outbound buffer size
            cls.PIPE_BUFFER_SIZE,         # inbound buffer size
            cls.PIPE_IO_TIMEOUT * 1000,   # default timeout, in milliseconds
            None                          # default security attributes
        )
        inst = cls(handle, True)
        if connect:
            inst.connect_server()
        return inst

    def connect_server(self):
        """connects the server side of an unconnected named pipe (blocks
        until a connection arrives)"""
        if not self.is_server_side:
            raise ValueError("this must be the server side")
        win32pipe.ConnectNamedPipe(self.incoming, None)

    @classmethod
    def create_client(cls, pipename):
        """factory method that creates a client-side ``NamedPipeStream``, over
        a newly-created *named pipe* of the given name.

        :param pipename: the name of the pipe. It will be considered absolute if
                         it starts with ``\\\\.``; otherwise ``\\\\.\\pipe\\rpyc``
                         will be prepended.

        :returns: a :class:`NamedPipeStream` instance
        """
        if not pipename.startswith("\\\\."):
            pipename = cls.NAMED_PIPE_PREFIX + pipename
        handle = win32file.CreateFile(
            pipename,
            win32file.GENERIC_READ | win32file.GENERIC_WRITE,
            0,                            # no sharing
            None,                         # default security attributes
            win32file.OPEN_EXISTING,      # the server must have created it
            0,                            # default attributes/flags
            None                          # no template file
        )
        return cls(handle, False)

    def close(self):
        if self.closed:
            return
        if self.is_server_side:
            # make sure the client has received everything, then detach the
            # pipe instance before releasing the handle
            win32file.FlushFileBuffers(self.outgoing)
            win32pipe.DisconnectNamedPipe(self.outgoing)
        Win32PipeStream.close(self)
# On Windows the POSIX anonymous-pipe implementation is unusable
# (os.pipe() fds cannot be polled the same way), so the win32-handle
# based implementation replaces PipeStream wholesale.
if sys.platform == "win32":
    PipeStream = Win32PipeStream
| 32.783439 | 100 | 0.568746 |
6b17f6957b8352a6bc63c504c285cdc81eeae51c | 583 | py | Python | utils/logger.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | 5 | 2020-08-24T17:57:51.000Z | 2021-06-06T18:18:19.000Z | utils/logger.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | null | null | null | utils/logger.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | 1 | 2020-08-29T00:35:36.000Z | 2020-08-29T00:35:36.000Z | import logging
def setup_logger(logger_name, log_file_path):
    """Create (or fetch) a logger writing INFO+ records to console and file.

    The logger itself is set to DEBUG while both handlers filter at INFO,
    so DEBUG records are accepted by the logger but not emitted.

    Fix over the original: ``logging.getLogger`` returns the same object
    for the same name, and the original unconditionally re-added handlers,
    so calling it twice duplicated every log line. Handlers are now added
    only once per logger name.

    :param logger_name: name passed to :func:`logging.getLogger`
    :param log_file_path: path of the log file (opened by FileHandler)
    :returns: the configured :class:`logging.Logger`
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # already configured on a previous call -- avoid duplicate handlers
        return logger
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # file handler
    fh = logging.FileHandler(log_file_path)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger
| 24.291667 | 89 | 0.703259 |
42d890ba20060b9b5e70ae72d8b1f482b27b2343 | 972 | py | Python | buildroot-2017.08/support/testing/tests/core/test_rootfs_overlay.py | xregist/v3s-linux-sdk | a2b013e3959662d65650a13fc23ec1cd503865eb | [
"Apache-2.0"
] | 4 | 2020-04-28T02:12:34.000Z | 2021-02-23T01:57:39.000Z | buildroot-2017.08/support/testing/tests/core/test_rootfs_overlay.py | xregist/v3s-linux-sdk | a2b013e3959662d65650a13fc23ec1cd503865eb | [
"Apache-2.0"
] | null | null | null | buildroot-2017.08/support/testing/tests/core/test_rootfs_overlay.py | xregist/v3s-linux-sdk | a2b013e3959662d65650a13fc23ec1cd503865eb | [
"Apache-2.0"
] | 5 | 2018-05-16T16:36:08.000Z | 2020-11-04T05:51:11.000Z | import os
import subprocess
import infra.basetest
def compare_file(file1, file2):
    """Compare two files byte-for-byte.

    Returns 0 when the files are identical and 1 otherwise, mirroring the
    exit status of the external ``cmp`` tool the original shelled out to,
    but using the standard library so the host does not need ``cmp``
    installed. ``shallow=False`` forces a content comparison rather than a
    stat-based one. Unlike ``cmp`` (which exited with 2), a missing file
    raises OSError.
    """
    import filecmp
    return 0 if filecmp.cmp(file1, file2, shallow=False) else 1
class TestRootfsOverlay(infra.basetest.BRTest):
    """Check that files from both BR2_ROOTFS_OVERLAY dirs land in target/."""

    rootfs_overlay_path = infra.filepath("tests/core/rootfs-overlay")
    rootfs_overlay = "BR2_ROOTFS_OVERLAY=\"{0}1 {0}2\"".format(rootfs_overlay_path)
    config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
        infra.basetest.MINIMAL_CONFIG + \
        rootfs_overlay

    def _check_overlay_file(self, overlay_file, *target_parts):
        # the overlay file must have been copied verbatim into target/
        target_file = os.path.join(self.builddir, "target", *target_parts)
        self.assertEqual(compare_file(overlay_file, target_file), 0)

    def test_run(self):
        self._check_overlay_file(
            "{}1/test-file1".format(self.rootfs_overlay_path),
            "test-file1")
        self._check_overlay_file(
            "{}2/etc/test-file2".format(self.rootfs_overlay_path),
            "etc", "test-file2")
| 34.714286 | 83 | 0.691358 |
24e9209d50eeb5ca86fcebac9c8bfd73e193fe36 | 40,950 | py | Python | stdplugins/anime.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | 1 | 2020-08-09T11:43:20.000Z | 2020-08-09T11:43:20.000Z | stdplugins/anime.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | stdplugins/anime.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | """
Random Anime Chooser plugin for the userbot.
Usage: .{anime_genre} <count>  -- picks <count> (an integer) random titles from that genre.
By: @Zero_cool7870
"""
from telethon import events
import random
action_list = [
'6 Angels ',
'12 Beast ',
'Accel World ',
'Accel World: Infinite Burst ',
'Adventures of Kotetsu ',
'Afro Samurai ',
'Agent Aika ',
'Aika R-16: Virgin Mission ',
'Air Gear ',
'Air Master ',
'Akakichi no Eleven ',
'Akashic Records of Bastard Magic Instructor ',
'Akumetsu ',
'Alive: The Final Evolution ',
'All Purpose Cultural Cat-Girl Nuku Nuku DASH! ',
'Amakusa 1637 ',
'Amefurashi ',
'Angel/Dust ',
'Angel Links ',
"Angel's Feather ",
'Anne Freaks ',
'Apocalypse Zero ',
'Aquarion Evol ',
'Aquarion Logos ',
'Arc the Lad ',
'Aria the Scarlet Ammo ',
"Armed Girl's Machiavellism ",
'Armitage III ',
'Armored Trooper Votoms ',
'Armored Trooper Votoms: Pailsen Files ',
'Arpeggio of Blue Steel ',
'Ashizuri Suizokukan ',
'The Asterisk War ',
'Aventura (manga) ',
'B.B. (manga) ',
'Bakumatsu Gijinden Roman ',
'Bambi and Her Pink Gun ',
'Baoh ',
'Basquash! ',
'Bastard!! ',
'Bat-Manga!: The Secret History of Batman in Japan ',
'Battle Rabbits ',
'Beelzebub (manga) ',
'Ben-To ',
'Berserk (2016 TV series) ',
'Birdy the Mighty ',
'Birth (anime) ',
'Black Bullet ',
'Black God (manga) ',
'Blame! ',
'Blame! (film) ',
'Blassreiter ',
'Blood-C: The Last Dark ',
'Blood: The Last Vampire ',
'Blue Blink ',
'Blue Seed ',
'Blue Sheep Reverie ',
'Bogle (manga) ',
'Boruto: Naruto the Movie ',
'Brave 10 ',
'Broken Blade ',
'Brotherhood: Final Fantasy XV ',
'Btooom! ',
'Bubblegum Crisis ',
'Bungo Stray Dogs ',
'Burn Up! ',
'Burn-Up Excess ',
'Burn-Up Scramble ',
'Burn-Up W ',
'Butlers: Chitose Momotose Monogatari ',
'C (TV series) ',
'C3 (novel series) ',
'Campus Special Investigator Hikaruon ',
'Caravan Kidd ',
'The Castle of Cagliostro ',
'Cat Paradise ',
'A Certain Magical Index ',
'Chivalry of a Failed Knight ',
'Chōsoku Henkei Gyrozetter ',
'Chronos Ruler ',
'City Hunter ',
'Clockwork Planet ',
'Cluster Edge ',
'Comedy (2002 film) ',
'Coppelion ',
'Cowboy Bebop ',
'Cowboy Bebop: The Movie ',
'Crimson Spell ',
'Crown (manga) ',
'Crusher Joe ',
'D4 Princess ',
'The Dagger of Kamui ',
'Daigunder ',
'Dance in the Vampire Bund ',
'Daphne in the Brilliant Blue ',
'Darkside Blues ',
'Debutante Detective Corps ',
'Demon City Shinjuku ',
'Demonizer Zilch ',
'Dennō Bōkenki Webdiver ',
'Desert Punk ',
'The Devil of the Earth ',
'Devilman ',
'Dimension W ',
'DJ (2013 anime) ',
'Dog Days (Japanese TV series) ',
'Dragon Ball Z: Bardock – The Father of Goku ',
'Dragon Ball Z: The History of Trunks ',
'Dragon Crisis! ',
'Dream Eater Merry ',
'Durarara!! ',
'Dynamic Heroes ',
"E's ",
"Eden: It's an Endless World! ",
"Eden's Bowy ",
'Ehrgeiz (TV series) ',
'Elementalors ',
"The Enemy's the Pirates! ",
'Fairy Gone ',
'Final Fantasy: Unlimited ',
'Flag (TV series) ',
'FLCL ',
'Freesia (manga) ',
'Freezing (manga) ',
'Full Metal Panic! ',
"Full-Blast Science Adventure – So That's How It Is ",
'Futakoi Alternative ',
'G-On Riders ',
'Ga-Rei ',
'Gaist Crusher ',
'The Galaxy Railways ',
'Gantz ',
'Gantz: O ',
'Genesis of Aquarion ',
'Ghost in the Shell: Stand Alone Complex ',
'Giant Gorg ',
'Girls und Panzer ',
'Glass Maiden ',
'Gokudo the Adventurer ',
'Grenadier (manga) ',
'Grey (manga) ',
'Gulliver Boy ',
'Gunslinger Stratos: The Animation ',
'Guyver: The Bioboosted Armor ',
'Hajime no Ippo ',
'Hanako and the Terror of Allegory ',
'Hand Shakers ',
'Happy World! ',
'Hayate × Blade ',
'Hero Heel ',
'Hero Mask ',
'Hidamari no Ki ',
'Highlander: The Search for Vengeance ',
'Holy Talker ',
'Hyakka Ryōran ',
'Immortal Grand Prix ',
'Iron Virgin Jun ',
'The Irregular at Magic High School The Movie: The Girl Who Calls the Stars ',
'The Irregular at Magic High School ',
'Sword Oratoria ',
'Isuca ',
'Izetta: The Last Witch ',
'Japan (1992 manga) ',
'Jibaku-kun ',
'Jungle Book Shōnen Mowgli ',
'Jungle King Tar-chan ',
'Junk Force (manga) ',
'Junk: Record of the Last Hero ',
'Jushin Liger (TV series) ',
'The Kabocha Wine ',
'Kacchū no Senshi Gamu ',
'Kaiji (manga) ',
'Kamikaze (manga) ',
'Kamiyadori ',
'Kämpfer ',
'Kamui (manga series) ',
'Karakuri Circus ',
'Katanagatari ',
'Kaze ga Gotoku ',
'Kaze no Stigma ',
'Kemurikusa ',
'Kengan Ashura ',
'Kenka Shōbai ',
'Kick-Heart ',
'Kill la Kill ',
'The King Kong Show ',
'The King of Braves GaoGaiGar Final ',
'Kinnikuman ',
'Kishin Corps ',
'Kite (1998 film) ',
'Kite Liberator ',
'Kiznaiver ',
'Knights of Ramune ',
'Koihime Musō ',
'Kon Kon Kokon ',
'Kongō Banchō ',
'Kōtetsu Sangokushi ',
'Kōya no Shōnen Isamu ']
adventure_list = [
'3×3 Eyes ',
'12 Beast ',
'801 T.T.S. Airbats ',
'3000 Leagues in Search of Mother ',
'Acrobunch ',
'The Adventure of Rock ',
'The Adventures of Hutch the Honeybee ',
'The Adventures of Pepero ',
'The Adventures of Peter Pan ',
'The Adventures of Lolo the Penguin ',
'Adventures of the Little Koala ',
'The Adventures of the Little Prince (TV series) ',
'Aesop World ',
'Age of Adventure ',
'Agent Aika ',
'Ai Tenchi Muyo! ',
'Aika R-16: Virgin Mission ',
'Akame ga Kill! ',
'Aladdin and the Wonderful Lamp (1982 film) ',
'Alakazam the Great ',
'Alice SOS ',
'Alive: The Final Evolution ',
'All You Need Is Kill ',
'Allison (novel series) ',
'Allison & Lillia ',
'Amon Saga ',
'Angel Links ',
'Angelic Layer ',
'Anime Ganbare Goemon ',
'Aqua Knight ',
'Arata: The Legend ',
'Arcadia of My Youth ',
'Arcadia of My Youth: Endless Orbit SSX ',
'Argento Soma ',
'Armitage III ',
'Astro Boy ',
'Attack on Titan ',
'Attack on Titan: No Regrets ',
'Aura Battler Dunbine ',
"B't X ",
'Baby Birth ',
'Baccano! ',
'BakéGyamon ',
'Bakugan Battle Brawlers ',
'Barrage (manga) ',
'Basilisk (manga) ',
'Bat-Manga!: The Secret History of Batman in Japan ',
'Battle B-Daman ',
'Battle Tendency ',
'Bayonetta: Bloody Fate ',
"Beast Wars II: Lio Convoy's Close Call! ",
'Beet the Vandel Buster ',
'Berserk (1997 TV series) ',
'Berserk (manga) ',
'Berserk: The Golden Age Arc ',
'The Betrayal Knows My Name ',
'Betterman (TV series) ',
'Beyond the Beyond (manga) ',
'Big Wars ',
'Birth (anime) ',
'Black Cat (manga) ',
'Black Clover ',
'Black Lagoon ',
'Blade of the Phantom Master ',
'Blank Slate (manga) ',
'Bleach (manga) ',
'Bleach (TV series) ',
'Blood Blockade Battlefront ',
'Blood Lad ',
'Blood-C ',
'Blue Dragon (TV series) ',
'Blue Exorcist ',
'Blue Inferior ',
'Blue Sonnet ',
'Bobobo-bo Bo-bobo ',
'Bomberman Jetters ',
'Boruto: Naruto Next Generations ',
'Boruto: Naruto the Movie ',
'Bosco Adventure ',
'Brave Story ',
'Bumpety Boo ',
'Burst Angel ',
'Buso Renkin ',
'Campus Special Investigator Hikaruon ',
'Capricorn (manga) ',
'Captain Harlock: Dimensional Voyage ',
'Caravan Kidd ',
'Castle in the Sky ',
'The Castle of Cagliostro ',
'The Cat Returns ',
'Cat Soup ',
"Cat's Eye (manga) ",
'A Certain Scientific Railgun ',
'Chrono Crusade ',
'Cinnamoroll ',
'Classical Medley ',
'Claymore (manga) ',
'Cleopatra DC ',
'Cobra (manga) ',
'Code Geass ',
'Cosmo Warrior Zero ',
'The Cosmopolitan Prayers ',
'Cowa! ',
'Coyote Ragtime Show ',
'Crimson Spell ',
'Croket! ',
'Cross Manage ',
'Crusher Joe ',
'Cutie Honey Universe ',
'D.Gray-man ',
'The Dagger of Kamui ',
'Dai-Guard ',
'Dangaioh ',
'Dead Leaves ',
'Deadman Wonderland ',
'Dear (manga) ',
'Demon Slayer: Kimetsu no Yaiba ',
'Detonator Orgun ',
'Devil May Cry: The Animated Series ',
'The Devil of the Earth ',
'Devils and Realist ',
'Diamond Is Unbreakable ',
'Digimon Adventure ',
'Digimon Adventure 02 ',
'Digimon Adventure tri. ',
'Digimon Data Squad ',
'Digimon Frontier ',
'Digimon Fusion ',
'Digimon Tamers ',
'Dinozaurs ',
'Divergence Eve ',
'Dōbutsu no Mori (film) ',
'Dog Days (Japanese TV series) ',
'Dogs (manga) ',
'Doraemon: Nobita and the Birth of Japan 2016 ',
'Dorohedoro ',
'Double Decker! Doug & Kirill ',
'Dr. Stone ',
'Dragon Ball (manga) ',
'Dragon Ball (TV series) ',
'Dragon Ball GT ',
'Dragon Ball Super ',
'Dragon Ball Super: Broly ',
'Dragon Ball Z ',
'Dragon Drive ',
'Dragon Eye (manga) ',
'Dragon Half ',
'Dragon Knights ',
'Dragon Quest: The Adventure of Dai ',
'Drifters (manga) ',
'DT Eightron ',
'Eagle Riders ',
'Eat-Man ',
"Eden's Bowy ",
'El Cazador de la Bruja ',
'ĒlDLIVE ',
'Elementalors ',
'Ellcia ',
"Elmer's Adventure: My Father's Dragon ",
'Engage Planet Kiss Dum ',
'Eureka Seven ',
'Fairy Tail ',
'Famous Dog Lassie ',
'Fate/Zero ',
'Fighting Foodons ',
'Fire Force ',
'Firestorm (TV series) ',
'The First King Adventure ',
'Flame of Recca ',
'Flint the Time Detective ',
'Flower in a Storm ',
'Food Wars!: Shokugeki no Soma ',
'The Fossil Island ',
"Full-Blast Science Adventure – So That's How It Is ",
'Fullmetal Alchemist ',
'Fullmetal Alchemist (TV series) ',
'Fullmetal Alchemist: Brotherhood ',
'Future Boy Conan ',
'Future War 198X ',
'Gaba Kawa ',
'Gad Guard ',
'Galactic Gale Baxingar ',
'Galactic Whirlwind Sasuraiger ',
'Galaxy Cyclone Braiger ',
'Gall Force ',
'Gamba no Bōken ',
'Gangsta (manga) ',
'Gatchaman (OVA) ',
'Gatchaman Fighter ',
'Gatchaman II ',
'Gate Keepers ',
'Generator Gawl ',
'Geneshaft ',
'Genesis Climber MOSPEADA ',
'Gestalt (manga) ',
'GetBackers ',
'Gin Tama ',
'God Mazinger ',
'Golden Kamuy ']
harem_list = [
'1+2=Paradise ',
'Ah My Buddha ',
'Ai Tenchi Muyo! ',
'Ai Yori Aoshi ',
'Akashic Records of Bastard Magic Instructor ',
'Amagami ',
'The Ambition of Oda Nobuna ',
'Angel/Dust Neo ',
'Angel Tales ',
"Arifureta: From Commonplace to World's Strongest ",
'The Asterisk War ',
'Behind Closed Doors (anime) ',
'Bladedance of Elementalers ',
'Brothers Conflict ',
'C3 (novel series) ',
'Campione! ',
'Cat Planet Cuties ',
'Change 123 ',
'Clear (visual novel) ',
'D-Frag! ',
'Da Capo (visual novel) ',
'Da Capo III ',
'Death March to the Parallel World Rhapsody ',
'Demon King Daimao ',
'Dog Days (Japanese TV series) ',
'Dual! Parallel Trouble Adventure ',
'Ebiten: Kōritsu Ebisugawa Kōkō Tenmonbu ',
'Elf-ban Kakyūsei ',
'FairlyLife ',
'The Familiar of Zero ',
'Fortune Arterial ',
'Futakoi ',
'G-On Riders ',
'Gift (visual novel) ',
'Girls Bravo ',
'Girls Saurus ',
'A Good Librarian Like a Good Shepherd ',
'Guardian Hearts ',
'Haganai ',
'Hakuoki ',
'Hanaukyo Maid Team ',
'Hand Maid May ',
'Happiness! (visual novel) ',
'Happy Lesson ',
'Harukoi Otome ',
'He Is My Master ',
"Heaven's Lost Property ",
'Hello, Good-bye ',
'The "Hentai" Prince and the Stony Cat. ',
'High School DxD ',
'Highschool of the Dead ',
'HoneyComing ',
'Hoshiuta ',
'Hoshizora e Kakaru Hashi ',
'How Not to Summon a Demon Lord ',
"I Couldn't Become a Hero, So I Reluctantly Decided to Get a Job. ",
"I Don't Like You at All, Big Brother!! ",
"I'm Gonna Be An Angel! ",
'If Her Flag Breaks ',
'Iketeru Futari ',
'Imouto Paradise 2 ',
'Imouto Paradise! ',
'In Another World with My Smartphone ',
'Invaders of the Rokujouma!? ',
'Iono-sama Fanatics ',
'Is This a Zombie? ',
'Kage Kara Mamoru! ',
'Kamikaze Explorer! ',
'Kämpfer ',
'Kannagi: Crazy Shrine Maidens ',
'Kanojo × Kanojo × Kanojo ',
'Kanokon ',
'Kanon (visual novel) ',
'Kenkō Zenrakei Suieibu Umishō ',
'Kimi ga Aruji de Shitsuji ga Ore de ',
'KimiKiss ',
'Koi Koi Seven ',
'Koi suru Tenshi Angelique ',
'Koihime Musō ',
'Labyrinth of Flames ',
'Ladies versus Butlers! ',
'Like Life ',
'Lime-iro Senkitan ',
'Little Busters! ',
'Lord Marksman and Vanadis ',
'Lotte no Omocha! ',
'Love Hina ',
'Love Love? ',
'Love, Election and Chocolate ',
'Lovely Idol ',
'Maburaho ',
'Maga-Tsuki ',
"Magician's Academy ",
'Magikano ',
'Maji de Watashi ni Koi Shinasai! ',
'Maken-ki! ',
'Makura no Danshi ',
'Maple Colors ',
'Marriage Royale ',
'Mashiroiro Symphony ',
'The Master of Ragnarok & Blesser of Einherjar ',
'Mayo Chiki! ',
'MM! ',
'Monster Musume ',
'My Bride is a Mermaid ',
'My First Girlfriend Is a Gal ',
'Nagasarete Airantō ',
'Nakaimo – My Sister Is Among Them! ',
'Negima! Magister Negi Magi ',
'Night Shift Nurses ',
'Ninja Girls ',
'Nogizaka Haruka no Himitsu ',
'North Wind (visual novel) ',
'Nyan Koi! ',
'Ohime-sama Navigation ',
'Omamori Himari ',
'One: Kagayaku Kisetsu e ',
'OniAi ',
'Onihime VS ',
'Oreimo ',
'Oreshura ',
'Otome wa Boku ni Koishiteru ',
'Please Twins! ',
'Princess Lover! ',
'Princess Resurrection ',
'The Quintessential Quintuplets ',
'R-15 (novel series) ',
'Rizelmine ',
'Rosario + Vampire ',
'S.L.H Stray Love Hearts! ',
'Saber Marionette ',
'Sakura Wars ',
'Samurai Harem: Asu no Yoichi ',
'School Days (visual novel) ',
'See Me After Class ',
'Sekirei ',
'Sex Taxi ',
'Shomin Sample ',
'Shuffle! ',
'Shukufuku no Campanella ',
'Sister Princess ',
'Sky Wizards Academy ',
'Strawberry 100% ',
'Summer (visual novel) ',
'Suzunone Seven! ',
'Tayutama: Kiss on my Deity ',
'Tears to Tiara ',
'Tenchi Forever! The Movie ',
'Tenchi Muyo! ',
'Tenchi Muyo! GXP ',
'Tenchi Muyo! Ryo-Ohki ',
'Tenchi Muyo! War on Geminar ',
'Tenchi the Movie 2: The Daughter of Darkness ',
'Tenchi the Movie: Tenchi Muyo in Love ',
'Tenchi Universe ',
'Tenshin Ranman: Lucky or Unlucky!? ',
'To Heart ',
'To Heart 2 ',
'To Love-Ru ',
'Trinity Seven ',
'Tsugumomo ',
'Tsuki wa Higashi ni Hi wa Nishi ni ',
'Unbalance ×2 ',
'Unlimited Fafnir ',
'Utawarerumono ',
'Valkyrie Complex ',
'Vandread ',
'W Wish ',
'We Never Learn ',
'White Album (visual novel) ',
'Wind: A Breath of Heart ',
'Words Worth ',
'World Break: Aria of Curse for a Holy Swordsman ',
'Yomeiro Choice ',
'Yosuga no Sora ',
'Yumeria ',
'Yuuna and the Haunted Hot Springs ']
romance_list = [
'3×3 Eyes ',
'Absolute Boyfriend ',
'Accel World ',
'After the Rain (manga) ',
'Age 12 ',
'Ai-Ren ',
'Air (2005 film) ',
'Aishite Knight ',
'Aishiteruze Baby ',
'Akatsuki-iro no Senpuku Majo ',
'Akogare ',
'Alice 19th ',
'Alice the 101st ',
'All My Darling Daughters ',
'Allison & Lillia ',
'Alpen Rose ',
'Amnesia Labyrinth ',
'Anata to Scandal ',
'Ane no Kekkon ',
'Angel Lip ',
'Angel Nest ',
'Angelique (video game series) ',
'Ani-Imo ',
'Animated Classics of Japanese Literature ',
'Ano Ko ni 1000% ',
'Anoko no Toriko ',
'Anonymous Noise ',
'Aokana: Four Rhythm Across the Blue ',
'Aozora Yell ',
'Aquarion Evol ',
'Armitage III ',
'Ashita no Nadja ',
'Ask Dr. Rin! ',
'Attack No. 1 ',
'Attack on Tomorrow ',
'Attacker You! ',
'Azuki-chan ',
'B.O.D.Y. (manga) ',
'Baby Love (manga) ',
'Backstage Prince ',
'Banner of the Stars ',
'Bara no Tame ni ',
'Barairo no Ashita ',
'Barefoot Waltz ',
'Beast Master (manga) ',
'Beauty is the Beast ',
'Beauty Pop ',
'Beck (manga) ',
'Believers (manga) ',
'Beyond the Boundary ',
'Binetsu Shōjo ',
'Bitter Virgin ',
'Black Bird (manga) ',
'Black Rose Alice ',
'Blood Alone ',
'Blood Hound (manga) ',
'Bloom Into You ',
'Blue Friend (manga) ',
'Blue Gender ',
'Blue Spring Ride ',
'Bonjour Sweet Love Patisserie ',
'Book Girl (film) ',
"Boy's Next Door ",
'Boyfriend (manga) ',
'Boys Be... ',
"A Bride's Story ",
'Broken Angels (manga) ',
"Cactus's Secret ",
'Call Me Princess ',
'Candy Candy ',
'Canon (manga) ',
'Canvas 2: Akane-iro no Palette ',
'Captive Hearts (manga) ',
'Castle in the Sky ',
'Cat Street (manga) ',
'Cause of My Teacher ',
'Challengers (manga) ',
'Chance Pop Session ',
'Cherry Juice ',
'Chihayafuru ',
'Children Who Chase Lost Voices ',
'Chirality (manga) ',
'ChocoTan! ',
'Chōyaku Hyakunin isshu: Uta Koi ',
'Clannad (film) ',
'Clannad (visual novel) ',
'Clear (visual novel) ',
'Clover (Toriko Chiya manga) ',
'Codename: Sailor V ',
'Coicent ',
'The Cosmopolitan Prayers ',
'Crimson Spell ',
'Crown (manga) ',
'Crown of Love (manga) ',
'D.N.Angel ',
'Da Capo III ',
'Dance in the Vampire Bund ',
'Dance with Devils ',
'A Dark Rabbit Has Seven Lives ',
'Darling in the Franxx ',
'Dawn of the Arcana ',
'Dear (manga) ',
'The Demon Ororon ',
'The Demon Prince of Momochi House ',
'Demonizer Zilch ',
'Dengeki Daisy ',
'A Devil and Her Love Song ',
'The Devil Does Exist ',
'Dolis ',
'Domestic Girlfriend ',
"Don't Say Anymore, Darling ",
'Dōse Mō Nigerarenai ',
'Dragon Eye (manga) ',
'Dream Saga ',
"Dreamin' Sun ",
'A Drifting Life ',
'Drifting Net Cafe ',
'Drowning Love ',
'Dusk Maiden of Amnesia ',
'The Earl and the Fairy ',
'Eden* ',
"Eden's Bowy ",
'Eerie Queerie! ',
'El-Hazard ',
'Embracing Love ',
'Emma (manga) ',
'Eureka Seven ',
'FairlyLife ',
'Final Approach (visual novel) ',
'Fire Tripper ',
'First Love Sisters ',
'Fish in the Trap ',
'Flower in a Storm ',
'Fortune Arterial ',
'Four Shōjo Stories ',
'Four-Eyed Prince ',
'Foxy Lady (manga) ',
'From Far Away ',
'Fruits Basket ',
'Full Metal Panic? Fumoffu ',
'Full Moon o Sagashite ',
'Fushigi Yûgi ',
'Future Diary ',
'Gaba Kawa ',
'Gakuen Polizi ',
'Garden Dreams ',
'Gatcha Gacha ',
'Genesis of Aquarion ',
'Genji Monogatari Sennenki ',
'A Gentle Breeze in the Village ',
'Georgie! ',
'Gerard & Jacques ',
'Gift (visual novel) ',
'Girl Friend (manga) ',
'Girl Friend Beta ',
'Girl Friends (manga) ',
'Girl Got Game ',
'The Girl Who Leapt Through Time (2006 film) ',
'Girls Beyond the Wasteland ',
'Glass Wings ',
'Glasslip ',
'A Good Librarian Like a Good Shepherd ',
'Good Morning Call ',
'Gosick ',
'Gou-dere Sora Nagihara ',
'Gravitation (manga) ',
'Green Green (TV series) ',
'Gunparade March ',
'Hachimitsu ni Hatsukoi ',
'Haikara-San: Here Comes Miss Modern ',
'Hakuba no Ōji-sama ',
'Hal (2013 film) ',
'Hana & Hina After School ',
'Hana to Akuma ',
'Hana-Kimi ',
'Hana-kun to Koisuru Watashi ',
'Hanasakeru Seishōnen ',
'Hanbun no Tsuki ga Noboru Sora ',
'Handsome na Kanojo ',
'Hanjuku-Joshi ',
'Haou Airen ',
'Happy Hustle High ',
'Happy Marriage!? ',
'Haru Natsu Aki Fuyu ',
'Haruka: Beyond the Stream of Time (manga) ',
'Harukoi Otome ',
"He's My Only Vampire ",
'The Heart of Thomas ',
'Hello! Lady Lynn ',
"Her Majesty's Dog ",
'Here Is Greenwood ',
'Heroine Shikkaku ',
'Hiatari Ryōkō! ',
'Hibi Chōchō ',
'High School Debut ',
'Hikari no Densetsu ',
'Himitsu no Akko-chan ',
'Himitsu no Recipe ',
'Hirunaka no Ryuusei ']
mecha_list = [
'Ai City ',
'Akane Maniax ',
'Aldnoah.Zero ',
'All Purpose Cultural Cat-Girl Nuku Nuku DASH! ',
'AM Driver ',
'Ambassador Magma ',
'Aquarion Logos ',
'Argento Soma ',
'Argevollen ',
'Ariel (anime) ',
'Ark (2005 film) ',
'Armitage III ',
'Armored Trooper Votoms: Pailsen Files ',
'Assemble Insert ',
'Aura Battler Dunbine ',
'Baldr Force ',
'Basquash! ',
'Battle Skipper ',
"Beast Wars II: Lio Convoy's Close Call! ",
'Betterman (TV series) ',
'Blue Comet SPT Layzner ',
'Blue Gender ',
'Brain Powerd ',
'Broken Blade ',
'Bubblegum Crisis ',
'Bubblegum Crisis Tokyo 2040 ',
'Buddy Complex ',
'Burn-Up W ',
'Busou Shinki ',
'The Candidate for Goddess ',
'Cannon God Exaxxion ',
'Castle in the Sky ',
'Cat City ',
'Chō Jikū Robo Meguru ',
'Chō Kōsoku Galvion ',
'Chōsoku Henkei Gyrozetter ',
'Chōgattai Majutsu Robo Ginguiser ',
'Choriki Robo Galatt ',
'Code Geass ',
'Combat Mecha Xabungle ',
'Comet Lucifer ',
'The Cosmopolitan Prayers ',
'Cross Ange ',
'Cybuster ',
'D.I.C.E. ',
'Dai-Shogun – Great Revolution ',
'Daigunder ',
'Daimajū Gekitō: Hagane no Oni ',
'Daimidaler: Prince vs Penguin Empire ',
'Darling in the Franxx ',
'Dennō Bōkenki Webdiver ',
'Detonator Orgun ',
'Devadasy ',
'Dinosaur War Izenborg ',
'Dinozaurs ',
'Dual! Parallel Trouble Adventure ',
'Dynamic Heroes ',
'Ehrgeiz (TV series) ',
'The End of Evangelion ',
'Engage Planet Kiss Dum ',
'Escaflowne (film) ',
'Eureka Seven ',
'Evangelion: 1.0 You Are (Not) Alone ',
'Evangelion: 2.0 You Can (Not) Advance ',
'Evangelion: 3.0 You Can (Not) Redo ',
'Evangelion: 3.0+1.0 ',
'Expelled from Paradise ',
'Fafner in the Azure ',
'Fang of the Sun Dougram ',
'Fight! Iczer One ',
'Firestorm (TV series) ',
'First Squad ',
'Flag (TV series) ',
'Force Five ',
'Frame Arms Girl ',
'Gad Guard ',
'Galaxy Fräulein Yuna ',
'Gargantia on the Verdurous Planet ',
'Geneshaft ',
'Genesis Survivor Gaiarth ',
'Giant Gorg ',
"Gin'iro no Olynssis ",
'Ginga Hyōryū Vifam ',
'The Girl Who Leapt Through Space ',
'God Mazinger ',
'Godzilla: City on the Edge of Battle ',
'Godzilla: Planet of the Monsters ',
'Good Morning Althea ',
'Grey (manga) ',
'Guilty Crown ',
'Gunbuster ',
'Gunparade March ',
'Gurren Lagann ',
'Heavy Metal L-Gaim ',
'Heroic Age (TV series) ',
'Hikarian ',
'Ichigeki Sacchu!! HoiHoi-san ',
'Idolmaster: Xenoglossia ',
'Immortal Grand Prix ',
'Infinite Ryvius ',
'Infinite Stratos ',
'Innocent Venus ',
'Invincible King Tri-Zenon ',
'Jinki: Extend ',
'Jushin Liger (TV series) ',
'K.O. Beast ',
'Kannazuki no Miko ',
'Key the Metal Idol ',
'Kikaider ',
'Kirameki Project ',
'Kishin Corps ',
'Kishin Taisen Gigantic Formula ',
"Knight's & Magic ",
'Knights of Ramune ',
'Knights of Sidonia ',
'Kurogane Communication ',
'Kuromukuro ',
'Lime-iro Senkitan ',
'Linebarrels of Iron ',
'M3: The Dark Metal ',
'Machine Robo Rescue ',
'Macross ',
'Macross Delta ',
'Magic Knight Rayearth ',
'Majestic Prince (manga) ',
'Mars Daybreak ',
'Martian Successor Nadesico: The Motion Picture – Prince of Darkness ',
'Mazinger Z ',
'Mazinger Z vs. The Great General of Darkness ',
'MazinSaga ',
'MD Geist ',
'Melody of Oblivion ',
'Metal Armor Dragonar ',
'Negadon: The Monster from Mars ',
'Neo Ranga ',
'Neon Genesis Evangelion ',
'Neon Genesis Evangelion (franchise) ',
'Neon Genesis Evangelion: Death & Rebirth ',
'NG Knight Ramune & 40 ',
'Nobunaga the Fool ',
'Overman King Gainer ',
'Panzer World Galient ',
'Patlabor: The New Files ',
'Patlabor: The TV Series ',
'Planet With ',
'Planzet ',
'Plastic Little ',
'Platinumhugen Ordian ',
'Plawres Sanshiro ',
'Power Stone (TV series) ',
'Psycho Armor Govarian ',
'RahXephon ',
'Red Baron (TV series) ',
'Red Eyes ',
'Regalia: The Three Sacred Stars ',
'Rideback ',
'Robo Formers ',
'Robot Carnival ',
'Robot Girls Z ',
'Robotech ',
'Robotech II: The Sentinels ',
'Robotech: Love Live Alive ',
'Robotech: The Movie ',
'Robotics;Notes ',
'RS Project -Rebirth Storage- ',
'Sailor Victory ',
'Sakura Wars ',
'Samurai 7 ',
'School Shock ',
'Science Ninja Team Gatchaman ',
'Shattered Angels ',
'Shinkansen Henkei Robo Shinkalion ',
'Sky Girls ',
'SSSS.Gridman ',
'Star Driver ',
'Starship Troopers (OVA) ',
'Stellvia ',
'Strain: Strategic Armored Infantry ',
'Super Dimension Century Orguss ',
'The Super Dimension Fortress Macross ',
'Super Robot Wars Original Generation: Divine Wars ',
'Super Robot Wars Original Generation: The Animation ',
'Super Robot Wars Original Generation: The Inspector ',
'Techno Police 21C ',
'Tekkaman Blade ',
'Tenchi Muyo! War on Geminar ',
'Tokio Private Police ',
'Tomica Hyper Rescue Drive Head Kidō Kyūkyū Keisatsu ',
'Transformers Go! ',
'Transformers: Armada ',
'Transformers: Armada (comics) ',
'Transformers: Cybertron ',
'Transformers: Energon ',
'Transformers: Robot Masters ',
'Transformers: Super-God Masterforce ',
'Transformers: The Headmasters ',
'Transformers: Victory ',
'Transformers: Zone ']
slice_of_life_list = [
'A Channel (manga) ',
'Abandon the Old in Tokyo ',
'Age 12 ',
'Aho-Girl ',
'Aiura ',
'Akiba-chan (TV series) ',
'Akogare ',
'Amanchu! ',
'Amano Megumi wa Sukidarake! ',
'And Yet the Town Moves ',
'Ane no Kekkon ',
'Anime-Gatari ',
'Asahinagu ',
'Asari-chan ',
'Ashizuri Suizokukan ',
'Azumanga Daioh ',
'Baby & Me ',
'Baby Princess ',
'Bakuman ',
'Barairo no Ashita ',
'Barakamon ',
'Best Student Council ',
'Binbō Shimai Monogatari ',
'Blend S ',
"A Centaur's Life ",
'Chihayafuru ',
'Chimpui ',
'Chitose Get You!! ',
'Choir! ',
'Cinnamoroll ',
'Clannad (visual novel) ',
'The Comic Artist and His Assistants ',
'The Cosmopolitan Prayers ',
'Crayon Shin-chan ',
'Crossing Time ',
'Dagashi Kashi ',
'Daily Lives of High School Boys ',
'Dareka no Manazashi ',
'DD Fist of the North Star ',
"Dead Dead Demon's Dededede Destruction ",
'Doki Doki School Hours ',
"Dreamin' Sun ",
'Drowning Love ',
'Encouragement of Climb ',
'Endro! ',
'Flower of Life (manga) ',
'Flying Witch ',
'Food Wars!: Shokugeki no Soma ',
'Futagashira ',
'Futaribeya: A Room for Two ',
'GA Geijutsuka Art Design Class ',
'Ganbare!! Tabuchi-kun!! ',
'Genshiken ',
'A Gentle Breeze in the Village ',
"Girls' Last Tour ",
'Glasslip ',
'Gokicha ',
'Goodnight Punpun ',
'Gourmet Girl Graffiti ',
'Green Green (TV series) ',
'Hachimitsu ni Hatsukoi ',
'Hakumei and Mikochi ',
'Hana-kun to Koisuru Watashi ',
'Hanamaru Kindergarten ',
'Hanasaku Iroha ',
'Hanayamata ',
'Happy Happy Clover ',
'Hayate the Combat Butler ',
'Hello! Lady Lynn ',
'Hello! Sandybell ',
'Heroine Shikkaku ',
'Hibi Chōchō ',
'Hibi Rock ',
'Hidamari Sketch ',
'Hitori Bocchi no Marumaru Seikatsu ',
'Hōkago Play ',
'Hori-san to Miyamura-kun ',
'House of the Sun ',
'Human Crossing ',
'Hyakko ',
'Hyouka ',
"If It's for My Daughter, I'd Even Defeat a Demon Lord ",
'Is the Order a Rabbit? ',
'Jūhan Shuttai! ',
'K-On! ',
'Kamichu! ',
'Kamisama Minarai: Himitsu no Cocotama ',
'Kamurobamura-e ',
'Kanamemo ',
'Teasing Master Takagi-san ',
'Karakuri Odette ',
'Kenka Shōbai ',
'Kids on the Slope ',
'Kill Me Baby ',
'Kimi ni Todoke ',
'Kira Kira Happy Hirake! Cocotama ',
'Kokoro Button ',
'Kono Oto Tomare! Sounds of Life ',
'Konohana Kitan ',
'Koro Sensei Quest ',
'Kū Neru Futari Sumu Futari ',
'Kyō, Koi o Hajimemasu ',
'L DK ',
'Liar × Liar ',
'Little Forest ',
'Love Celeb ',
'Love Hina ',
'Love Live! ',
'Lucky Star (manga) ',
'Maestro (manga) ',
'Mai Mai Miracle ',
'Mainichi Kaasan ',
'Manga Dogs ',
'Maple Town ',
'Meganebu! ',
'Mitsuboshi Colors ',
'Mitsudomoe (manga) ',
'Morita-san wa Mukuchi ',
'Mushishi ',
'My Roommate Is a Cat ',
'Nagareboshi Lens ',
'Naisho no Tsubomi ',
'Nasu (manga) ',
'Natsuiro Kiseki ',
'Ningen Karimenchū ',
"No Matter How I Look at It, It's You Guys' Fault I'm Not Popular! ",
'Non Non Biyori ',
'Nōnai Poison Berry ',
'Nono-chan ',
'Noucome ',
"Nurse Hitomi's Monster Infirmary ",
'Ojamanga Yamada-kun ',
'The One I Love (manga) ',
'One Off (miniseries) ',
'Orange (manga) ',
'Otoko no Isshō ',
'Paboo & Mojies ',
'Place to Place ',
'Poyopoyo Kansatsu Nikki ',
'Princess Maison ',
'Project 575 ',
'The Push Man and Other Stories ',
'Recorder and Randsell ',
'Recovery of an MMO Junkie ',
'ReRe Hello ',
'Rin-ne ',
'Robot Girls Z ',
'S.S. Astro ',
'Sabagebu! ',
'Saint Young Men ',
'Sakura Quest ',
'Sakura Trick ',
'Sanrio Boys ',
'Sanzoku Diary ',
'Sayonara Sorcier ',
'Sayonara, Tama-chan ',
'Sazae-san ',
'School Days (visual novel) ',
'Seitokai Yakuindomo ',
'Senryu Girl ',
'Servant × Service ',
'Shitsuren Chocolatier ',
'Silver Spoon (manga) ',
'Sketchbook (manga) ',
'Slow Start (manga) ',
'Solanin ',
'Soul Eater Not! ',
'Sound of the Sky ',
'Space Brothers (manga) ',
'Star-Myu ',
'Stella Women’s Academy, High School Division Class C³ ',
'Strawberry Marshmallow ',
"Student Council's Discretion ",
'Sukimasuki ',
'Sunny (manga) ',
'Super Seisyun Brothers ',
'Sweetness and Lightning ',
'Sylvanian Families (OVA series) ',
'Tamagotchi! (TV series) ',
'Tenshi Nanka Ja Nai ',
'Tesagure! Bukatsu-mono ',
"Today's Menu for the Emiya Family ",
'Tokyo Alice ',
'Tonari no Kashiwagi-san ',
'Toradora! ',
'Town Doctor Jumbo!! ',
'True Love (manga) ',
'True Tears (TV series) ',
'The Tyrant Falls in Love ',
'Uchi no Sanshimai ',
'Ultimate Otaku Teacher ',
'Undercurrent (manga) ',
'Wake Up, Girls! ',
'Welcome to the N.H.K. ',
'What a Wonderful World! ',
'Working!! ',
'Yokohama Kaidashi Kikō ',
'Yotsuba&! ',
'YuruYuri ']
isekai_list = [
'12 Beast ',
'100 Sleeping Princes and the Kingdom of Dreams ',
"Arifureta: From Commonplace to World's Strongest ",
'Ascendance of a Bookworm ',
'Aura Battler Dunbine ',
'The Brave-Tuber ',
'Captain N: The Game Master ',
'Conception (video game) ',
'Death March to the Parallel World Rhapsody ',
"Didn't I Say to Make My Abilities Average in the Next Life?! ",
'Digimon Adventure ',
'Do You Love Your Mom and Her Two-Hit Multi-Target Attacks? ',
'Dog Days (Japanese TV series) ',
'Drifters (manga) ',
'El-Hazard ',
'Endride ',
'The Familiar of Zero ',
'Fushigi Yûgi ',
'Gate (novel series) ',
'Grimgar of Fantasy and Ash ',
'Hachinantte Sore wa Inai Deshō! ',
'The Hero is Overpowered but Overly Cautious ',
'High School Prodigies Have It Easy Even In Another World ',
'How a Realist Hero Rebuilt the Kingdom ',
'How Not to Summon a Demon Lord ',
"I've Been Killing Slimes for 300 Years and Maxed Out My Level ",
'In Another World with My Smartphone ',
'Infinite Dendrogram ',
'Inuyasha ',
'Isekai Cheat Magician ',
'Isekai Izakaya "Nobu" ',
'Isekai Quartet ',
'Kemonomichi ',
'Kiba (TV series) ',
"Knight's & Magic ",
'KonoSuba ',
'Kyo Kara Maoh! ',
'Log Horizon ',
'Magic Knight Rayearth ',
'Magical Shopping Arcade Abenobashi ',
'Maō-sama, Retry! ',
'MÄR ',
'The Master of Ragnarok & Blesser of Einherjar ',
'Mushoku Tensei ',
'My Next Life as a Villainess: All Routes Lead to Doom! ',
'New Life+: Young Again in Another World ',
'No Game No Life ',
'No Game, No Life Zero ',
'Outbreak Company ',
'Overlord (novel series) ',
'Pop in Q ',
"Problem Children Are Coming from Another World, Aren't They? ",
'Re:Zero − Starting Life in Another World ',
'Reborn as a Vending Machine, I Now Wander the Dungeon ',
'Restaurant to Another World ',
'The Rising of the Shield Hero ',
'The Saga of Tanya the Evil ',
"So I'm a Spider, So What? ",
'Spirited Away ',
'Sword Art Online ',
'That Time I Got Reincarnated as a Slime ',
'Tweeny Witches ',
'The Twelve Kingdoms ',
"Wise Man's Grandchild "]
@borg.on(events.NewMessage(pattern=r"\.action", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[8:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(action_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Action Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.harem", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[7:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(harem_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Harem Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.mecha", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[7:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(mecha_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Mecha Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.romance", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[9:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(romance_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Romance Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.isekai", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[8:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(isekai_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Isekai Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.adventure", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[10:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(adventure_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Adventure Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
@borg.on(events.NewMessage(pattern=r"\.slice", outgoing=True))
async def action(event):
if event.fwd_from:
return
number_of_times = event.text[7:]
number_of_times = int(number_of_times)
i = 0
anime_list = []
while i != number_of_times:
anime = random.choice(slice_of_life_list)
anime_list.append(anime + "\n")
i = i + 1
counter = 1
msg_str = []
for i in anime_list:
msg_str.append(str(counter) + ". " + i)
counter = counter + 1
msg_str = str(msg_str)
msg_str = msg_str.replace("['", "")
msg_str = msg_str.replace(",", "")
msg_str = msg_str.replace("']", "")
msg_str = msg_str.replace("' '", "")
msg_str_front = "Here's Top " + \
str(number_of_times) + " Slice of life Anime List For you !\n"
msg_str = msg_str_front + msg_str
msg_str = msg_str.replace("\\n", "\n")
msg_str = msg_str.replace("'", "")
msg_str = msg_str.replace('"', "")
await event.edit("**" + msg_str + "**")
| 27.876106 | 82 | 0.575849 |
e09cd7cbd52a3b4f93c2b3cdffc8d9fea4e79e2d | 1,693 | py | Python | azure-mgmt-monitor/azure/mgmt/monitor/models/metric_alert_criteria.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-monitor/azure/mgmt/monitor/models/metric_alert_criteria.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-monitor/azure/mgmt/monitor/models/metric_alert_criteria.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricAlertCriteria(Model):
    """The rule criteria that defines the conditions of the alert rule.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: MetricAlertSingleResourceMultipleMetricCriteria

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param odatatype: Required. Constant filled by server.
    :type odatatype: str
    """

    # msrest validation rules: the polymorphic discriminator must be present.
    _validation = {
        'odatatype': {'required': True},
    }

    # Wire-format mapping; the empty key collects unmatched JSON properties.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'odatatype': {'key': 'odata\\.type', 'type': 'str'},
    }

    # Discriminator value -> concrete subclass name used during deserialization.
    _subtype_map = {
        'odatatype': {'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria': 'MetricAlertSingleResourceMultipleMetricCriteria'}
    }

    def __init__(self, **kwargs):
        super(MetricAlertCriteria, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        # Constant filled in by the server; never set client-side.
        self.odatatype = None
| 36.021277 | 136 | 0.647962 |
1a9aa533f74db700240b7d73ce67288e918ae570 | 5,020 | py | Python | cvat/apps/engine/tests/test_sequences.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 1 | 2021-07-12T20:34:31.000Z | 2021-07-12T20:34:31.000Z | cvat/apps/engine/tests/test_sequences.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 8 | 2020-05-04T09:44:13.000Z | 2021-10-14T12:54:40.000Z | cvat/apps/engine/tests/test_sequences.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 1 | 2020-07-15T09:30:13.000Z | 2020-07-15T09:30:13.000Z | import pathlib
import random
import re
from unittest import TestCase
from cvat.apps.engine.ddln.sequences import group, distribute, extend_assignees
from cvat.apps.engine.utils import group_on_delimiter, natural_order
sequences_dir = pathlib.Path(__file__).parent / "data" / "sequences"
alice, bob, chris, david, eva = "Alice Bob Chris David Eva".split()
class ExtendAssigneesTest(TestCase):
    """Tests for extend_assignees(): assigning an extra annotator to each
    sequence while balancing workload and never re-assigning a sequence to
    someone already in its existing-assignee set."""

    def test_even_workload(self):
        # Every sequence is currently assigned to Eva only, so all three new
        # assignees are eligible everywhere; workload ends up roughly even.
        sequences = [
            ('A', 80, {eva}),
            ('B', 23, {eva}),
            ('C', 65, {eva}),
            ('D', 94, {eva}),
            ('E', 70, {eva}),
            ('F', 28, {eva}),
            ('G', 12, {eva}),
            ('H', 40, {eva}),
            ('I', 33, {eva}),
        ]
        assignees = [alice, bob, chris]
        assignments, failed_sequences = extend_assignees(sequences, assignees)
        self.assertEqual(failed_sequences, [])
        self.assertEqual(calc_workload(assignments, sequences), {
            alice: 153,
            bob: 157,
            chris: 135,
        })

    def test_failed_assignment(self):
        # 'B' and 'D' already list both candidates (alice, bob) among their
        # assignees, so they cannot be extended and are reported as failed.
        sequences = [
            ('A', 12, {bob, chris, david}),
            ('B', 10, {alice, bob, david}),
            ('C', 15, {alice, chris, david}),
            ('D', 25, {alice, bob, eva}),
        ]
        assignees = [alice, bob]
        assignments, failed_sequences = extend_assignees(sequences, assignees)
        self.assertEqual(assignments, [
            ('A', alice),
            ('C', bob),
        ])
        self.assertEqual(failed_sequences, ['B', 'D'])

    def test_constraint_is_enforced(self):
        # Bob already annotates 'D', so 'D' must go to Alice even though that
        # leaves her with three sequences to Bob's one.
        sequences = [
            ('A', 50, {eva}),
            ('B', 50, {eva}),
            ('C', 50, {eva}),
            ('D', 50, {bob}),
        ]
        assignees = [alice, bob]
        assignments, failed_sequences = extend_assignees(sequences, assignees)
        self.assertEqual(failed_sequences, [])
        self.assertEqual(assignments, [
            ('A', alice),
            ('B', bob),
            ('C', alice),
            ('D', alice),
        ])
def calc_workload(assignments, sequences):
    """Return a mapping of user -> total size of the sequences assigned to them.

    ``assignments`` is a list of (sequence_name, user) pairs; ``sequences``
    is a list of (sequence_name, size, existing_assignees) triples.
    """
    sizes = {}
    for name, size, _unused in sequences:
        sizes[name] = size
    totals = {}
    for name, user in assignments:
        totals[user] = totals.get(user, 0) + sizes[name]
    return totals
class DistributeSequencesTest(TestCase):
    """Tests for distribute(): handing out chunks to assignees in order, with
    unfilled annotator slots left as None, and cycling through assignees when
    each chunk needs more than one annotator."""

    def test_single_assignee(self):
        chunks = ['A', 'B', 'C']
        assignees = [alice]
        actual = distribute(chunks, assignees)
        # Only the first chunk gets the lone assignee; the remaining chunks
        # are left unassigned (None placeholder).
        self.assertEqual(actual, [
            ('A', [alice]),
            ('B', [None]),
            ('C', [None]),
        ])

    def test_multiple_assignees(self):
        chunks = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        assignees = [alice, bob, chris]
        actual = distribute(chunks, assignees)
        # One chunk per assignee, then None for the leftover chunks.
        self.assertEqual(actual, [
            ('A', [alice]),
            ('B', [bob]),
            ('C', [chris]),
            ('D', [None]),
            ('E', [None]),
            ('F', [None]),
            ('G', [None]),
            ('H', [None]),
        ])

    def test_multiple_assignees_triple_annotated(self):
        chunks = ['A', 'B']
        assignees = [alice, bob, chris]
        # Three annotators per chunk: all three users on every chunk.
        actual = distribute(chunks, assignees, 3)
        self.assertEqual(actual, [
            ('A', [alice, bob, chris]),
            ('B', [alice, bob, chris]),
        ])

    def test_multiple_assignees_double_annotated(self):
        chunks = ['A', 'B', 'C', 'D']
        assignees = [alice, bob, chris]
        # Two annotators per chunk, cycling through the assignee list.
        actual = distribute(chunks, assignees, 2)
        self.assertEqual(actual, [
            ('A', [alice, bob]),
            ('B', [chris, alice]),
            ('C', [bob, chris]),
            ('D', [alice, bob]),
        ])
class GroupSequencesTest(TestCase):
    """Data-driven tests for group(): each data/sequences/*.txt file holds an
    input list, a chunk size, and the expected grouping (see read_case_data)."""

    def test_cases(self):
        for test_file in sequences_dir.glob("*.txt"):
            with self.subTest(task=test_file.stem):
                sequences, chunk_size, expected = read_case_data(test_file)
                # Shuffle so grouping is exercised on an arbitrary input order.
                random.shuffle(sequences)
                actual = group(sequences, chunk_size)
                # Compare through _() so chunk/sequence order is ignored.
                self.assertEqual(_(expected), _(_get_seq_names(actual)))
def read_case_data(test_file):
    """Parse one test-case file into (sequences, chunk_size, expected).

    File layout: input lines of "name<TAB>size", a "### <chunk_size> ###"
    separator line, then the expected output groups separated by blank lines.
    """
    content = test_file.read_text()
    input, chunk_size, output = re.split(r'#{3,}\s+(\d+)\s+#{3,}\n', content)
    sequences = [_parse_line(line) for line in input.splitlines()]
    chunk_size = int(chunk_size)
    # Strip trailing "# ..." comments from the expected-output section.
    output = re.sub(r"\s*#.*$", '', output, flags=re.MULTILINE)
    expected = group_on_delimiter(output.splitlines(), '')
    return sequences, chunk_size, expected
def _parse_line(line):
seq_name, size = line.split('\t')
size = int(size)
return seq_name, size
def _get_seq_names(chunks):
return [[s[0] for s in sequences] for sequences in chunks]
def _(chunks):
    """Canonicalize ordering so chunk comparisons ignore order."""
    normalized = []
    for sequences in chunks:
        normalized.append(sorted(sequences, key=natural_order))
    return sorted(normalized)
| 28.685714 | 91 | 0.540637 |
c0453b11dfdb40dfe5d62a102770dec552a0cb70 | 4,148 | py | Python | script.py | othyn/url-shortcut-organiser | 5b5b32269b93e86db8e5eaa42890190345596f98 | [
"MIT"
] | null | null | null | script.py | othyn/url-shortcut-organiser | 5b5b32269b93e86db8e5eaa42890190345596f98 | [
"MIT"
] | null | null | null | script.py | othyn/url-shortcut-organiser | 5b5b32269b93e86db8e5eaa42890190345596f98 | [
"MIT"
] | null | null | null | # As this is my first excursion into Python, this script is heavily (OTT) documented
"""Simple script to sort out my internet shortcut archive.
This will eventually lead to *smrt* sorting of url's into categories/types for easy reference.
Stores data into a JSON format."""
# Docstring, required for C0111
# Provides help information about the scripts purpose
# Multiple lines as per C0301 (100 char limit per line)
import os
import argparse
import glob
import json
import time
#import pprint
# OS for filesystem
# Argparse for easy management of script arguments
# Glob for dealing with path names
# JSON for data manipulation/storage
# Time for filename
# pprint for similar behavior of PHP's var_dump, pprint.pprint(VAR), helps with debugging
# Spread across multiple lines as per C0410
START_TIME = time.time()
# Script start time
DIR_TO_SEARCH = ''
# For storing the directory argument in
REMOVE_FILES_ONCE_PROCESSED = False
# For storing whether the user would like to remove the files once processed
OLD_URLS_JSON_DIR = ''
# For storing the location of the old JSON data
NEW_URLS = {}
# Used for new (to be written) urls [JSON]
TIMESTAMP = int(time.time())
# Timestamp for filename
PARSER = argparse.ArgumentParser(description='A directory is required to scan for *.url files.')
# Creates a new argparse module, setting the description for the scripts arguments
PARSER.add_argument('-d', '--directory', help='Directory to search', required=True)
# For a directory to run the script on
PARSER.add_argument('-r', '--remove', help='Delete the files as they are processed.',
required=False, action='store_true')
# If the user would like to remove the file once it is processed
PARSER.add_argument('-j', '--json',
help='Instead of creating a new output file, add to an existing JSON data set.',
required=False)
# Ask for a directory to run the script on
ARGS = PARSER.parse_args()
# Get the argument values
DIR_TO_SEARCH = ARGS.directory
# Set the directory to search
REMOVE_FILES_ONCE_PROCESSED = ARGS.remove
# Set the users preference on file deletion
OLD_URLS_JSON_DIR = ARGS.json
# Set the value of the JSON dir
#pprint.pprint(DIR_TO_SEARCH)
try:
if not OLD_URLS_JSON_DIR is None:
if not os.path.isfile(OLD_URLS_JSON_DIR):
raise ValueError('Error: The specified JSON file does not exist')
#with open(OLD_URLS_JSON_DIR, 'r') as file_input:
#NEW_URLS = json.load(file_input)
# Parse the old urls
# I currently hate unicode, I'm doing something wrong...
except ValueError, error:
exit(str(error))
# Check whether the passed JSON argument file exists, if the argument is supplied
# If it does, load it
try:
if not os.path.isdir(DIR_TO_SEARCH):
raise ValueError('Error: The path specified is not a directory')
except ValueError, error:
exit(str(error))
# Check whether the passed directory argument exists and is infact a directory
os.chdir(DIR_TO_SEARCH)
# Set the directory to search to the users specified directory
for current_file in glob.glob("*.url"):
if not os.path.isfile(current_file):
continue
# At this moment, no easy solution for dealing with filenames containing unicode
# So, run a check to see if the file exists, if not, its probably unicode
with open(current_file, "r") as infile:
for line in infile:
if line.startswith('URL'):
NEW_URLS[current_file] = line[4:].strip('\n')
break
# Get the URL from the current file and add it into the dict
if REMOVE_FILES_ONCE_PROCESSED:
os.remove(current_file)
# Iterate through each file that ends in url
# Check the file exists
# If it does, add the filename and url to the dict
# Remove the file if the user has specified to do so
with open('url_data_'+str(TIMESTAMP)+'.json', 'w') as file_output:
json.dump(NEW_URLS, file_output, sort_keys=True, indent=4, ensure_ascii=False)
# Create a new JSON file in the current directory and dump the processed dict
print "Completed in: %s seconds" % (time.time() - START_TIME)
| 33.451613 | 100 | 0.72107 |
6fd7dcd0093203c85f6e82a35169ca4d8fc18d96 | 49,953 | py | Python | src/third_party/wiredtiger/dist/api_data.py | stevelyall/mongol-db | d8046147bfe806f7acc0ec4aa70c132507b761fb | [
"Apache-2.0"
] | 1 | 2018-03-16T09:49:05.000Z | 2018-03-16T09:49:05.000Z | src/third_party/wiredtiger/dist/api_data.py | stevelyall/mongol-db | d8046147bfe806f7acc0ec4aa70c132507b761fb | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/dist/api_data.py | stevelyall/mongol-db | d8046147bfe806f7acc0ec4aa70c132507b761fb | [
"Apache-2.0"
] | null | null | null | # This file is a python script that describes the WiredTiger API.
class Method:
    """Pairs an API method with the list of Config entries it accepts."""

    def __init__(self, config):
        # List of Config objects describing this method's configuration keys.
        self.config = config
class Config:
    """One configuration key: name, default, description and extra flags."""

    def __init__(self, name, default, desc, subconfig=None, **flags):
        self.name = name            # configuration key name
        self.default = default      # default value, as shown in the docs
        self.desc = desc            # documentation text for the key
        self.subconfig = subconfig  # nested Config list for 'category' entries
        self.flags = flags          # extra attributes (type, min, max, choices, ...)

    def __cmp__(self, other):
        # Python 2 comparison hook (uses the cmp() builtin): order by key name.
        return cmp(self.name, other.name)
return cmp(self.name, other.name)
# Metadata shared by all schema objects
common_meta = [
Config('app_metadata', '', r'''
application-owned metadata for this object'''),
Config('collator', 'none', r'''
configure custom collation for keys. Permitted values are \c "none"
or a custom collator name created with WT_CONNECTION::add_collator'''),
Config('columns', '', r'''
list of the column names. Comma-separated list of the form
<code>(column[,...])</code>. For tables, the number of entries
must match the total number of values in \c key_format and \c
value_format. For colgroups and indices, all column names must
appear in the list of columns for the table''',
type='list'),
]
source_meta = [
Config('source', '', r'''
set a custom data source URI for a column group, index or simple
table. By default, the data source URI is derived from the \c
type and the column group or index name. Applications can
create tables from existing data sources by supplying a \c
source configuration''', undoc=True),
Config('type', 'file', r'''
set the type of data source used to store a column group, index
or simple table. By default, a \c "file:" URI is derived from
the object name. The \c type configuration can be used to
switch to a different data source, such as LSM or an extension
configured by the application'''),
]
format_meta = common_meta + [
Config('key_format', 'u', r'''
the format of the data packed into key items. See @ref
schema_format_types for details. By default, the key_format is
\c 'u' and applications use WT_ITEM structures to manipulate
raw byte arrays. By default, records are stored in row-store
files: keys of type \c 'r' are record numbers and records
referenced by record number are stored in column-store files''',
type='format', func='__wt_struct_confchk'),
Config('value_format', 'u', r'''
the format of the data packed into value items. See @ref
schema_format_types for details. By default, the value_format
is \c 'u' and applications use a WT_ITEM structure to
manipulate raw byte arrays. Value items of type 't' are
bitfields, and when configured with record number type keys,
will be stored using a fixed-length store''',
type='format', func='__wt_struct_confchk'),
]
lsm_config = [
Config('lsm', '', r'''
options only relevant for LSM data sources''',
type='category', subconfig=[
Config('auto_throttle', 'true', r'''
Throttle inserts into LSM trees if flushing to disk isn't
keeping up''',
type='boolean'),
Config('bloom', 'true', r'''
create bloom filters on LSM tree chunks as they are merged''',
type='boolean'),
Config('bloom_config', '', r'''
config string used when creating Bloom filter files, passed
to WT_SESSION::create'''),
Config('bloom_bit_count', '16', r'''
the number of bits used per item for LSM bloom filters''',
min='2', max='1000'),
Config('bloom_hash_count', '8', r'''
the number of hash values per item used for LSM bloom
filters''',
min='2', max='100'),
Config('bloom_oldest', 'false', r'''
create a bloom filter on the oldest LSM tree chunk. Only
supported if bloom filters are enabled''',
type='boolean'),
Config('chunk_count_limit', '0', r'''
the maximum number of chunks to allow in an LSM tree. This
option automatically times out old data. As new chunks are
added old chunks will be removed. Enabling this option
disables LSM background merges''',
type='int'),
Config('chunk_max', '5GB', r'''
the maximum size a single chunk can be. Chunks larger than this
size are not considered for further merges. This is a soft
limit, and chunks larger than this value can be created. Must
be larger than chunk_size''',
min='100MB', max='10TB'),
Config('chunk_size', '10MB', r'''
the maximum size of the in-memory chunk of an LSM tree. This
limit is soft - it is possible for chunks to be temporarily
larger than this value. This overrides the \c memory_page_max
setting''',
min='512K', max='500MB'),
Config('merge_max', '15', r'''
the maximum number of chunks to include in a merge operation''',
min='2', max='100'),
Config('merge_min', '0', r'''
the minimum number of chunks to include in a merge operation. If
set to 0 or 1 half the value of merge_max is used''',
max='100'),
]),
]
# Per-file configuration
file_config = format_meta + [
Config('block_allocation', 'best', r'''
configure block allocation. Permitted values are \c "first" or
\c "best"; the \c "first" configuration uses a first-available
algorithm during block allocation, the \c "best" configuration
uses a best-fit algorithm''',
choices=['first', 'best',]),
Config('allocation_size', '4KB', r'''
the file unit allocation size, in bytes, must a power-of-two;
smaller values decrease the file space required by overflow
items, and the default value of 4KB is a good choice absent
requirements from the operating system or storage device''',
min='512B', max='128MB'),
Config('block_compressor', 'none', r'''
configure a compressor for file blocks. Permitted values are \c "none"
or custom compression engine name created with
WT_CONNECTION::add_compressor. If WiredTiger has builtin support for
\c "bzip2", \c "snappy", \c "lz4" or \c "zlib" compression, these names
are also available. See @ref compression for more information'''),
Config('cache_resident', 'false', r'''
do not ever evict the object's pages from cache. Not compatible with
LSM tables; see @ref tuning_cache_resident for more information''',
type='boolean'),
Config('checksum', 'uncompressed', r'''
configure block checksums; permitted values are <code>on</code>
(checksum all blocks), <code>off</code> (checksum no blocks) and
<code>uncompresssed</code> (checksum only blocks which are not
compressed for any reason). The \c uncompressed setting is for
applications which can rely on decompression to fail if a block
has been corrupted''',
choices=['on', 'off', 'uncompressed']),
Config('dictionary', '0', r'''
the maximum number of unique values remembered in the Btree
row-store leaf page value dictionary; see
@ref file_formats_compression for more information''',
min='0'),
Config('encryption', '', r'''
configure an encryptor for file blocks. When a table is created,
its encryptor is not implicitly used for any related indices
or column groups''',
type='category', subconfig=[
Config('name', 'none', r'''
Permitted values are \c "none"
or custom encryption engine name created with
WT_CONNECTION::add_encryptor.
See @ref encryption for more information'''),
Config('keyid', '', r'''
An identifier that identifies a unique instance of the encryptor.
It is stored in clear text, and thus is available when
the wiredtiger database is reopened. On the first use
of a (name, keyid) combination, the WT_ENCRYPTOR::customize
function is called with the keyid as an argument.'''),
]),
Config('format', 'btree', r'''
the file format''',
choices=['btree']),
Config('huffman_key', 'none', r'''
configure Huffman encoding for keys. Permitted values are
\c "none", \c "english", \c "utf8<file>" or \c "utf16<file>".
See @ref huffman for more information'''),
Config('huffman_value', 'none', r'''
configure Huffman encoding for values. Permitted values are
\c "none", \c "english", \c "utf8<file>" or \c "utf16<file>".
See @ref huffman for more information'''),
Config('internal_key_truncate', 'true', r'''
configure internal key truncation, discarding unnecessary
trailing bytes on internal keys (ignored for custom
collators)''',
type='boolean'),
Config('internal_page_max', '4KB', r'''
the maximum page size for internal nodes, in bytes; the size
must be a multiple of the allocation size and is significant
for applications wanting to avoid excessive L2 cache misses
while searching the tree. The page maximum is the bytes of
uncompressed data, that is, the limit is applied before any
block compression is done''',
min='512B', max='512MB'),
Config('internal_item_max', '0', r'''
historic term for internal_key_max''',
min=0, undoc=True),
Config('internal_key_max', '0', r'''
the largest key stored in an internal node, in bytes. If set, keys
larger than the specified size are stored as overflow items (which
may require additional I/O to access). The default and the maximum
allowed value are both one-tenth the size of a newly split internal
page''',
min='0'),
Config('key_gap', '10', r'''
the maximum gap between instantiated keys in a Btree leaf page,
constraining the number of keys processed to instantiate a
random Btree leaf page key''',
min='0', undoc=True),
Config('leaf_key_max', '0', r'''
the largest key stored in a leaf node, in bytes. If set, keys
larger than the specified size are stored as overflow items (which
may require additional I/O to access). The default value is
one-tenth the size of a newly split leaf page''',
min='0'),
Config('leaf_page_max', '32KB', r'''
the maximum page size for leaf nodes, in bytes; the size must
be a multiple of the allocation size, and is significant for
applications wanting to maximize sequential data transfer from
a storage device. The page maximum is the bytes of uncompressed
data, that is, the limit is applied before any block compression
is done''',
min='512B', max='512MB'),
Config('leaf_value_max', '0', r'''
the largest value stored in a leaf node, in bytes. If set, values
larger than the specified size are stored as overflow items (which
may require additional I/O to access). If the size is larger than
the maximum leaf page size, the page size is temporarily ignored
when large values are written. The default is one-half the size of
a newly split leaf page''',
min='0'),
Config('leaf_item_max', '0', r'''
historic term for leaf_key_max and leaf_value_max''',
min=0, undoc=True),
Config('log', '', r'''
the transaction log configuration for this object. Only valid if
log is enabled in ::wiredtiger_open.''',
type='category', subconfig=[
Config('enabled', 'true', r'''
if false, this object has checkpoint-level durability.''',
type='boolean'),
]),
Config('memory_page_max', '5MB', r'''
the maximum size a page can grow to in memory before being
reconciled to disk. The specified size will be adjusted to a lower
bound of <code>50 * leaf_page_max</code>, and an upper bound of
<code>cache_size / 2</code>. This limit is soft - it is possible
for pages to be temporarily larger than this value. This setting
is ignored for LSM trees, see \c chunk_size''',
min='512B', max='10TB'),
Config('os_cache_max', '0', r'''
maximum system buffer cache usage, in bytes. If non-zero, evict
object blocks from the system buffer cache after that many bytes
from this object are read or written into the buffer cache''',
min=0),
Config('os_cache_dirty_max', '0', r'''
maximum dirty system buffer cache usage, in bytes. If non-zero,
schedule writes for dirty blocks belonging to this object in the
system buffer cache after that many bytes from this object are
written into the buffer cache''',
min=0),
Config('prefix_compression', 'false', r'''
configure prefix compression on row-store leaf pages''',
type='boolean'),
Config('prefix_compression_min', '4', r'''
minimum gain before prefix compression will be used on row-store
leaf pages''',
min=0),
Config('split_deepen_min_child', '0', r'''
minimum entries in a page to consider deepening the tree. Pages
will be considered for splitting and deepening the search tree
as soon as there are more than the configured number of children
''',
type='int', undoc=True),
Config('split_deepen_per_child', '0', r'''
entries allocated per child when deepening the tree''',
type='int', undoc=True),
Config('split_pct', '75', r'''
the Btree page split size as a percentage of the maximum Btree
page size, that is, when a Btree page is split, it will be
split into smaller pages, where each page is the specified
percentage of the maximum Btree page size''',
min='25', max='100'),
]
# File metadata, including both configurable and non-configurable (internal)
file_meta = file_config + [
Config('checkpoint', '', r'''
the file checkpoint entries'''),
Config('checkpoint_lsn', '', r'''
LSN of the last checkpoint'''),
Config('id', '', r'''
the file's ID number'''),
Config('version', '(major=0,minor=0)', r'''
the file version'''),
]
table_only_config = [
Config('colgroups', '', r'''
comma-separated list of names of column groups. Each column
group is stored separately, keyed by the primary key of the
table. If no column groups are specified, all columns are
stored together in a single file. All value columns in the
table must appear in at least one column group. Each column
group must be created with a separate call to
WT_SESSION::create''', type='list'),
]
index_only_config = [
Config('extractor', 'none', r'''
configure custom extractor for indices. Permitted values are
\c "none" or an extractor name created with
WT_CONNECTION::add_extractor'''),
Config('immutable', 'false', r'''
configure the index to be immutable - that is an index is not changed
by any update to a record in the table''', type='boolean'),
]
colgroup_meta = common_meta + source_meta
index_meta = format_meta + source_meta + index_only_config + [
Config('index_key_columns', '', r'''
number of public key columns''', type='int', undoc=True),
]
table_meta = format_meta + table_only_config
# Connection runtime config, shared by conn.reconfigure and wiredtiger_open
connection_runtime_config = [
Config('async', '', r'''
asynchronous operations configuration options''',
type='category', subconfig=[
Config('enabled', 'false', r'''
enable asynchronous operation''',
type='boolean'),
Config('ops_max', '1024', r'''
maximum number of expected simultaneous asynchronous
operations''', min='1', max='4096'),
Config('threads', '2', r'''
the number of worker threads to service asynchronous requests.
Each worker thread uses a session from the configured
session_max.''',
min='1', max='20'), # !!! Must match WT_ASYNC_MAX_WORKERS
]),
Config('cache_size', '100MB', r'''
maximum heap memory to allocate for the cache. A database should
configure either \c cache_size or \c shared_cache but not both''',
min='1MB', max='10TB'),
Config('cache_overhead', '8', r'''
assume the heap allocator overhead is the specified percentage, and
adjust the cache usage by that amount (for example, if there is 10GB
of data in cache, a percentage of 10 means WiredTiger treats this as
11GB). This value is configurable because different heap allocators
have different overhead and different workloads will have different
heap allocation sizes and patterns, therefore applications may need to
adjust this value based on allocator choice and behavior in measured
workloads''',
min='0', max='30'),
Config('checkpoint', '', r'''
periodically checkpoint the database. Enabling the checkpoint server
uses a session from the configured session_max''',
type='category', subconfig=[
Config('name', '"WiredTigerCheckpoint"', r'''
the checkpoint name'''),
Config('log_size', '0', r'''
wait for this amount of log record bytes to be written to
the log between each checkpoint. A database can configure
both log_size and wait to set an upper bound for checkpoints;
setting this value above 0 configures periodic checkpoints''',
min='0', max='2GB'),
Config('wait', '0', r'''
seconds to wait between each checkpoint; setting this value
above 0 configures periodic checkpoints''',
min='0', max='100000'),
]),
Config('error_prefix', '', r'''
prefix string for error messages'''),
Config('eviction_dirty_target', '80', r'''
continue evicting until the cache has less dirty memory than the
value, as a percentage of the total cache size. Dirty pages will
only be evicted if the cache is full enough to trigger eviction''',
min=5, max=99),
Config('eviction_dirty_trigger', '95', r'''
trigger eviction when the cache is using this much memory for dirty
content, as a percentage of the total cache size. This setting only
alters behavior if it is lower than eviction_trigger''',
min=5, max=99),
Config('eviction_target', '80', r'''
continue evicting until the cache has less total memory than the
value, as a percentage of the total cache size. Must be less than
\c eviction_trigger''',
min=10, max=99),
Config('eviction_trigger', '95', r'''
trigger eviction when the cache is using this much memory, as a
percentage of the total cache size''', min=10, max=99),
Config('file_manager', '', r'''
control how file handles are managed''',
type='category', subconfig=[
Config('close_handle_minimum', '250', r'''
number of handles open before the file manager will look for handles
to close''', min=0),
Config('close_idle_time', '30', r'''
amount of time in seconds a file handle needs to be idle
before attempting to close it. A setting of 0 means that idle
handles are not closed''', min=0, max=100000),
Config('close_scan_interval', '10', r'''
interval in seconds at which to check for files that are
inactive and close them''', min=1, max=100000),
]),
Config('log', '', r'''
enable logging. Enabling logging uses three sessions from the
configured session_max''',
type='category', subconfig=[
Config('archive', 'true', r'''
automatically archive unneeded log files''',
type='boolean'),
Config('compressor', 'none', r'''
configure a compressor for log records. Permitted values are
\c "none" or custom compression engine name created with
WT_CONNECTION::add_compressor. If WiredTiger has builtin support
for \c "bzip2", \c "snappy", \c "lz4" or \c "zlib" compression,
these names are also available. See @ref compression for more
information'''),
Config('enabled', 'false', r'''
enable logging subsystem''',
type='boolean'),
Config('file_max', '100MB', r'''
the maximum size of log files''',
min='100KB', max='2GB'),
Config('path', '', r'''
the path to a directory into which the log files are written.
If the value is not an absolute path name, the files are created
relative to the database home'''),
Config('prealloc', 'true', r'''
pre-allocate log files.''',
type='boolean'),
Config('recover', 'on', r'''
run recovery or error if recovery needs to run after an
unclean shutdown.''',
choices=['error','on']),
Config('zero_fill', 'false', r'''
manually write zeroes into log files''',
type='boolean'),
]),
Config('lsm_manager', '', r'''
configure database wide options for LSM tree management. The LSM
manager is started automatically the first time an LSM tree is opened.
The LSM manager uses a session from the configured session_max.''',
type='category', subconfig=[
Config('worker_thread_max', '4', r'''
Configure a set of threads to manage merging LSM trees in
the database. Each worker thread uses a session handle from
the configured session_max''',
min='3', # !!! Must match WT_LSM_MIN_WORKERS
max='20'), # !!! Must match WT_LSM_MAX_WORKERS
Config('merge', 'true', r'''
merge LSM chunks where possible''',
type='boolean')
]),
Config('lsm_merge', 'true', r'''
merge LSM chunks where possible (deprecated)''',
type='boolean', undoc=True),
Config('eviction', '', r'''
eviction configuration options.''',
type='category', subconfig=[
Config('threads_max', '1', r'''
maximum number of threads WiredTiger will start to help evict
pages from cache. The number of threads started will vary
depending on the current eviction load. Each eviction worker
thread uses a session from the configured session_max''',
min=1, max=20),
Config('threads_min', '1', r'''
minimum number of threads WiredTiger will start to help evict
pages from cache. The number of threads currently running will
vary depending on the current eviction load''',
min=1, max=20),
]),
Config('shared_cache', '', r'''
shared cache configuration options. A database should configure
either a cache_size or a shared_cache not both. Enabling a
shared cache uses a session from the configured session_max''',
type='category', subconfig=[
Config('chunk', '10MB', r'''
the granularity that a shared cache is redistributed''',
min='1MB', max='10TB'),
Config('name', 'none', r'''
the name of a cache that is shared between databases or
\c "none" when no shared cache is configured'''),
Config('quota', '0', r'''
maximum size of cache this database can be allocated from the
shared cache. Defaults to the entire shared cache size''',
type='int'),
Config('reserve', '0', r'''
amount of cache this database is guaranteed to have
available from the shared cache. This setting is per
database. Defaults to the chunk size''', type='int'),
Config('size', '500MB', r'''
maximum memory to allocate for the shared cache. Setting
this will update the value if one is already set''',
min='1MB', max='10TB')
]),
Config('statistics', 'none', r'''
Maintain database statistics, which may impact performance.
Choosing "all" maintains all statistics regardless of cost,
"fast" maintains a subset of statistics that are relatively
inexpensive, "none" turns off all statistics. The "clear"
configuration resets statistics after they are gathered,
where appropriate (for example, a cache size statistic is
not cleared, while the count of cursor insert operations will
be cleared). When "clear" is configured for the database,
gathered statistics are reset each time a statistics cursor
is used to gather statistics, as well as each time statistics
are logged using the \c statistics_log configuration. See
@ref statistics for more information''',
type='list', choices=['all', 'fast', 'none', 'clear']),
Config('statistics_log', '', r'''
log any statistics the database is configured to maintain,
to a file. See @ref statistics for more information. Enabling
the statistics log server uses a session from the configured
session_max''',
type='category', subconfig=[
Config('on_close', 'false', r'''log statistics on database close''',
type='boolean'),
Config('path', '"WiredTigerStat.%d.%H"', r'''
the pathname to a file into which the log records are written,
may contain ISO C standard strftime conversion specifications.
If the value is not an absolute path name, the file is created
relative to the database home'''),
Config('sources', '', r'''
if non-empty, include statistics for the list of data source
URIs, if they are open at the time of the statistics logging.
The list may include URIs matching a single data source
("table:mytable"), or a URI matching all data sources of a
particular type ("table:")''',
type='list'),
Config('timestamp', '"%b %d %H:%M:%S"', r'''
a timestamp prepended to each log record, may contain strftime
conversion specifications'''),
Config('wait', '0', r'''
seconds to wait between each write of the log records; setting
this value above 0 configures statistics logging''',
min='0', max='100000'),
]),
Config('verbose', '', r'''
enable messages for various events. Only available if WiredTiger
is configured with --enable-verbose. Options are given as a
list, such as <code>"verbose=[evictserver,read]"</code>''',
type='list', choices=[
'api',
'block',
'checkpoint',
'compact',
'evict',
'evictserver',
'fileops',
'log',
'lsm',
'lsm_manager',
'metadata',
'mutex',
'overflow',
'read',
'reconcile',
'recovery',
'salvage',
'shared_cache',
'split',
'temporary',
'transaction',
'verify',
'version',
'write']),
]
session_config = [
Config('isolation', 'read-committed', r'''
the default isolation level for operations in this session''',
choices=['read-uncommitted', 'read-committed', 'snapshot']),
]
common_wiredtiger_open = [
Config('buffer_alignment', '-1', r'''
in-memory alignment (in bytes) for buffers used for I/O. The
default value of -1 indicates a platform-specific alignment value
should be used (4KB on Linux systems when direct I/O is configured,
zero elsewhere)''',
min='-1', max='1MB'),
Config('checkpoint_sync', 'true', r'''
flush files to stable storage when closing or writing
checkpoints''',
type='boolean'),
Config('direct_io', '', r'''
Use \c O_DIRECT to access files. Options are given as a list,
such as <code>"direct_io=[data]"</code>. Configuring
\c direct_io requires care, see @ref
tuning_system_buffer_cache_direct_io for important warnings.
Including \c "data" will cause WiredTiger data files to use
\c O_DIRECT, including \c "log" will cause WiredTiger log files
to use \c O_DIRECT, and including \c "checkpoint" will cause
WiredTiger data files opened at a checkpoint (i.e: read only) to
use \c O_DIRECT''',
type='list', choices=['checkpoint', 'data', 'log']),
Config('encryption', '', r'''
configure an encryptor for system wide metadata and logs.
If a system wide encryptor is set, it is also used for
encrypting data files and tables, unless encryption configuration
is explicitly set for them when they are created with
WT_SESSION::create''',
type='category', subconfig=[
Config('name', 'none', r'''
Permitted values are \c "none"
or custom encryption engine name created with
WT_CONNECTION::add_encryptor.
See @ref encryption for more information'''),
Config('keyid', '', r'''
An identifier that identifies a unique instance of the encryptor.
It is stored in clear text, and thus is available when
the wiredtiger database is reopened. On the first use
of a (name, keyid) combination, the WT_ENCRYPTOR::customize
function is called with the keyid as an argument.'''),
Config('secretkey', '', r'''
A string that is passed to the WT_ENCRYPTOR::customize function.
It is never stored in clear text, so must be given to any
subsequent ::wiredtiger_open calls to reopen the database.
It must also be provided to any "wt" commands used with
this database.'''),
]),
Config('extensions', '', r'''
list of shared library extensions to load (using dlopen).
Any values specified to an library extension are passed to
WT_CONNECTION::load_extension as the \c config parameter
(for example,
<code>extensions=(/path/ext.so={entry=my_entry})</code>)''',
type='list'),
Config('file_extend', '', r'''
file extension configuration. If set, extend files of the set
type in allocations of the set size, instead of a block at a
time as each new block is written. For example,
<code>file_extend=(data=16MB)</code>''',
type='list', choices=['data', 'log']),
Config('hazard_max', '1000', r'''
maximum number of simultaneous hazard pointers per session
handle''',
min='15'),
Config('mmap', 'true', r'''
Use memory mapping to access files when possible''',
type='boolean'),
Config('multiprocess', 'false', r'''
permit sharing between processes (will automatically start an
RPC server for primary processes and use RPC for secondary
processes). <b>Not yet supported in WiredTiger</b>''',
type='boolean'),
Config('session_max', '100', r'''
maximum expected number of sessions (including server
threads)''',
min='1'),
Config('session_scratch_max', '2MB', r'''
maximum memory to cache in each session''',
type='int', undoc=True),
Config('transaction_sync', '', r'''
how to sync log records when the transaction commits''',
type='category', subconfig=[
Config('enabled', 'false', r'''
whether to sync the log on every commit by default, can be
overridden by the \c sync setting to
WT_SESSION::commit_transaction''',
type='boolean'),
Config('method', 'fsync', r'''
the method used to ensure log records are stable on disk, see
@ref tune_durability for more information''',
choices=['dsync', 'fsync', 'none']),
]),
]
cursor_runtime_config = [
Config('append', 'false', r'''
append the value as a new record, creating a new record
number key; valid only for cursors with record number keys''',
type='boolean'),
Config('overwrite', 'true', r'''
configures whether the cursor's insert, update and remove
methods check the existing state of the record. If \c overwrite
is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
if the record exists, WT_CURSOR::update and WT_CURSOR::remove
fail with ::WT_NOTFOUND if the record does not exist''',
type='boolean'),
]
methods = {
'file.meta' : Method(file_meta),
'colgroup.meta' : Method(colgroup_meta),
'index.meta' : Method(index_meta),
'table.meta' : Method(table_meta),
'WT_CURSOR.close' : Method([]),
'WT_CURSOR.reconfigure' : Method(cursor_runtime_config),
'WT_SESSION.close' : Method([]),
'WT_SESSION.compact' : Method([
Config('timeout', '1200', r'''
maximum amount of time to allow for compact in seconds. The
actual amount of time spent in compact may exceed the configured
value. A value of zero disables the timeout''',
type='int'),
]),
'WT_SESSION.create' : Method(file_config + lsm_config + source_meta +
index_only_config + table_only_config + [
Config('exclusive', 'false', r'''
fail if the object exists. When false (the default), if the
object exists, check that its settings match the specified
configuration''',
type='boolean'),
]),
'WT_SESSION.drop' : Method([
Config('force', 'false', r'''
return success if the object does not exist''',
type='boolean'),
Config('remove_files', 'true', r'''
should the underlying files be removed?''',
type='boolean'),
]),
'WT_SESSION.log_flush' : Method([
Config('sync', 'on', r'''
forcibly flush the log and wait for it to achieve the synchronization
level specified. The \c background setting initiates a background
synchronization intended to be used with a later call to
WT_SESSION::transaction_sync. The \c off setting forces any
buffered log records to be written to the file system. The
\c on setting forces log records to be written to the storage device''',
choices=['background', 'off', 'on']),
]),
'WT_SESSION.log_printf' : Method([]),
'WT_SESSION.open_cursor' : Method(cursor_runtime_config + [
Config('bulk', 'false', r'''
configure the cursor for bulk-loading, a fast, initial load
path (see @ref tune_bulk_load for more information). Bulk-load
may only be used for newly created objects and cursors
configured for bulk-load only support the WT_CURSOR::insert
and WT_CURSOR::close methods. When bulk-loading row-store
objects, keys must be loaded in sorted order. The value is
usually a true/false flag; when bulk-loading fixed-length
column store objects, the special value \c bitmap allows
chunks of a memory resident bitmap to be loaded directly into
a file by passing a \c WT_ITEM to WT_CURSOR::set_value where
the \c size field indicates the number of records in the
bitmap (as specified by the object's \c value_format
configuration). Bulk-loaded bitmap values must end on a byte
boundary relative to the bit count (except for the last set
of values loaded)'''),
Config('checkpoint', '', r'''
the name of a checkpoint to open (the reserved name
"WiredTigerCheckpoint" opens the most recent internal
checkpoint taken for the object). The cursor does not
support data modification'''),
Config('dump', '', r'''
configure the cursor for dump format inputs and outputs: "hex"
selects a simple hexadecimal format, "json" selects a JSON format
with each record formatted as fields named by column names if
available, and "print" selects a format where only non-printing
characters are hexadecimal encoded. These formats are compatible
with the @ref util_dump and @ref util_load commands''',
choices=['hex', 'json', 'print']),
Config('next_random', 'false', r'''
configure the cursor to return a pseudo-random record from
the object; valid only for row-store cursors. Cursors
configured with \c next_random=true only support the
WT_CURSOR::next and WT_CURSOR::close methods. See @ref
cursor_random for details''',
type='boolean'),
Config('raw', 'false', r'''
ignore the encodings for the key and value, manage data as if
the formats were \c "u". See @ref cursor_raw for details''',
type='boolean'),
Config('readonly', 'false', r'''
only query operations are supported by this cursor. An error is
returned if a modification is attempted using the cursor. The
default is false for all cursor types except for log and metadata
cursors''',
type='boolean'),
Config('skip_sort_check', 'false', r'''
skip the check of the sort order of each bulk-loaded key''',
type='boolean', undoc=True),
Config('statistics', '', r'''
Specify the statistics to be gathered. Choosing "all" gathers
statistics regardless of cost and may include traversing on-disk files;
"fast" gathers a subset of relatively inexpensive statistics. The
selection must agree with the database \c statistics configuration
specified to ::wiredtiger_open or WT_CONNECTION::reconfigure. For
example, "all" or "fast" can be configured when the database is
configured with "all", but the cursor open will fail if "all" is
specified when the database is configured with "fast", and the cursor
open will fail in all cases when the database is configured with
"none". If "size" is configured, only the underlying size of the
object on disk is filled in and the object is not opened. If \c
statistics is not configured, the default configuration is the database
configuration. The "clear" configuration resets statistics after
gathering them, where appropriate (for example, a cache size statistic
is not cleared, while the count of cursor insert operations will be
cleared). See @ref statistics for more information''',
type='list', choices=['all', 'fast', 'clear', 'size']),
Config('target', '', r'''
if non-empty, backup the list of objects; valid only for a
backup data source''',
type='list'),
]),
'WT_SESSION.rename' : Method([]),
'WT_SESSION.reset' : Method([]),
'WT_SESSION.salvage' : Method([
Config('force', 'false', r'''
force salvage even of files that do not appear to be WiredTiger
files''',
type='boolean'),
]),
'WT_SESSION.strerror' : Method([]),
'WT_SESSION.transaction_sync' : Method([
Config('timeout_ms', '1200000', r'''
maximum amount of time to wait for background sync to complete in
milliseconds. A value of zero disables the timeout and returns
immediately.''',
type='int'),
]),
'WT_SESSION.truncate' : Method([]),
'WT_SESSION.upgrade' : Method([]),
'WT_SESSION.verify' : Method([
Config('dump_address', 'false', r'''
Display addresses and page types as pages are verified,
using the application's message handler, intended for debugging''',
type='boolean'),
Config('dump_blocks', 'false', r'''
Display the contents of on-disk blocks as they are verified,
using the application's message handler, intended for debugging''',
type='boolean'),
Config('dump_offsets', '', r'''
Display the contents of specific on-disk blocks,
using the application's message handler, intended for debugging''',
type='list'),
Config('dump_pages', 'false', r'''
Display the contents of in-memory pages as they are verified,
using the application's message handler, intended for debugging''',
type='boolean'),
Config('dump_shape', 'false', r'''
Display the shape of the tree after verification,
using the application's message handler, intended for debugging''',
type='boolean'),
Config('strict', 'false', r'''
Treat any verification problem as an error; by default, verify will
warn, but not fail, in the case of errors that won't affect future
behavior (for example, a leaked block)''',
type='boolean')
]),
'WT_SESSION.begin_transaction' : Method([
Config('isolation', '', r'''
the isolation level for this transaction; defaults to the
session's isolation level''',
choices=['read-uncommitted', 'read-committed', 'snapshot']),
Config('name', '', r'''
name of the transaction for tracing and debugging'''),
Config('priority', 0, r'''
priority of the transaction for resolving conflicts.
Transactions with higher values are less likely to abort''',
min='-100', max='100'),
Config('snapshot', '', r'''
use a named, in-memory snapshot, see
@ref transaction_named_snapshots'''),
Config('sync', '', r'''
whether to sync log records when the transaction commits,
inherited from ::wiredtiger_open \c transaction_sync''',
type='boolean'),
]),
'WT_SESSION.commit_transaction' : Method([
Config('sync', '', r'''
override whether to sync log records when the transaction commits,
inherited from ::wiredtiger_open \c transaction_sync.
The \c background setting initiates a background
synchronization intended to be used with a later call to
WT_SESSION::transaction_sync. The \c off setting does not
wait for record to be written or synchronized. The
\c on setting forces log records to be written to the storage device''',
choices=['background', 'off', 'on']),
]),
'WT_SESSION.rollback_transaction' : Method([]),
'WT_SESSION.checkpoint' : Method([
Config('drop', '', r'''
specify a list of checkpoints to drop.
The list may additionally contain one of the following keys:
\c "from=all" to drop all checkpoints,
\c "from=<checkpoint>" to drop all checkpoints after and
including the named checkpoint, or
\c "to=<checkpoint>" to drop all checkpoints before and
including the named checkpoint. Checkpoints cannot be
dropped while a hot backup is in progress or if open in
a cursor''', type='list'),
Config('force', 'false', r'''
by default, checkpoints may be skipped if the underlying object
has not been modified, this option forces the checkpoint''',
type='boolean'),
Config('name', '', r'''
if set, specify a name for the checkpoint (note that checkpoints
including LSM trees may not be named)'''),
Config('target', '', r'''
if non-empty, checkpoint the list of objects''', type='list'),
]),
'WT_SESSION.snapshot' : Method([
Config('drop', '', r'''
if non-empty, specifies which snapshots to drop. Where a group
of snapshots are being dropped, the order is based on snapshot
creation order not alphanumeric name order''',
type='category', subconfig=[
Config('all', 'false', r'''
drop all named snapshots''', type='boolean'),
Config('before', '', r'''
drop all snapshots up to but not including the specified name'''),
Config('names', '', r'''
drop specific named snapshots''', type='list'),
Config('to', '', r'''
drop all snapshots up to and including the specified name.'''),
]),
Config('name', '', r'''specify a name for the snapshot'''),
]),
'WT_CONNECTION.add_collator' : Method([]),
'WT_CONNECTION.add_compressor' : Method([]),
'WT_CONNECTION.add_data_source' : Method([]),
'WT_CONNECTION.add_encryptor' : Method([]),
'WT_CONNECTION.add_extractor' : Method([]),
'WT_CONNECTION.async_new_op' : Method([
Config('append', 'false', r'''
append the value as a new record, creating a new record
number key; valid only for operations with record number keys''',
type='boolean'),
Config('overwrite', 'true', r'''
configures whether the cursor's insert, update and remove
methods check the existing state of the record. If \c overwrite
is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
if the record exists, WT_CURSOR::update and WT_CURSOR::remove
fail with ::WT_NOTFOUND if the record does not exist''',
type='boolean'),
Config('raw', 'false', r'''
ignore the encodings for the key and value, manage data as if
the formats were \c "u". See @ref cursor_raw for details''',
type='boolean'),
Config('timeout', '1200', r'''
maximum amount of time to allow for compact in seconds. The
actual amount of time spent in compact may exceed the configured
value. A value of zero disables the timeout''',
type='int'),
]),
'WT_CONNECTION.close' : Method([
Config('leak_memory', 'false', r'''
don't free memory during close''',
type='boolean'),
]),
'WT_CONNECTION.reconfigure' : Method(connection_runtime_config),
'WT_CONNECTION.load_extension' : Method([
Config('config', '', r'''
configuration string passed to the entry point of the
extension as its WT_CONFIG_ARG argument'''),
Config('entry', 'wiredtiger_extension_init', r'''
the entry point of the extension, called to initialize the
extension when it is loaded. The signature of the function
must match ::wiredtiger_extension_init'''),
Config('terminate', 'wiredtiger_extension_terminate', r'''
an optional function in the extension that is called before
the extension is unloaded during WT_CONNECTION::close. The
signature of the function must match
::wiredtiger_extension_terminate'''),
]),
'WT_CONNECTION.open_session' : Method(session_config),
'WT_SESSION.reconfigure' : Method(session_config),
# There are 4 variants of the wiredtiger_open configurations.
# wiredtiger_open:
# Configuration values allowed in the application's configuration
# argument to the wiredtiger_open call.
# wiredtiger_open_basecfg:
# Configuration values allowed in the WiredTiger.basecfg file (remove
# creation-specific configuration strings and add a version string).
# wiredtiger_open_usercfg:
# Configuration values allowed in the WiredTiger.config file (remove
# creation-specific configuration strings).
# wiredtiger_open_all:
# All of the above configuration values combined
'wiredtiger_open' : Method(
connection_runtime_config +
common_wiredtiger_open + [
Config('config_base', 'true', r'''
write the base configuration file if creating the database. If
\c false in the config passed directly to ::wiredtiger_open, will
ignore any existing base configuration file in addition to not creating
one. See @ref config_base for more information''',
type='boolean'),
Config('create', 'false', r'''
create the database if it does not exist''',
type='boolean'),
Config('exclusive', 'false', r'''
fail if the database already exists, generally used with the
\c create option''',
type='boolean'),
Config('use_environment_priv', 'false', r'''
use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
variables regardless of whether or not the process is running
with special privileges. See @ref home for more information''',
type='boolean'),
]),
'wiredtiger_open_basecfg' : Method(
connection_runtime_config +
common_wiredtiger_open + [
Config('version', '(major=0,minor=0)', r'''
the file version'''),
]),
'wiredtiger_open_usercfg' : Method(
connection_runtime_config +
common_wiredtiger_open
),
'wiredtiger_open_all' : Method(
connection_runtime_config +
common_wiredtiger_open + [
Config('config_base', 'true', r'''
write the base configuration file if creating the database. If
\c false in the config passed directly to ::wiredtiger_open, will
ignore any existing base configuration file in addition to not creating
one. See @ref config_base for more information''',
type='boolean'),
Config('create', 'false', r'''
create the database if it does not exist''',
type='boolean'),
Config('exclusive', 'false', r'''
fail if the database already exists, generally used with the
\c create option''',
type='boolean'),
Config('use_environment_priv', 'false', r'''
use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
variables regardless of whether or not the process is running
with special privileges. See @ref home for more information''',
type='boolean'),
Config('version', '(major=0,minor=0)', r'''
the file version'''),
]),
}
| 46.992474 | 80 | 0.628811 |
1d4dc85ab34da5efbe605f09594be33c61787e12 | 191 | py | Python | tests/test___init__.py | zyxue/ncbitax2lin | 95dc13f6a8ef9a18b5569b877f7f2c2cce068412 | [
"MIT"
] | 103 | 2016-11-05T19:47:03.000Z | 2022-01-13T00:47:16.000Z | tests/test___init__.py | zyxue/ncbitax2lin | 95dc13f6a8ef9a18b5569b877f7f2c2cce068412 | [
"MIT"
] | 14 | 2017-02-28T20:56:41.000Z | 2022-03-20T18:58:05.000Z | tests/test___init__.py | zyxue/ncbitax2lin | 95dc13f6a8ef9a18b5569b877f7f2c2cce068412 | [
"MIT"
] | 21 | 2017-05-16T08:44:47.000Z | 2021-11-12T03:31:19.000Z | """tests for __init__.py"""
# pylint: disable=protected-access, missing-function-docstring
from ncbitax2lin import __version__
def test_version() -> None:
    """Verify that the package exposes the expected release number."""
    expected_version = "2.0.2"
    assert __version__ == expected_version
| 23.875 | 62 | 0.73822 |
aa452e2afe481acddb6986a9fad72e52e6b0cbc5 | 34,420 | py | Python | poem/core/keypoint_utils_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 2 | 2021-02-06T12:31:50.000Z | 2021-02-06T12:37:11.000Z | poem/core/keypoint_utils_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:34:18.000Z | 2022-02-10T06:34:18.000Z | poem/core/keypoint_utils_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 1 | 2021-07-30T22:42:46.000Z | 2021-07-30T22:42:46.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for keypoint utility functions."""
import math
import tensorflow as tf
from poem.core import keypoint_profiles
from poem.core import keypoint_utils
class KeypointUtilsTest(tf.test.TestCase):
  """Unit tests for the keypoint utility functions in `keypoint_utils`.

  Each test builds small hand-computed point tensors, calls one utility
  function, and checks the result against precomputed literals. Inline
  `# Shape = [...]` comments document tensor shapes at each step.
  """

  def test_get_single_points(self):
    """Verifies `get_points` with a single keypoint index."""
    # Shape = [2, 1, 3, 2].
    points = [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]],
              [[[10.0, 11.0], [12.0, 13.0], [14.0, 15.0]]]]
    indices = [1]
    # Shape = [2, 1, 1, 2].
    points = keypoint_utils.get_points(points, indices)
    self.assertAllEqual(points, [[[[2.0, 3.0]]], [[[12.0, 13.0]]]])

  def test_get_center_points(self):
    """Verifies `get_points` averages points when given multiple indices."""
    # Shape = [2, 1, 3, 2].
    points = [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]],
              [[[10.0, 11.0], [12.0, 13.0], [14.0, 15.0]]]]
    indices = [0, 1]
    # Shape = [2, 1, 1, 2].
    points = keypoint_utils.get_points(points, indices)
    self.assertAllClose(points, [[[[1.0, 2.0]]], [[[11.0, 12.0]]]])

  def test_swap_x_y_2d(self):
    """Verifies `swap_x_y` exchanges the two coordinates of 2D points."""
    # Shape = [2, 3, 2]
    points = tf.constant([[[1.0, 2.0], [3.0, 6.0], [5.0, 6.0]],
                          [[11.0, 12.0], [13.0, 16.0], [15.0, 16.0]]])
    swapped_points = keypoint_utils.swap_x_y(points)
    self.assertAllClose(swapped_points,
                        [[[2.0, 1.0], [6.0, 3.0], [6.0, 5.0]],
                         [[12.0, 11.0], [16.0, 13.0], [16.0, 15.0]]])

  def test_swap_x_y_3d(self):
    """Verifies `swap_x_y` swaps only the first two coords of 3D points."""
    # Shape = [2, 3, 3]
    points = tf.constant([[[1.0, 2.0, 3.0], [3.0, 6.0, 7.0], [5.0, 6.0, 7.0]],
                          [[11.0, 12.0, 13.0], [13.0, 16.0, 17.0],
                           [15.0, 16.0, 17.0]]])
    swapped_points = keypoint_utils.swap_x_y(points)
    self.assertAllClose(
        swapped_points,
        [[[2.0, 1.0, 3.0], [6.0, 3.0, 7.0], [6.0, 5.0, 7.0]],
         [[12.0, 11.0, 13.0], [16.0, 13.0, 17.0], [16.0, 15.0, 17.0]]])

  def test_override_points(self):
    """Verifies `override_points` writes source-index centers to targets."""
    # Shape = [2, 3, 3]
    points = tf.constant([[[1.0, 2.0, 3.0], [3.0, 6.0, 7.0], [5.0, 6.0, 7.0]],
                          [[11.0, 12.0, 13.0], [13.0, 16.0, 17.0],
                           [15.0, 16.0, 17.0]]])
    updated_points = keypoint_utils.override_points(
        points, from_indices_list=[[0, 2], [1]], to_indices=[0, 1])
    self.assertAllClose(
        updated_points,
        [[[3.0, 5.0, 6.0], [3.0, 5.0, 6.0], [5.0, 6.0, 7.0]],
         [[13.0, 15.0, 16.0], [13.0, 15.0, 16.0], [15.0, 16.0, 17.0]]])

  def test_naive_normalize_points(self):
    """Verifies `naive_normalize_points`; fully-masked rows become zeros."""
    # Shape = [2, 3, 2]
    points = tf.constant([[[1.0, 2.0], [3.0, 6.0], [5.0, 6.0]],
                          [[11.0, 12.0], [13.0, 16.0], [15.0, 16.0]]])
    # Shape = [2, 3].
    point_masks = tf.constant([[True, True, False], [False, False, False]])
    # Shape = [2, 3, 2].
    normalized_points = keypoint_utils.naive_normalize_points(
        points, point_masks)
    self.assertAllClose(normalized_points,
                        [[[-0.25, -0.5], [0.25, 0.5], [0.0, 0.0]],
                         [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]])

  def test_normalize_points(self):
    """Verifies `normalize_points` output plus its offset/scale side outputs."""
    # Shape = [2, 1, 3, 2].
    points = [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]],
              [[[10.0, 11.0], [12.0, 13.0], [14.0, 15.0]]]]
    offset_point_indices = [0, 1]
    scale_distance_point_index_pairs = [([0, 1], [1]), ([0], [1, 2])]
    normalized_points, offset_points, scale_distances = (
        keypoint_utils.normalize_points(
            points,
            offset_point_indices=offset_point_indices,
            scale_distance_point_index_pairs=scale_distance_point_index_pairs,
            scale_distance_reduction_fn=tf.math.reduce_sum,
            scale_unit=1.0))
    sqrt_2 = 1.414213562
    self.assertAllClose(normalized_points, [
        [[
            [-0.25 / sqrt_2, -0.25 / sqrt_2],
            [0.25 / sqrt_2, 0.25 / sqrt_2],
            [0.75 / sqrt_2, 0.75 / sqrt_2],
        ]],
        [[
            [-0.25 / sqrt_2, -0.25 / sqrt_2],
            [0.25 / sqrt_2, 0.25 / sqrt_2],
            [0.75 / sqrt_2, 0.75 / sqrt_2],
        ]],
    ])
    self.assertAllClose(offset_points, [[[[1.0, 2.0]]], [[[11.0, 12.0]]]])
    self.assertAllClose(scale_distances,
                        [[[[4.0 * sqrt_2]]], [[[4.0 * sqrt_2]]]])

  def test_centralize_masked_points(self):
    """Verifies masked-out points are replaced by the masked-in centroid."""
    # Shape = [2, 4, 2].
    points = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
              [[9.0, 10.0], [11.0, 12.0], [13.0, 14.0], [15.0, 16.0]]]
    # Shape = [2, 4].
    point_masks = [[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]]
    # Shape = [2, 4, 2].
    centralized_points = keypoint_utils.centralize_masked_points(
        points, point_masks)
    self.assertAllClose(
        centralized_points,
        [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [3.0, 4.0]],
         [[9.0, 10.0], [11.0, 12.0], [10.0, 11.0], [10.0, 11.0]]])

  def test_standardize_points(self):
    """Verifies `standardize_points` output and the returned offsets/scales."""
    # Shape = [2, 3, 2].
    x = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                     [[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]]])
    standardized_x, offsets, scales = keypoint_utils.standardize_points(x)
    self.assertAllClose(standardized_x,
                        [[[-0.5, -0.5], [0.0, 0.0], [0.5, 0.5]],
                         [[-0.5, -0.5], [0.0, 0.0], [0.5, 0.5]]])
    self.assertAllClose(offsets, [[[3.0, 4.0]], [[6.0, 8.0]]])
    self.assertAllClose(scales, [[[4.0]], [[8.0]]])

  def test_compute_procrustes_alignment_params(self):
    """Verifies rotation/scale/translation from Procrustes alignment."""
    # Shape = [3, 4, 3].
    target_points = tf.constant([[[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                  [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0]],
                                 [[1.0, 0.0, 1.0], [5.0, 2.0, 2.0],
                                  [4.0, 0.0, 0.0], [-1.0, 3.0, -2.0]],
                                 [[2.0, 0.0, 2.0], [10.0, 4.0, 4.0],
                                  [8.0, 0.0, 0.0], [-2.0, 6.0, -4.0]]])
    source_points = tf.constant([[[3.0, 1.0, 5.0], [-2.0, 3.0, 0.0],
                                  [1.0, -1.0, 1.0], [8.0, 3.0, -2.0]],
                                 [[3.0, 5.0, 1.0], [-2.0, 0.0, 3.0],
                                  [1.0, 1.0, -1.0], [8.0, -2.0, 3.0]],
                                 [[6.0, 10.0, 2.0], [-4.0, 0.0, 6.0],
                                  [2.0, 2.0, -2.0], [16.0, -4.0, 6.0]]])
    rotations, scales, translations = (
        keypoint_utils.compute_procrustes_alignment_params(
            target_points, source_points))
    self.assertAllClose(rotations, [[[-0.87982, -0.47514731, 0.01232074],
                                     [-0.31623112, 0.60451691, 0.73113418],
                                     [-0.35484453, 0.63937027, -0.68212243]],
                                    [[-0.87982, 0.01232074, -0.47514731],
                                     [-0.35484453, -0.68212243, 0.63937027],
                                     [-0.31623112, 0.73113418, 0.60451691]],
                                    [[-0.87982, 0.01232074, -0.47514731],
                                     [-0.35484453, -0.68212243, 0.63937027],
                                     [-0.31623112, 0.73113418, 0.60451691]]])
    self.assertAllClose(
        scales, [[[0.63716284347]], [[0.63716284347]], [[0.63716284347]]])
    self.assertAllClose(translations, [[[4.17980137, 0.02171898, 0.96621997]],
                                       [[4.17980137, 0.96621997, 0.02171898]],
                                       [[8.35960274, 1.93243994, 0.04343796]]])

  def test_compute_procrustes_alignment_params_with_masks(self):
    """Verifies masked (junk) points do not affect the alignment params."""
    # Shape = [3, 6, 3].
    target_points = tf.constant([[[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                  [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0],
                                  [100.0, 200.0, 300.0], [400.0, 500.0, 600.0]],
                                 [[1.0, 0.0, 1.0], [5.0, 2.0, 2.0],
                                  [700.0, 800.0, 900.0], [800.0, 700.0, 600.0],
                                  [4.0, 0.0, 0.0], [-1.0, 3.0, -2.0]],
                                 [[2.0, 0.0, 2.0], [500.0, 400.0, 300.0],
                                  [10.0, 4.0, 4.0], [200.0, 100.0, 200.0],
                                  [8.0, 0.0, 0.0], [-2.0, 6.0, -4.0]]])
    source_points = tf.constant([[[3.0, 1.0, 5.0], [-2.0, 3.0, 0.0],
                                  [1.0, -1.0, 1.0], [8.0, 3.0, -2.0],
                                  [300.0, 400.0, 500.0], [600.0, 700.0, 800.0]],
                                 [[3.0, 5.0, 1.0], [-2.0, 0.0, 3.0],
                                  [900.0, 800.0, 700.0], [600.0, 500.0, 400.0],
                                  [1.0, 1.0, -1.0], [8.0, -2.0, 3.0]],
                                 [[6.0, 10.0, 2.0], [300.0, 200.0, 100.0],
                                  [-4.0, 0.0, 6.0], [200.0, 300.0, 400.0],
                                  [2.0, 2.0, -2.0], [16.0, -4.0, 6.0]]])
    # Shape = [3, 6].
    point_masks = tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                               [1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
                               [1.0, 0.0, 1.0, 0.0, 1.0, 1.0]])
    rotations, scales, translations = (
        keypoint_utils.compute_procrustes_alignment_params(
            target_points, source_points, point_masks=point_masks))
    # Expected values match the unmasked test above, since the masked entries
    # are the only additions.
    self.assertAllClose(rotations, [[[-0.87982, -0.47514731, 0.01232074],
                                     [-0.31623112, 0.60451691, 0.73113418],
                                     [-0.35484453, 0.63937027, -0.68212243]],
                                    [[-0.87982, 0.01232074, -0.47514731],
                                     [-0.35484453, -0.68212243, 0.63937027],
                                     [-0.31623112, 0.73113418, 0.60451691]],
                                    [[-0.87982, 0.01232074, -0.47514731],
                                     [-0.35484453, -0.68212243, 0.63937027],
                                     [-0.31623112, 0.73113418, 0.60451691]]])
    self.assertAllClose(
        scales, [[[0.63716284347]], [[0.63716284347]], [[0.63716284347]]])
    self.assertAllClose(translations, [[[4.17980137, 0.02171898, 0.96621997]],
                                       [[4.17980137, 0.96621997, 0.02171898]],
                                       [[8.35960274, 1.93243994, 0.04343796]]])

  def test_compute_mpjpes_case_1(self):
    """Verifies MPJPE on batched 2D points with constant offset."""
    # Shape = [2, 3, 2].
    lhs_points = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                              [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]])
    rhs_points = tf.constant([[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]],
                              [[8.0, 9.0], [10.0, 11.0], [12.0, 13.0]]])
    mpjpes = keypoint_utils.compute_mpjpes(lhs_points, rhs_points)
    self.assertAllClose(mpjpes, [1.41421356237, 1.41421356237])

  def test_compute_mpjpes_case_2(self):
    """Verifies MPJPE on unbatched 3D points with varying offsets."""
    lhs_points = tf.constant([[0.0, 1.0, 2.0], [2.0, 3.0, 4.0]])
    rhs_points = tf.constant([[1.0, 0.0, 2.0], [2.0, -1.0, 3.0]])
    mpjpes = keypoint_utils.compute_mpjpes(lhs_points, rhs_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 2.76866, places=5)

  def test_compute_mpjpes_with_point_masks(self):
    """Verifies MPJPE weighting by point masks; all-zero masks give 0."""
    # Shape = [2, 3, 2].
    lhs_points = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                              [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]])
    rhs_points = tf.constant([[[2.0, 3.0], [4.0, 5.0], [7.0, 8.0]],
                              [[8.0, 9.0], [10.0, 11.0], [12.0, 13.0]]])
    # Shape = [2, 3].
    point_masks = tf.constant([[1.0, 0.0, 0.5], [0.0, 0.0, 0.0]])
    mpjpes = keypoint_utils.compute_mpjpes(
        lhs_points, rhs_points, point_masks=point_masks)
    self.assertAllClose(mpjpes, [1.88561808316, 0.0])

  def test_compute_procrustes_aligned_mpjpes_case_1(self):
    """Verifies batched Procrustes-aligned MPJPE (both batch directions)."""
    target_points = tf.constant([[[1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
                                  [1.0, 1.0, 1.0]],
                                 [[2.0, 2.0, 2.0], [-1.5, -1.0, 0.0],
                                  [2.5, 1.3, 1.4]]])
    source_points = tf.constant([[[2.0, 2.0, 2.0], [-1.5, -1.0, 0.0],
                                  [2.5, 1.3, 1.4]],
                                 [[1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
                                  [1.0, 1.0, 1.0]]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAllClose(mpjpes, [0.133016, 0.3496029])

  def test_compute_procrustes_aligned_mpjpes_case_2(self):
    """Identical point sets align with zero error."""
    target_points = tf.constant([[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                 [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0]])
    source_points = tf.constant([[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                 [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 0.0, places=5)

  def test_compute_procrustes_aligned_mpjpes_case_3(self):
    """Perturbed point set leaves a nonzero residual after alignment."""
    target_points = tf.constant([[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                 [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0]])
    source_points = tf.constant([[1.5, 0.5, 0.0], [5.0, 2.0, 2.2],
                                 [4.1, 0.0, -1.0], [-1.0, -2.5, -2.0]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 1.00227, places=5)

  def test_compute_procrustes_aligned_mpjpes_case_4(self):
    """A similarity-transformed copy aligns back with zero error."""
    target_points = tf.constant([[1.0, 1.0, 0.0], [5.0, 2.0, 2.0],
                                 [4.0, 0.0, 0.0], [-1.0, -2.0, 3.0]])
    source_points = tf.constant([[-10.0, -24.5, -49.5], [-9.0, -22.5, -49.0],
                                 [-10.0, -23.0, -50.0], [-8.5, -25.5, -51.0]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 0.0, places=5)

  def test_compute_procrustes_aligned_mpjpes_case_5(self):
    """Unbatched version of case 1, first pair."""
    target_points = tf.constant([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
                                 [1.0, 1.0, 1.0]])
    source_points = tf.constant([[2.0, 2.0, 2.0], [-1.5, -1.0, 0.0],
                                 [2.5, 1.3, 1.4]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 0.133016, places=5)

  def test_compute_procrustes_aligned_mpjpes_case_6(self):
    """Unbatched version of case 1, second pair (reversed roles)."""
    target_points = tf.constant([[2.0, 2.0, 2.0], [-1.5, -1.0, 0.0],
                                 [2.5, 1.3, 1.4]])
    source_points = tf.constant([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
                                 [1.0, 1.0, 1.0]])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points)
    self.assertAlmostEqual(self.evaluate(mpjpes), 0.3496029, places=5)

  def test_compute_procrustes_aligned_mpjpes_with_masks(self):
    """Masked junk points must not change the aligned MPJPE (see case 6)."""
    # Shape = [5, 3].
    target_points = tf.constant([[2.0, 2.0, 2.0], [-1.5, -1.0, 0.0],
                                 [100.0, 200.0, 300.0], [2.5, 1.3, 1.4],
                                 [400.0, 500.0, 600.0]])
    source_points = tf.constant([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
                                 [700.0, 800.0, 900.0], [1.0, 1.0, 1.0],
                                 [800.0, 700.0, 600.0]])
    # Shape = [5].
    point_masks = tf.constant([1.0, 1.0, 0.0, 1.0, 0.0])
    mpjpes = keypoint_utils.compute_procrustes_aligned_mpjpes(
        target_points, source_points, point_masks=point_masks)
    self.assertAlmostEqual(self.evaluate(mpjpes), 0.3496029, places=5)

  def test_normalize_points_by_image_size(self):
    """Verifies per-sample division of point coordinates by image size."""
    points = tf.constant([
        [[10.0, 40.0], [30.0, 80.0], [50.0, 120.0]],
        [[0.2, 0.2], [0.6, 0.4], [1.0, 0.6]],
    ])
    image_sizes = tf.constant([[100, 200], [20, 10]])
    normalized_points = keypoint_utils.normalize_points_by_image_size(
        points, image_sizes)
    self.assertAllClose(normalized_points, [
        [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
        [[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]],
    ])

  def test_denormalize_points_by_image_size(self):
    """Verifies the inverse of `normalize_points_by_image_size`."""
    points = tf.constant([
        [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
        [[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]],
    ])
    image_sizes = tf.constant([[100, 200], [20, 10]])
    denormalized_points = keypoint_utils.denormalize_points_by_image_size(
        points, image_sizes)
    self.assertAllClose(denormalized_points, [
        [[10.0, 40.0], [30.0, 80.0], [50.0, 120.0]],
        [[0.2, 0.2], [0.6, 0.4], [1.0, 0.6]],
    ])

  def test_create_rotation_matrices_3d(self):
    """Verifies rotation matrices built from azimuth/elevation/roll angles."""
    # Shape = [3, 2].
    azimuths = tf.constant([[0.0, math.pi / 2.0], [math.pi / 2.0, 0.0],
                            [0.0, math.pi / 2.0]])
    elevations = tf.constant([[0.0, -math.pi / 2.0], [-math.pi / 2.0, 0.0],
                              [0.0, -math.pi / 2.0]])
    rolls = tf.constant([[0.0, math.pi], [math.pi, 0.0], [0.0, math.pi]])
    self.assertAllClose(
        keypoint_utils.create_rotation_matrices_3d(azimuths, elevations, rolls),
        [[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
          [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]],
         [[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
          [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]],
         [[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
          [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]]])

  def test_rotate_points(self):
    """Verifies applying batched rotation matrices to batched points."""
    rotation_matrices = tf.constant([[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                                       [0.0, 0.0, 1.0]],
                                      [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
                                       [1.0, 0.0, 0.0]]],
                                     [[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
                                       [1.0, 0.0, 0.0]],
                                      [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                                       [0.0, 0.0, 1.0]]],
                                     [[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                                       [0.0, 0.0, 1.0]],
                                      [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
                                       [1.0, 0.0, 0.0]]]])
    points = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                          [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
                          [[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]]])
    self.assertAllClose(
        keypoint_utils.rotate_points(rotation_matrices, points),
        [[[1.0, 2.0, 3.0], [5.0, 6.0, 4.0]],
         [[8.0, 9.0, 7.0], [10.0, 11.0, 12.0]],
         [[13.0, 14.0, 15.0], [17.0, 18.0, 16.0]]])

  def test_random_rotate_and_project_3d_to_2d_without_default_camera(self):
    """3D->2D projection with degenerate angle ranges (deterministic)."""
    keypoints_3d = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                                [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
                                [[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]]])
    keypoints_2d = keypoint_utils.random_rotate_and_project_3d_to_2d(
        keypoints_3d,
        azimuth_range=(math.pi / 2.0, math.pi / 2.0),
        elevation_range=(-math.pi / 2.0, -math.pi / 2.0),
        roll_range=(math.pi, math.pi),
        default_camera=False)
    self.assertAllClose(
        keypoints_2d,
        [[[2.0 / 1.0, 3.0 / 1.0], [5.0 / 4.0, 6.0 / 4.0]],
         [[8.0 / 7.0, 9.0 / 7.0], [11.0 / 10.0, 12.0 / 10.0]],
         [[14.0 / 13.0, 15.0 / 13.0], [17.0 / 16.0, 18.0 / 16.0]]])

  def test_random_rotate_and_project_3d_to_2d_with_default_camera(self):
    """Same as above but through the default camera transform."""
    keypoints_3d = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                                [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
                                [[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]]])
    keypoints_2d = keypoint_utils.random_rotate_and_project_3d_to_2d(
        keypoints_3d,
        azimuth_range=(math.pi / 2.0, math.pi / 2.0),
        elevation_range=(-math.pi / 2.0, -math.pi / 2.0),
        roll_range=(math.pi, math.pi),
        default_camera=True)
    self.assertAllClose(
        keypoints_2d,
        [[[-1.0 / 5.0, 2.0 / 5.0], [-4.0 / 8.0, 5.0 / 8.0]],
         [[-7.0 / 11.0, 8.0 / 11.0], [-10.0 / 14.0, 11.0 / 14.0]],
         [[-13.0 / 17.0, 14.0 / 17.0], [-16.0 / 20.0, 17.0 / 20.0]]])

  def test_random_rotate_and_project_temporal_3d_to_2d_without_default_camera(
      self):
    """Sequential (temporal) projection without the default camera."""
    keypoints_3d_t1 = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                                   [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
                                   [[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]]])
    keypoints_3d_t2 = tf.constant([[[1.2, 2.2, 3.2], [4.2, 5.2, 6.2]],
                                   [[7.4, 8.4, 9.4], [10.4, 11.4, 12.4]],
                                   [[13.8, 14.8, 15.8], [16.8, 17.8, 18.8]]])
    keypoints_3d_t3 = tf.constant([[[1.5, 2.5, 3.5], [4.5, 5.5, 6.5]],
                                   [[7.6, 8.6, 9.6], [10.6, 11.6, 12.6]],
                                   [[13.7, 14.7, 15.7], [16.7, 17.7, 18.7]]])
    # Stack frames along the time axis (axis=-3).
    keypoints_3d = tf.stack(
        [keypoints_3d_t1, keypoints_3d_t2, keypoints_3d_t3],
        axis=-3)
    keypoints_2d = keypoint_utils.random_rotate_and_project_3d_to_2d(
        keypoints_3d,
        azimuth_range=(math.pi / 2.0, math.pi / 2.0),
        elevation_range=(-math.pi / 2.0, -math.pi / 2.0),
        roll_range=(math.pi, math.pi),
        default_camera=False,
        sequential_inputs=True)
    self.assertAllClose(
        keypoints_2d,
        [[[[2.0 / 1.0, 3.0 / 1.0], [5.0 / 4.0, 6.0 / 4.0]],
          [[2.2 / 1.2, 3.2 / 1.2], [5.2 / 4.2, 6.2 / 4.2]],
          [[2.5 / 1.5, 3.5 / 1.5], [5.5 / 4.5, 6.5 / 4.5]]],
         [[[8.0 / 7.0, 9.0 / 7.0], [11.0 / 10.0, 12.0 / 10.0]],
          [[8.4 / 7.4, 9.4 / 7.4], [11.4 / 10.4, 12.4 / 10.4]],
          [[8.6 / 7.6, 9.6 / 7.6], [11.6 / 10.6, 12.6 / 10.6]]],
         [[[14.0 / 13.0, 15.0 / 13.0], [17.0 / 16.0, 18.0 / 16.0]],
          [[14.8 / 13.8, 15.8 / 13.8], [17.8 / 16.8, 18.8 / 16.8]],
          [[14.7 / 13.7, 15.7 / 13.7], [17.7 / 16.7, 18.7 / 16.7]]]])

  def test_random_rotate_and_project_temporal_3d_to_2d_with_default_camera(
      self):
    """Sequential (temporal) projection through the default camera."""
    keypoints_3d_t1 = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                                   [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
                                   [[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]]])
    keypoints_3d_t2 = tf.constant([[[1.2, 2.2, 3.2], [4.2, 5.2, 6.2]],
                                   [[7.4, 8.4, 9.4], [10.4, 11.4, 12.4]],
                                   [[13.8, 14.8, 15.8], [16.8, 17.8, 18.8]]])
    keypoints_3d_t3 = tf.constant([[[1.5, 2.5, 3.5], [4.5, 5.5, 6.5]],
                                   [[7.6, 8.6, 9.6], [10.6, 11.6, 12.6]],
                                   [[13.7, 14.7, 15.7], [16.7, 17.7, 18.7]]])
    # Stack frames along the time axis (axis=-3).
    keypoints_3d = tf.stack(
        [keypoints_3d_t1, keypoints_3d_t2, keypoints_3d_t3],
        axis=-3)
    keypoints_2d = keypoint_utils.random_rotate_and_project_3d_to_2d(
        keypoints_3d,
        azimuth_range=(math.pi / 2.0, math.pi / 2.0),
        elevation_range=(-math.pi / 2.0, -math.pi / 2.0),
        roll_range=(math.pi, math.pi),
        default_camera=True,
        sequential_inputs=True)
    self.assertAllClose(
        keypoints_2d,
        [[[[-1.0 / 5.0, 2.0 / 5.0], [-4.0 / 8.0, 5.0 / 8.0]],
          [[-1.2 / 5.2, 2.2 / 5.2], [-4.2 / 8.2, 5.2 / 8.2]],
          [[-1.5 / 5.5, 2.5 / 5.5], [-4.5 / 8.5, 5.5 / 8.5]]],
         [[[-7.0 / 11.0, 8.0 / 11.0], [-10.0 / 14.0, 11.0 / 14.0]],
          [[-7.4 / 11.4, 8.4 / 11.4], [-10.4 / 14.4, 11.4 / 14.4]],
          [[-7.6 / 11.6, 8.6 / 11.6], [-10.6 / 14.6, 11.6 / 14.6]]],
         [[[-13.0 / 17.0, 14.0 / 17.0], [-16.0 / 20.0, 17.0 / 20.0]],
          [[-13.8 / 17.8, 14.8 / 17.8], [-16.8 / 20.8, 17.8 / 20.8]],
          [[-13.7 / 17.7, 14.7 / 17.7], [-16.7 / 20.7, 17.7 / 20.7]]]])

  def test_create_smooth_rotation_matrices(self):
    """Verifies interpolated rotation matrices between two Euler-angle poses."""
    start_euler_angles = (-math.pi, -math.pi / 6.0, -math.pi / 6.0)
    end_euler_angles = (math.pi, math.pi / 6.0, math.pi / 6.0)
    metrics = keypoint_utils.create_smooth_rotation_matrices(
        start_euler_angles, end_euler_angles, num_views=3)
    self.assertAllClose(
        metrics,
        [[[-0.866, -0.25, 0.433],
          [0., -0.866, -0.5],
          [0.5, -0.433, 0.75]],
         [[1.0, 0.0, 0.0],
          [0.0, 1.0, 0.0],
          [0.0, 0.0, 1.0]],
         [[-0.866, -0.25, -0.433],
          [-0.0, -0.866, 0.5],
          [-0.5, 0.433, 0.75]]], atol=1e-04)

  def test_select_keypoints_by_name(self):
    """Selects H36M-17 keypoints into the COCO-13-compatible subset."""
    input_keypoints = tf.constant([
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [3.0, 3.0, 3.0],
        [4.0, 4.0, 4.0],
        [5.0, 5.0, 5.0],
        [6.0, 6.0, 6.0],
        [7.0, 7.0, 7.0],
        [8.0, 8.0, 8.0],
        [9.0, 9.0, 9.0],
        [10.0, 10.0, 10.0],
        [11.0, 11.0, 11.0],
        [12.0, 12.0, 12.0],
        [13.0, 13.0, 13.0],
        [14.0, 14.0, 14.0],
        [15.0, 15.0, 15.0],
        [16.0, 16.0, 16.0],
    ])
    keypoint_profile_3d = (
        keypoint_profiles.create_keypoint_profile_or_die('LEGACY_3DH36M17'))
    keypoint_profile_2d = (
        keypoint_profiles.create_keypoint_profile_or_die('LEGACY_2DCOCO13'))
    output_keypoints, _ = keypoint_utils.select_keypoints_by_name(
        input_keypoints,
        input_keypoint_names=keypoint_profile_3d.keypoint_names,
        output_keypoint_names=(
            keypoint_profile_2d.compatible_keypoint_name_dict['LEGACY_3DH36M17']
        ))
    self.assertAllClose(output_keypoints, [
        [1.0, 1.0, 1.0],
        [4.0, 4.0, 4.0],
        [5.0, 5.0, 5.0],
        [6.0, 6.0, 6.0],
        [7.0, 7.0, 7.0],
        [8.0, 8.0, 8.0],
        [9.0, 9.0, 9.0],
        [11.0, 11.0, 11.0],
        [12.0, 12.0, 12.0],
        [13.0, 13.0, 13.0],
        [14.0, 14.0, 14.0],
        [15.0, 15.0, 15.0],
        [16.0, 16.0, 16.0],
    ])

  def test_random_project_and_select_keypoints(self):
    """Projects 3D H36M-17 keypoints to 2D and selects the COCO-13 subset."""
    keypoints_3d = tf.constant([
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [3.0, 3.0, 3.0],
        [4.0, 4.0, 4.0],
        [5.0, 5.0, 5.0],
        [6.0, 6.0, 6.0],
        [7.0, 7.0, 7.0],
        [8.0, 8.0, 8.0],
        [9.0, 9.0, 9.0],
        [10.0, 10.0, 10.0],
        [11.0, 11.0, 11.0],
        [12.0, 12.0, 12.0],
        [13.0, 13.0, 13.0],
        [14.0, 14.0, 14.0],
        [15.0, 15.0, 15.0],
        [16.0, 16.0, 16.0],
    ])
    keypoint_profile_3d = (
        keypoint_profiles.create_keypoint_profile_or_die('LEGACY_3DH36M17'))
    keypoint_profile_2d = (
        keypoint_profiles.create_keypoint_profile_or_die('LEGACY_2DCOCO13'))
    keypoints_2d, _ = keypoint_utils.random_project_and_select_keypoints(
        keypoints_3d,
        keypoint_profile_3d=keypoint_profile_3d,
        output_keypoint_names=(
            keypoint_profile_2d.compatible_keypoint_name_dict['LEGACY_3DH36M17']
        ),
        azimuth_range=(math.pi / 2.0, math.pi / 2.0),
        elevation_range=(math.pi / 2.0, math.pi / 2.0),
        roll_range=(-math.pi / 2.0, -math.pi / 2.0))
    keypoints_2d, _, _ = keypoint_profile_2d.normalize(keypoints_2d)
    self.assertAllClose(keypoints_2d, [
        [-0.4356161, 0.4356161],
        [-0.32822642, 0.32822642],
        [-0.2897728, 0.28977284],
        [-0.24986516, 0.24986516],
        [-0.2084193, 0.2084193],
        [-0.16534455, 0.16534461],
        [-0.12054307, 0.1205431],
        [-0.025327, 0.025327],
        [0.025327, -0.025327],
        [0.07818867, -0.07818867],
        [0.13340548, -0.13340548],
        [0.19113854, -0.19113848],
        [0.2515637, -0.25156358],
    ])

  def test_remove_at_indices(self):
    """Verifies `remove_at_indices` drops the selected keypoints."""
    keypoints = tf.constant([[[1.0, 2.0], [7.0, 8.0], [3.0, 4.0], [5.0, 6.0],
                              [9.0, 10.0]]])
    indices = [1, 4]
    keypoints = keypoint_utils.remove_at_indices(keypoints, indices)
    self.assertAllClose(keypoints, [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])

  def test_insert_at_indices(self):
    """Verifies `insert_at_indices` with explicit keypoints to insert."""
    keypoints = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])
    indices = [1, 3, 3]
    insert_keypoints = tf.constant([[[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]])
    keypoints = keypoint_utils.insert_at_indices(
        keypoints, indices, insert_keypoints=insert_keypoints)
    self.assertAllClose(keypoints, [[[1.0, 2.0], [7.0, 8.0], [3.0, 4.0],
                                     [5.0, 6.0], [9.0, 10.0], [11.0, 12.0]]])

  def test_insert_zeros_at_indices(self):
    """Verifies `insert_at_indices` inserts zero points when none are given."""
    keypoints = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])
    indices = [1, 1, 3]
    keypoints = keypoint_utils.insert_at_indices(
        keypoints, indices, insert_keypoints=None)
    self.assertAllClose(keypoints, [[[1.0, 2.0], [0.0, 0.0], [0.0, 0.0],
                                     [3.0, 4.0], [5.0, 6.0], [0.0, 0.0]]])

  def test_transfer_keypoint_masks_case_1(self):
    """Transfers keypoint masks from the 2DSTD13 to the 3DSTD16 profile."""
    # Shape = [2, 13].
    input_keypoint_masks = tf.constant([
        [
            1.0,  # NOSE_TIP
            1.0,  # LEFT_SHOULDER
            1.0,  # RIGHT_SHOULDER
            0.0,  # LEFT_ELBOW
            1.0,  # RIGHT_ELBOW
            1.0,  # LEFT_WRIST
            0.0,  # RIGHT_WRIST
            1.0,  # LEFT_HIP
            0.0,  # RIGHT_HIP
            1.0,  # LEFT_KNEE
            1.0,  # RIGHT_KNEE
            0.0,  # LEFT_ANKLE
            0.0,  # RIGHT_ANKLE
        ],
        [
            0.0,  # NOSE_TIP
            0.0,  # LEFT_SHOULDER
            0.0,  # RIGHT_SHOULDER
            1.0,  # LEFT_ELBOW
            0.0,  # RIGHT_ELBOW
            0.0,  # LEFT_WRIST
            1.0,  # RIGHT_WRIST
            0.0,  # LEFT_HIP
            1.0,  # RIGHT_HIP
            0.0,  # LEFT_KNEE
            0.0,  # RIGHT_KNEE
            1.0,  # LEFT_ANKLE
            1.0,  # RIGHT_ANKLE
        ]
    ])
    input_keypoint_profile = keypoint_profiles.create_keypoint_profile_or_die(
        '2DSTD13')
    output_keypoint_profile = keypoint_profiles.create_keypoint_profile_or_die(
        '3DSTD16')
    # Shape = [2, 16].
    output_keypoint_masks = keypoint_utils.transfer_keypoint_masks(
        input_keypoint_masks, input_keypoint_profile, output_keypoint_profile)
    self.assertAllClose(
        output_keypoint_masks,
        [
            [
                1.0,  # NOSE
                1.0,  # NECK
                1.0,  # LEFT_SHOULDER
                1.0,  # RIGHT_SHOULDER
                0.0,  # LEFT_ELBOW
                1.0,  # RIGHT_ELBOW
                1.0,  # LEFT_WRIST
                0.0,  # RIGHT_WRIST
                0.0,  # SPINE
                0.0,  # PELVIS
                1.0,  # LEFT_HIP
                0.0,  # RIGHT_HIP
                1.0,  # LEFT_KNEE
                1.0,  # RIGHT_KNEE
                0.0,  # LEFT_ANKLE
                0.0,  # RIGHT_ANKLE
            ],
            [
                0.0,  # NOSE
                0.0,  # NECK
                0.0,  # LEFT_SHOULDER
                0.0,  # RIGHT_SHOULDER
                1.0,  # LEFT_ELBOW
                0.0,  # RIGHT_ELBOW
                0.0,  # LEFT_WRIST
                1.0,  # RIGHT_WRIST
                0.0,  # SPINE
                0.0,  # PELVIS
                0.0,  # LEFT_HIP
                1.0,  # RIGHT_HIP
                0.0,  # LEFT_KNEE
                0.0,  # RIGHT_KNEE
                1.0,  # LEFT_ANKLE
                1.0,  # RIGHT_ANKLE
            ]
        ])

  def test_transfer_keypoint_masks_case_2(self):
    """Transfers keypoint masks from the 3DSTD16 to the 2DSTD13 profile."""
    # Shape = [2, 16].
    input_keypoint_masks = tf.constant([
        [
            1.0,  # NOSE
            1.0,  # NECK
            1.0,  # LEFT_SHOULDER
            1.0,  # RIGHT_SHOULDER
            0.0,  # LEFT_ELBOW
            1.0,  # RIGHT_ELBOW
            1.0,  # LEFT_WRIST
            0.0,  # RIGHT_WRIST
            0.0,  # SPINE
            0.0,  # PELVIS
            1.0,  # LEFT_HIP
            0.0,  # RIGHT_HIP
            1.0,  # LEFT_KNEE
            1.0,  # RIGHT_KNEE
            0.0,  # LEFT_ANKLE
            0.0,  # RIGHT_ANKLE
        ],
        [
            0.0,  # NOSE
            0.0,  # NECK
            0.0,  # LEFT_SHOULDER
            0.0,  # RIGHT_SHOULDER
            1.0,  # LEFT_ELBOW
            0.0,  # RIGHT_ELBOW
            0.0,  # LEFT_WRIST
            1.0,  # RIGHT_WRIST
            1.0,  # SPINE
            1.0,  # PELVIS
            0.0,  # LEFT_HIP
            1.0,  # RIGHT_HIP
            0.0,  # LEFT_KNEE
            0.0,  # RIGHT_KNEE
            1.0,  # LEFT_ANKLE
            1.0,  # RIGHT_ANKLE
        ]
    ])
    input_keypoint_profile = keypoint_profiles.create_keypoint_profile_or_die(
        '3DSTD16')
    output_keypoint_profile = keypoint_profiles.create_keypoint_profile_or_die(
        '2DSTD13')
    # Shape = [2, 13].
    output_keypoint_masks = keypoint_utils.transfer_keypoint_masks(
        input_keypoint_masks, input_keypoint_profile, output_keypoint_profile)
    self.assertAllClose(
        output_keypoint_masks,
        [
            [
                1.0,  # NOSE_TIP
                1.0,  # LEFT_SHOULDER
                1.0,  # RIGHT_SHOULDER
                0.0,  # LEFT_ELBOW
                1.0,  # RIGHT_ELBOW
                1.0,  # LEFT_WRIST
                0.0,  # RIGHT_WRIST
                1.0,  # LEFT_HIP
                0.0,  # RIGHT_HIP
                1.0,  # LEFT_KNEE
                1.0,  # RIGHT_KNEE
                0.0,  # LEFT_ANKLE
                0.0,  # RIGHT_ANKLE
            ],
            [
                0.0,  # NOSE_TIP
                0.0,  # LEFT_SHOULDER
                0.0,  # RIGHT_SHOULDER
                1.0,  # LEFT_ELBOW
                0.0,  # RIGHT_ELBOW
                0.0,  # LEFT_WRIST
                1.0,  # RIGHT_WRIST
                0.0,  # LEFT_HIP
                1.0,  # RIGHT_HIP
                0.0,  # LEFT_KNEE
                0.0,  # RIGHT_KNEE
                1.0,  # LEFT_ANKLE
                1.0,  # RIGHT_ANKLE
            ]
        ])
# Run all test cases via the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 43.459596 | 80 | 0.463364 |
57b4bf84a62ff147e6056bb69765498a2a71c31a | 4,662 | py | Python | database/models.py | joshbarrass/SkyrimCrafting | cdadbbb09b0eed01e4ed6be631c1ee3cb8cb4d2f | [
"MIT"
] | null | null | null | database/models.py | joshbarrass/SkyrimCrafting | cdadbbb09b0eed01e4ed6be631c1ee3cb8cb4d2f | [
"MIT"
] | 4 | 2020-10-01T14:39:46.000Z | 2020-10-01T18:33:27.000Z | database/models.py | joshbarrass/SkyrimCrafting | cdadbbb09b0eed01e4ed6be631c1ee3cb8cb4d2f | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True)
name = Column(String)
value = Column(Integer)
weight = Column(Float)
def __repr__(self):
return "<{base}.Item(ID={id}, name={name}, value={value}, weight={weight})>".format(
base=__name__,
name=self.name,
id=self.id,
value=self.value,
weight=self.weight,
)
class PotionEffect(Base):
__tablename__ = "potion_effects"
id = Column(Integer, primary_key=True)
name = Column(String)
is_negative = Column(Boolean)
description = Column(String)
base_cost = Column(Float)
base_mag = Column(Integer)
base_dur = Column(Integer)
value = Column(Integer) # value at 100 skill
vary_duration = Column(Boolean)
def __repr__(self):
return "<{base}.PotionEffect(id={id}, name={name}, is_negative={neg}, description={desc}, base_cost={cost}, base_mag={mag}, base_dur={dur}, value={value})>".format(
base=__name__,
id=self.id,
name=self.name,
neg=self.is_negative,
desc=self.description,
cost=self.base_cost,
mag=self.base_mag,
dur=self.base_dur,
value=self.value
)
class PotionItem(Base):
__tablename__ = "potion_items"
item_id = Column(
Integer,
ForeignKey("{table}.id".format(table=Item.__tablename__)),
primary_key=True
)
effect_id = Column(
Integer,
ForeignKey(
"{table}.id".format(table=PotionEffect.__tablename__)
),
primary_key=True
)
item = relationship("Item", foreign_keys="PotionItem.item_id")
effect = relationship(
"PotionEffect", foreign_keys="PotionItem.effect_id"
)
priority = Column(Integer)
mag_multiplier = Column(Float)
dur_multiplier = Column(Float)
val_multiplier = Column(Float)
def __repr__(self):
return "<{base}.PotionItem(item={item}, effect={effect}, priority={priority}, mag_multiplier=x{mag}, dur_multiplier=x{dur}, val_multiplier=x{val})>".format(
base=__name__,
item=self.item_id,
effect=self.effect_id,
priority=self.priority,
mag=self.mag_multiplier,
dur=self.dur_multiplier,
val=self.val_multiplier
)
class Requirement(Base):
__tablename__ = "recipe_requirements"
id = Column(Integer, primary_key=True)
name = Column(String)
desc = Column(String)
def __repr__(self):
return "<{base}.Requirement(id={id}, name={name}, desc={desc})>".format(
base=__name__,
id=self.id,
name=self.name,
desc=self.desc,
)
class Recipe(Base):
    """A recipe producing ``quantity`` of a result Item, gated by a Requirement."""

    __tablename__ = "recipes"

    id = Column(Integer, primary_key=True)
    result_id = Column(
        Integer,
        ForeignKey("%s.id" % Item.__tablename__),
    )
    result = relationship("Item", foreign_keys="Recipe.result_id")
    requirement_id = Column(
        Integer,
        ForeignKey("%s.id" % Requirement.__tablename__),
    )
    requirement = relationship(
        "Requirement", foreign_keys="Recipe.requirement_id"
    )
    quantity = Column(Integer)

    def __repr__(self):
        return (
            "<%s.Recipe(id=%s, result=%s, quantity=%s)>"
            % (__name__, self.id, self.result_id, self.quantity)
        )
class CraftingIngredient(Base):
    """Association row: a Recipe consumes ``quantity`` of an ingredient Item."""

    __tablename__ = "crafting_ingredients"

    # Composite primary key: one row per (recipe, ingredient) pair.
    recipe_id = Column(
        Integer,
        ForeignKey("%s.id" % Recipe.__tablename__),
        primary_key=True
    )
    ingredient_id = Column(
        Integer,
        ForeignKey("%s.id" % Item.__tablename__),
        primary_key=True
    )
    recipe = relationship(
        "Recipe", foreign_keys="CraftingIngredient.recipe_id"
    )
    ingredient = relationship(
        "Item", foreign_keys="CraftingIngredient.ingredient_id"
    )
    quantity = Column(Integer)

    def __repr__(self):
        return (
            "<%s.CraftingIngredient(recipe=%s, ingredient=%s, quantity=%s)>"
            % (__name__, self.recipe_id, self.ingredient_id, self.quantity)
        )
| 29.884615 | 172 | 0.614114 |
772c66d44a2b9ddc9d871461748b7dde7ca886fc | 1,311 | py | Python | NEST-14.0-FPGA/examples/nest/music/minimalmusicsetup_receivenest.py | OpenHEC/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 45 | 2019-12-09T06:45:53.000Z | 2022-01-29T12:16:41.000Z | NEST-14.0-FPGA/examples/nest/music/minimalmusicsetup_receivenest.py | zlchai/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 2 | 2020-05-23T05:34:21.000Z | 2021-09-08T02:33:46.000Z | NEST-14.0-FPGA/examples/nest/music/minimalmusicsetup_receivenest.py | OpenHEC/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 10 | 2019-12-09T06:45:59.000Z | 2021-03-25T09:32:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# minimalmusicsetup_receivenest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest

# Check at runtime that this NEST build was compiled with MUSIC support;
# sli_run pushes the boolean onto the SLI stack and spp() pops it.
nest.sli_run("statusdict/have_music ::")
if not nest.spp():
    import sys
    print("NEST was not compiled with support for MUSIC, not running.")
    sys.exit()

nest.set_verbosity("M_ERROR")

# Proxy that receives spike events from MUSIC port 'spikes_in', channel 0.
meip = nest.Create('music_event_in_proxy')
nest.SetStatus(meip, {'port_name': 'spikes_in', 'music_channel': 0})

# Single integrate-and-fire neuron driven by the incoming MUSIC spikes.
n = nest.Create('iaf_psc_alpha')
nest.Connect(meip, n, 'one_to_one', {'weight': 750.0})

# Voltmeter that prints the membrane potential to the screen only.
vm = nest.Create('voltmeter')
nest.SetStatus(vm, {'to_memory': False, 'to_screen': True})
nest.Connect(vm, n)

# Run the simulation for 10 ms.
nest.Simulate(10)
| 27.893617 | 71 | 0.7254 |
283ad86c9732fcf50dbc3629eb6e6878bbbd0e2c | 14,334 | py | Python | paxes_nova/virt/ibmpowervm/vif/ivm/utils.py | windskyer/k_nova | 63579dbfcfcda5def5b588a6728bfff85ad4564e | [
"Apache-2.0"
] | null | null | null | paxes_nova/virt/ibmpowervm/vif/ivm/utils.py | windskyer/k_nova | 63579dbfcfcda5def5b588a6728bfff85ad4564e | [
"Apache-2.0"
] | null | null | null | paxes_nova/virt/ibmpowervm/vif/ivm/utils.py | windskyer/k_nova | 63579dbfcfcda5def5b588a6728bfff85ad4564e | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# =================================================================
# =================================================================
"""
IBMPowerVM VIF driver utilities
"""
import logging
from paxes_nova.virt.ibmpowervm.vif.common import ras
from paxes_nova import _
# maximum number of VEA allowed per SEA.
SEA_MAX_VEAS = 16
# maximum number of 8021Q tagged VLAN id supported on a single VEA
VEA_MAX_VLANS = 20
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
def is_valid_vlan_id(vlan_id):
    """
    Check whether vlan_id is in the valid 802.1Q range [1, 4094].

    :param vlan_id: vlan id to validate (may be None)
    :returns: True if vlan_id is a number in [1, 4094], False otherwise
    """
    # A chained comparison replaces the redundant
    # "True if ... else False" ternary; None and 0 both fail the test.
    return vlan_id is not None and 0 < vlan_id < 4095
def get_veth_list_by_vlan_id_cmd():
    """
    Build the IVM command that lists virtual ethernet devices per VLAN.

    :return: The command to run against the IVM.

        Sample output lines:
            1,12,3333,none,1,0
            1,13,4093,"201,207",1,1

        Field order: lpar_id, slot_num, port_vlan_id, is_trunk,
        ieee_virtual_eth, addl_vlan_ids.
    """
    fields = ('lpar_id,slot_num,port_vlan_id,is_trunk,'
              'ieee_virtual_eth,addl_vlan_ids')
    return 'lshwres -r virtualio --rsubtype eth --level lpar  -F ' + fields
def get_lpar_id_list():
    """
    Build an IVM command that lists every LPAR identifier, including the
    VIO Server's.  The same id may appear multiple times (once per virtual
    ethernet adapter), so callers must de-duplicate the output:

        1
        1
        1
        1
        2

    :return: the command string
    """
    cmd = 'lshwres -r virtualio --rsubtype eth --level lpar -F lpar_id'
    return cmd
def get_veth_list_for_lpar(lpar_id):
    """
    Build the IVM command returning the data needed for a VM import.

    :param lpar_id: The LPAR identifier to gather the information for.
    :return: The command to run against the IVM.

        Sample output lines:
            1,1,06-04D7A,EEE91D8C7003,1,none
            2,1,06-04D7A,EEE91D8C7004,2,"201,207"

        Field order: slot_num, lpar_id, lpar_name, mac_addr,
        port_vlan_id, addl_vlan_ids.
    """
    return ('lshwres -r virtualio --rsubtype eth --level lpar -F '
            'slot_num,lpar_id,lpar_name,mac_addr,port_vlan_id,addl_vlan_ids '
            '--filter "lpar_ids={0}"'.format(lpar_id))
def get_slot_descriptions(lpar_id):
    """
    Build a command that shows the description of each slot's device.

    Example output:
        RAID Controller
        Fibre Channel Serial Bus
        Empty slot
        Ethernet controller

    :param lpar_id: the LPAR whose slots are listed
    :return: the command string
    """
    return ('lshwres -r io --rsubtype slot -F description --filter '
            '"lpar_ids={0}"'.format(lpar_id))
def get_vios_lpar_id_name():
    """
    Build the IVM command returning the VIO Server's LPAR id and name.

    Example output: '1 06-04C7A' -- the text before the first space is the
    LPAR id, everything after it is the LPAR name.

    :return: the command string
    """
    cmd = 'ioscli lslparinfo'
    return cmd
def get_virt_adapters_with_physloc_cmd(lpar_id):
    """
    Build a VIOS command listing the available virtual adapter slots
    (filtered to the given LPAR via the "-V<lpar_id>" physloc token).

    :param lpar_id: LPAR id used in the grep filter
    :return: the command string
    """
    return 'ioscli lsdev -slots |grep "\\-V{0}"'.format(lpar_id)
def create_8021Q_vea_cmd(lpar_id, slotnum, port_vlan_id, addl_vlan_ids):
    """
    Build the HMC command that creates an 802.1Q virtual ethernet adapter.

    :param lpar_id: LPAR id
    :param slotnum: virtual adapter slot number
    :param port_vlan_id: untagged port vlan id
    :param addl_vlan_ids: tagged VLAN id list (may be None or empty)
    :returns: the command string
    """
    # Render the tagged VLAN list as a comma-separated string; an absent
    # list produces an empty addl_vlan_ids value.
    vlan_csv = ",".join(str(vid) for vid in (addl_vlan_ids or []))
    return ('chhwres -r virtualio --rsubtype eth -o a -s {slot} '
            '--id {lpar} -a ieee_virtual_eth=1,'
            'port_vlan_id={pvid},is_trunk=1,'
            'trunk_priority=1,\\"addl_vlan_ids={vids}\\"'.format(
                slot=slotnum, lpar=lpar_id,
                pvid=port_vlan_id, vids=vlan_csv))
def get_newly_added_slot_name_cmd(mts, lpar_id, slotnum):
    """
    Build a VIOS command that runs cfgdev for a newly configured virtual
    slot and returns the device name for that slot.

    :param mts: PowerVM MTS string for the virtual adapter
    :param lpar_id: LPAR id
    :param slotnum: virtual slot number
    :returns: the command string
    """
    return ('ioscli cfgdev -dev vio0 && ioscli lsdev -plc '
            '{mts}-V{lpar}-C{slot}-T1 -fmt :  -field name'.format(
                mts=mts, lpar=lpar_id, slot=slotnum))
def get_rmc_status():
    """
    Build the IVM command that reports the RMC status of every LPAR,
    one "lpar_id,rmc_state" pair per line.

    :returns: the command string
    """
    cmd = 'lssyscfg -r lpar -F lpar_id,rmc_state'
    return cmd
def change_sea_virt_adapters_cmd(seaname, pveaname, virt_list):
    """
    Build the VIOS command that updates a SEA's virt_adapters attribute.

    :param seaname: SEA devname for chdev
    :param pveaname: pvid_adapter devname (always first in virt_adapters)
    :param virt_list: list of additional virtual adapter devnames
                      (may be None or empty)
    :returns: A VIOS command to change the attribute of a given SEA
    :raises TypeError: if virt_list is a non-empty value that is not a list
    """
    # Validate the type BEFORE taking len(): the original called
    # len(virt_list) first, so a non-sized value (e.g. an int) raised a
    # generic TypeError and the intended translated message was never used.
    if virt_list and not isinstance(virt_list, list):
        raise TypeError(_('change_sea_virt_adapters_cmd(): virt_list'
                          ' is not list.'))
    virt_list = [] if not virt_list else virt_list

    additional_adapters = ""
    if len(virt_list) > 0:
        additional_adapters = ',' + ','.join(virt_list)
    return ('ioscli chdev -dev %(sea)s -attr '
            'virt_adapters=%(pvea)s%(virtlist)s '
            'pvid_adapter=%(pvea)s' %
            {'sea': seaname,
             'pvea': pveaname,
             'virtlist': additional_adapters})
def remove_virtual_device(devname):
    """
    Build the VIOS command that removes a virtual device.

    :param devname: virtual device name to remove
    :returns: the command string
    """
    return 'ioscli rmdev -dev {0}'.format(devname)
def remove_virtual_slot_cmd(lpar_id, slot_num):
    """
    Build the HMC command that removes a virtual ethernet slot.

    :param lpar_id: LPAR id
    :param slot_num: virtual adapter slot number
    :returns: the command string
    """
    return ('chhwres -r virtualio --rsubtype eth -o r '
            '-s {slot} --id {lpar}'.format(slot=slot_num, lpar=lpar_id))
def create_non_8021Q_vea_cmd(lpar_id, slot_num, port_vlan_id):
    """
    Build the HMC command that creates a non-802.1Q virtual ethernet
    adapter (untagged traffic only).

    :param lpar_id: LPAR id
    :param slot_num: virtual adapter slot number
    :param port_vlan_id: untagged port vlan id
    :returns: the command string
    """
    return ('chhwres -r virtualio --rsubtype eth -o a -s {slot} '
            '--id {lpar} -a ieee_virtual_eth=0,port_vlan_id={pvid},'
            'is_trunk=1,trunk_priority=1'.format(
                slot=slot_num, lpar=lpar_id, pvid=port_vlan_id))
def get_mgmt_interface_devname_cmd(ip_addr):
    """
    Build the VIOS command that finds the VIOS/IVM management interface
    device name.

    lstcpip -stored only queries the AIX config database, which is much
    faster than lstcpip -interface (roughly one second per configured
    interface) because it never opens/closes the interfaces.

    :param ip_addr: management interface ip address
    :returns: the command string
    """
    return ('ioscli lstcpip -stored | grep -p -w "{ip}" | grep -p -w '
            '"State = up"'.format(ip=ip_addr))
def get_all_sea_with_physloc_cmd():
    """
    Build the VIOS command listing every virtual adapter (SEAs and VEAs)
    with its physloc.  Note: adapters in the "Defined" state are included.

    :returns: the command string
    """
    cmd = ("ioscli lsdev -virtual -type adapter -fmt : -field name "
           "description physloc|grep '^ent' ")
    return cmd
def parse_physloc_adapter_output(output):
    """
    Parse the physical-location command output from an IVM command.

    Example input lines ("name:description:physloc"):
        ent4:Virtual I/O Ethernet Adapter (l-lan):U9117.MMC.0604C17-V100-C2-T1
        ent5:Shared Ethernet Adapter:

    :param output: list of output lines from an IVM physloc command
    :returns: dict keyed by device name, each value being the list
              [<description>, <physloc>]; False when output is empty
              (legacy falsy sentinel preserved for existing callers)
    """
    if not output:
        return False

    # Split each line once (the original split every line twice):
    # the first field is the devname key, the remainder is the value list.
    virt_eths = {}
    for item in output:
        fields = item.split(':')
        virt_eths[fields[0]] = fields[1:]
    return virt_eths
def parse_rmc_status(output, lpar_id):
    """
    Parse the output of get_rmc_status() for one LPAR's RMC state.

    Example input lines ("lpar_id,state"):
        1,active
        2,none

    :param output: list of output lines from get_rmc_status()
    :param lpar_id: LPAR ID to find the RMC status of
    :returns: RMC status for the given LPAR; 'inactive' when the
              output is empty or the lpar_id is not present
    """
    if not output:
        return 'inactive'

    wanted = str(lpar_id)
    for line in output:
        fields = line.split(',')
        if fields[0] == wanted:
            return fields[1]

    # lpar_id was not found in any line.
    return 'inactive'
def get_mts(virt_eths):
    """
    Extract the machine type/model/serial from parsed IVM adapter data.

    The MTS is the first dash-separated token of any l-lan adapter's
    physloc (e.g. "U9117.MMC.0604C17" from "U9117.MMC.0604C17-V100-C2-T1").

    :param virt_eths: dict of adapter data from parse_physloc_adapter_output
    :returns: "type.model.serial" string, or None if no l-lan adapter exists
    """
    for details in virt_eths.values():
        if 'Virtual I/O Ethernet Adapter (l-lan)' in details[0]:
            return details[1].split('-')[0]
    return None
def parse_slot_num(virt_eths, adapter_name):
    """
    Extract the slot number an adapter occupies from parsed IVM data.

    The slot is the "C<n>" token of the physloc, e.g. 2 from
    "U9117.MMC.0604C17-V100-C2-T1".

    :param virt_eths: dict of adapter data from parse_physloc_adapter_output
    :param adapter_name: name of a SEA or VEA
    :returns: slot number as int, or None if the adapter is unknown
    """
    try:
        location = virt_eths[adapter_name][1].strip()
    except KeyError:
        # Unknown adapter: log the translated error and bail out.
        msg = (ras.vif_get_msg('error', 'VIOS_SEAINVALIDLOCS') %
               {'devname': adapter_name})
        ras.function_tracepoint(LOG, __name__, ras.TRACE_ERROR,
                                msg)
        return None

    slot_token = location.split('-')[2]
    return int(slot_token.lstrip('C'))
def get_sea_attribute_cmd(seaname):
    """
    Build the VIOS command that reports a SEA's pvid, pvid_adapter and
    virt_adapters attributes, plus its state (via the lsdev -type sea grep).

    :param seaname: sea device name
    :returns: the command string
    """
    return ('ioscli lsdev -dev {sea} -attr pvid,pvid_adapter,virt_adapters;'
            'ioscli lsdev -type sea | grep {sea}'.format(sea=seaname))
def get_veth_slot_info_cmd(lpar_id, slotnum):
    """
    Build the HMC command that reports one virtual ethernet slot's
    configuration.  (The vswitch field is not supported on IVM.)

    :param lpar_id: LPAR id
    :param slotnum: veth slot number
    :returns: the command string
    """
    return ('lshwres -r virtualio --rsubtype eth --level '
            'lpar --filter lpar_ids={lpar},slots={slot} '
            '-F is_trunk,trunk_priority,ieee_virtual_eth,'
            'port_vlan_id,addl_vlan_ids'.format(lpar=lpar_id, slot=slotnum))
def get_curr_max_virtual_slots_cmd(lparid=1):
    """
    Build the HMC command reporting the current maximum number of virtual
    slots configurable for an LPAR.

    :param lparid: LPAR ID (defaults to 1; not needed for IVM)
    :returns: the command string
    """
    return ('lshwres -r virtualio --rsubtype slot --level lpar '
            '--filter lpar_ids={0} '
            '-F curr_max_virtual_slots'.format(lparid))
def get_cur_active_vids_cmd(lpar_id=0):
    """
    Build the HMC command listing all PVIDs and 802.1Q vids currently on
    the LPAR, one VLAN per line (a trailing blank line may be ignored).

    :param lpar_id: LPAR ID to query; 0 means the whole system
    :returns: the command string
    """
    cmd = 'lshwres -r virtualio --rsubtype eth --level lpar '
    if lpar_id != 0:
        # Restrict output to the given LPAR.
        cmd += '--filter lpar_ids={0} '.format(lpar_id)
    return cmd + '-F port_vlan_id,addl_vlan_ids'
| 32.283784 | 79 | 0.626552 |
fd9790e026556bc31e62da5c507cbb2b34e9f5c9 | 8,468 | py | Python | skyportal/tests/api/test_annotations.py | rossbar/skyportal | 1f3cbd3700e25286a69b2d183776dfd3a064559d | [
"BSD-3-Clause"
] | null | null | null | skyportal/tests/api/test_annotations.py | rossbar/skyportal | 1f3cbd3700e25286a69b2d183776dfd3a064559d | [
"BSD-3-Clause"
] | null | null | null | skyportal/tests/api/test_annotations.py | rossbar/skyportal | 1f3cbd3700e25286a69b2d183776dfd3a064559d | [
"BSD-3-Clause"
] | null | null | null | import uuid
from skyportal.tests import api
def test_post_without_origin_fails(annotation_token, public_source, public_group):
    """Posting an annotation with a missing or empty `origin` is rejected."""
    # No "origin" key at all -> schema validation failure.
    payload = {
        'obj_id': public_source.id,
        'data': {'offset_from_host_galaxy': 1.5},
        'group_ids': [public_group.id],
    }
    resp_status, resp = api('POST', 'annotation', data=payload,
                            token=annotation_token)
    assert resp_status == 400
    assert 'Missing data for required field.' in resp["message"]

    # An explicitly empty "origin" string is also invalid.
    payload = dict(payload, origin='')
    resp_status, resp = api('POST', 'annotation', data=payload,
                            token=annotation_token)
    assert resp_status == 400
    assert 'Input `origin` must begin with alphanumeric/underscore' in resp["message"]
def test_post_same_origin_fails(annotation_token, public_source, public_group):
    """A second annotation with the same (object, origin) pair is rejected."""
    payload = {
        'obj_id': public_source.id,
        'origin': 'kowalski',
        'data': {'offset_from_host_galaxy': 1.5},
        'group_ids': [public_group.id],
    }
    # First annotation from this origin succeeds.
    resp_status, resp = api('POST', 'annotation', data=payload,
                            token=annotation_token)
    assert resp_status == 200

    # Re-posting the identical origin violates the uniqueness constraint;
    # callers should update the existing annotation with new info instead.
    resp_status, resp = api('POST', 'annotation', data=payload,
                            token=annotation_token)
    assert resp_status == 400
    assert 'duplicate key value violates unique constraint' in resp["message"]
def test_add_and_retrieve_annotation_group_id(
    annotation_token, public_source, public_group
):
    """An annotation posted with an explicit group can be read back intact."""
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source.id,
            'origin': 'kowalski',
            'data': {'offset_from_host_galaxy': 1.5},
            'group_ids': [public_group.id],
        },
        token=annotation_token,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
    assert fetched['data']['origin'] == 'kowalski'
def test_add_and_retrieve_annotation_no_group_id(annotation_token, public_source):
    """An annotation posted without group_ids can still be read back."""
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source.id,
            'origin': 'kowalski',
            'data': {'offset_from_host_galaxy': 1.5},
        },
        token=annotation_token,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
    assert fetched['data']['origin'] == 'kowalski'
def test_add_and_retrieve_annotation_group_access(
    annotation_token_two_groups,
    public_source_two_groups,
    public_group2,
    public_group,
    annotation_token,
):
    """Annotation visibility follows the group list it was posted with."""
    # Post an annotation visible only to public_group2.
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source_two_groups.id,
            'origin': 'kowalski',
            'data': {'offset_from_host_galaxy': 1.5},
            'group_ids': [public_group2.id],
        },
        token=annotation_token_two_groups,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    # A token belonging to public_group2 can read it ...
    get_status, fetched = api(
        'GET', f'annotation/{annotation_id}', token=annotation_token_two_groups
    )
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
    assert fetched['data']['origin'] == 'kowalski'

    # ... but a token outside public_group2 cannot.
    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 400
    assert "Insufficient permissions." in fetched["message"]

    # Post a second annotation shared with both groups.
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source_two_groups.id,
            'origin': 'GAIA',
            'data': {'offset_from_host_galaxy': 1.5},
            'group_ids': [public_group.id, public_group2.id],
        },
        token=annotation_token_two_groups,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    # Now both tokens can view the annotation.
    get_status, fetched = api(
        'GET', f'annotation/{annotation_id}', token=annotation_token_two_groups
    )
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
    assert fetched['data']['origin'] == 'GAIA'

    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
def test_update_annotation_group_list(
    annotation_token_two_groups,
    public_source_two_groups,
    public_group2,
    public_group,
    annotation_token,
):
    """Updating an annotation's group list widens who can read it."""
    # Post an annotation visible only to public_group2.
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source_two_groups.id,
            'origin': 'kowalski',
            'data': {'offset_from_host_galaxy': 1.5},
            'group_ids': [public_group2.id],
        },
        token=annotation_token_two_groups,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    # The public_group2 token can read it.
    get_status, fetched = api(
        'GET', f'annotation/{annotation_id}', token=annotation_token_two_groups
    )
    assert get_status == 200
    assert fetched['data']['origin'] == 'kowalski'
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}

    # A token outside public_group2 cannot.
    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 400
    assert "Insufficient permissions." in fetched["message"]

    # Update the data and extend the group list to both groups.
    put_status, _resp = api(
        'PUT',
        f'annotation/{annotation_id}',
        data={
            'data': {'offset_from_host_galaxy': 1.7},
            'group_ids': [public_group.id, public_group2.id],
        },
        token=annotation_token_two_groups,
    )
    assert put_status == 200

    # Both tokens can now view the updated annotation.
    get_status, fetched = api(
        'GET', f'annotation/{annotation_id}', token=annotation_token_two_groups
    )
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.7}

    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.7}
def test_cannot_add_annotation_without_permission(view_only_token, public_source):
    """A view-only token may not create annotations."""
    post_status, resp = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source.id,
            'origin': 'kowalski',
            'data': {'offset_from_host_galaxy': 1.5},
        },
        token=view_only_token,
    )
    assert post_status == 400
    assert resp['status'] == 'error'
def test_delete_annotation(annotation_token, public_source):
    """Deleting an annotation makes subsequent fetches fail."""
    # Use a random origin so the test does not collide with other runs.
    origin = str(uuid.uuid4())
    post_status, posted = api(
        'POST',
        'annotation',
        data={
            'obj_id': public_source.id,
            'origin': origin,
            'data': {'offset_from_host_galaxy': 1.5},
        },
        token=annotation_token,
    )
    assert post_status == 200
    annotation_id = posted['data']['annotation_id']

    # Sanity-check the annotation exists before deleting it.
    get_status, fetched = api('GET', f'annotation/{annotation_id}',
                              token=annotation_token)
    assert get_status == 200
    assert fetched['data']['data'] == {'offset_from_host_galaxy': 1.5}
    assert fetched['data']['origin'] == origin

    del_status, _resp = api('DELETE', f'annotation/{annotation_id}',
                            token=annotation_token)
    assert del_status == 200

    # The annotation is gone now.
    get_status, _resp = api('GET', f'annotation/{annotation_id}',
                            token=annotation_token)
    assert get_status == 400
| 30.242857 | 87 | 0.607818 |
fe76062ab01d453d064c1ba906cf1181f6a47190 | 5,410 | py | Python | autorest/codegen/models/operation_group.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | autorest/codegen/models/operation_group.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | autorest/codegen/models/operation_group.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
from typing import Dict, List, Any, Set
from .base_model import BaseModel
from .operation import Operation
from .lro_operation import LROOperation
from .paging_operation import PagingOperation
from .lro_paging_operation import LROPagingOperation
from .imports import FileImport, ImportType
_LOGGER = logging.getLogger(__name__)
def _get_operation(code_model, yaml_data: Dict[str, Any]) -> Operation:
    """Instantiate the right Operation subclass for one yaml operation.

    The subclass is chosen from the swagger extensions: long-running
    (x-ms-long-running-operation), pageable (x-ms-pageable), both, or
    a plain Operation.
    """
    extensions = yaml_data.get("extensions", {})
    is_lro = bool(extensions.get("x-ms-long-running-operation"))
    is_paging = bool(extensions.get("x-ms-pageable"))
    if is_lro and is_paging:
        operation_cls = LROPagingOperation
    elif is_lro:
        operation_cls = LROOperation
    elif is_paging:
        operation_cls = PagingOperation
    else:
        operation_cls = Operation
    return operation_cls.from_yaml(yaml_data, code_model=code_model)
class OperationGroup(BaseModel):
    """Represent an operation group.

    An operation group is a named collection of operations exposed together
    on the generated client; the group with an empty name holds the client's
    direct methods (see ``is_empty_operation_group``).
    """

    def __init__(
        self,
        code_model,
        yaml_data: Dict[str, Any],
        name: str,
        class_name: str,
        operations: List[Operation],
        api_versions: Set[str]
    ) -> None:
        super().__init__(yaml_data)
        self.code_model = code_model
        self.name = name
        self.class_name = class_name
        self.operations = operations
        self.api_versions = api_versions

    def imports_for_multiapi(self, async_mode: bool) -> FileImport:
        """Aggregate the multiapi imports of every operation in the group.

        :param async_mode: whether the aio variant is being generated
        """
        file_import = FileImport()
        for operation in self.operations:
            file_import.merge(operation.imports_for_multiapi(self.code_model, async_mode))
        return file_import

    def imports(self, async_mode: bool, has_schemas: bool) -> FileImport:
        """Build the imports for this group's generated operations file.

        :param async_mode: whether the aio variant is being generated
        :param has_schemas: whether the package has generated models to import
        """
        file_import = FileImport()
        # Error types referenced by the generated error maps.
        file_import.add_from_import("azure.core.exceptions", "ClientAuthenticationError", ImportType.AZURECORE)
        file_import.add_from_import("azure.core.exceptions", "ResourceNotFoundError", ImportType.AZURECORE)
        file_import.add_from_import("azure.core.exceptions", "ResourceExistsError", ImportType.AZURECORE)
        for operation in self.operations:
            file_import.merge(operation.imports(self.code_model, async_mode))
        if self.code_model.options["tracing"]:
            # Tracing decorator differs between sync and async operations.
            if async_mode:
                file_import.add_from_import(
                    "azure.core.tracing.decorator_async", "distributed_trace_async", ImportType.AZURECORE,
                )
            else:
                file_import.add_from_import(
                    "azure.core.tracing.decorator", "distributed_trace", ImportType.AZURECORE,
                )
        local_path = "..." if async_mode else ".."
        if has_schemas and self.code_model.options["models_mode"]:
            file_import.add_from_import(local_path, "models", ImportType.LOCAL, alias="_models")
        if self.code_model.options["builders_visibility"] == "embedded" and async_mode:
            # Async files import the request builders embedded in the
            # corresponding sync operations module.
            if not self.code_model.options["combine_operation_files"]:
                operation_group_name = "" if self.is_empty_operation_group else self.name
                operation_group_builders = [
                    r for r in self.code_model.rest.request_builders
                    if r.operation_group_name == operation_group_name
                ]
            else:
                operation_group_builders = self.code_model.rest.request_builders
            for request_builder in operation_group_builders:
                file_import.add_from_import(
                    f"...operations.{self.filename}",
                    request_builder.name,
                    import_type=ImportType.LOCAL
                )
        return file_import

    @property
    def filename(self) -> str:
        """Module basename (without .py) for this group's operations file."""
        if self.code_model.options["combine_operation_files"]:
            return "_operations"
        basename = self.name
        if self.is_empty_operation_group:
            basename = self.code_model.module_name
        if basename == "operations":
            # Fixed: this was a pointless f-string with no placeholder.
            return "_operations"
        return f"_{basename}_operations"

    @property
    def is_empty_operation_group(self) -> bool:
        """The operation group with no name is the direct client methods.
        """
        return not self.yaml_data["language"]["default"]["name"]

    @classmethod
    def from_yaml(cls, code_model, yaml_data: Dict[str, Any]) -> "OperationGroup":
        """Build an OperationGroup (and its operations) from code-model yaml."""
        name = yaml_data["language"]["python"]["name"]
        _LOGGER.debug("Parsing %s operation group", name)

        operations = []
        api_versions: Set[str] = set()
        for operation_yaml in yaml_data["operations"]:
            operation = _get_operation(code_model, operation_yaml)
            operations.append(operation)
            # The group supports the union of its operations' api versions.
            api_versions.update(operation.api_versions)

        return cls(
            code_model=code_model,
            yaml_data=yaml_data,
            name=name,
            class_name=yaml_data["language"]["python"]["className"],
            operations=operations,
            api_versions=api_versions
        )
| 39.779412 | 111 | 0.637338 |
262e4f9b5a8764196ddae8178ea26ef0756230ab | 1,920 | py | Python | configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | [
"Apache-2.0"
] | null | null | null | configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | [
"Apache-2.0"
] | null | null | null | configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | [
"Apache-2.0"
] | null | null | null | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://resnet50_caffe_bgr',
backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'),
rpn_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign', out_size=7, sample_num=2, aligned=False)),
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign', out_size=14, sample_num=2, aligned=False))))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| 36.923077 | 79 | 0.597917 |
7d969dd8980f8d04cd690d1f230c3a3e026d75bb | 278 | py | Python | test_compat.py | yong422/vistir | 94301fa5fe4d9c51e50834688866940af455e927 | [
"ISC"
] | null | null | null | test_compat.py | yong422/vistir | 94301fa5fe4d9c51e50834688866940af455e927 | [
"ISC"
] | null | null | null | test_compat.py | yong422/vistir | 94301fa5fe4d9c51e50834688866940af455e927 | [
"ISC"
] | null | null | null | # -*- coding: utf-8 -*-
from vistir.compat import fs_decode, fs_encode
def test_fs_encode():
    """Round-trip a non-ASCII string through fs_encode/fs_decode."""
    # This fails in the normal backports library:
    # see https://github.com/PiDelport/backports.os/issues/13
    original = u"unicode\u0141"
    roundtripped = fs_decode(fs_encode(original))
    assert roundtripped == original
| 27.8 | 69 | 0.71223 |
71b385113c13ee3ca4d0e92f32e64ffcce2cf84c | 1,680 | py | Python | chineselm/ch_demo.py | xrick/Chinese-Character-Level_Langugae-Model | 46d958fb44d333424c6a2a9a04bee6582653efa6 | [
"MIT"
] | 1 | 2018-11-04T04:49:37.000Z | 2018-11-04T04:49:37.000Z | chineselm/ch_demo.py | xrick/Chinese-Character-Level_Langugae-Model | 46d958fb44d333424c6a2a9a04bee6582653efa6 | [
"MIT"
] | 1 | 2021-09-29T17:31:51.000Z | 2021-09-29T17:31:51.000Z | chineselm/ch_demo.py | xrick/Chinese-Character-Level_Langugae-Model | 46d958fb44d333424c6a2a9a04bee6582653efa6 | [
"MIT"
] | null | null | null | from pickle import load
from keras.models import load_model
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
# generate a sequence of characters with a language model
# load the model
model = load_model('ch_model.h5')
# load the mapping
mapping = load(open('mapping.pkl', 'rb'))
def generate_seq(model, mapping, seq_length, seed_text, n_chars):
    """Generate text by repeatedly sampling the next character from a model.

    :param model: trained Keras model exposing ``predict_classes``
    :param mapping: dict mapping characters to integer ids
    :param seq_length: fixed input window length used for padding/truncation
    :param seed_text: initial text used to prime the model
    :param n_chars: number of characters to append
    :returns: seed_text with n_chars generated characters appended
    """
    in_text = seed_text
    # generate a fixed number of characters
    for _ in range(n_chars):
        # encode the characters as integers
        encoded = [mapping[char] for char in in_text]
        # truncate sequences to a fixed length
        encoded = pad_sequences([encoded], maxlen=seq_length, truncating='pre')
        # one hot encode
        encoded = to_categorical(encoded, num_classes=len(mapping))
        encoded = encoded.reshape(1, encoded.shape[0], encoded.shape[1])
        # predict character
        yhat = model.predict_classes(encoded, verbose=0)
        # reverse map integer to character
        out_char = ''
        for char, index in mapping.items():
            if index == yhat:
                out_char = char
                break
        # Bug fix: append the matched character (out_char), not the loop
        # variable `char` -- when no id matched, `char` held an arbitrary
        # last-iterated mapping key instead of the empty string.
        in_text += out_char
    return in_text
def test_generate_seq(model, mapping, seq_len, seed_text, n_chars):
    """Debug helper: print each character of the seed and its encoded ids.

    Note: only `mapping` and `seed_text` are actually used; `model`,
    `seq_len` and `n_chars` shape the loop count but nothing else.
    """
    in_text = seed_text
    for _ in range(n_chars):
        for char in in_text:
            print("current processing char is : {}".format(char))
            # Re-encodes the whole seed each time; purely for inspection.
            encoded_ch = [mapping[char] for char in in_text]
            print("current get encoded_ch is : {}".format(encoded_ch))
"""
print(generate_seq(model, mapping, 14, '台灣 長年 意識', 20))
print(generate_seq(model, mapping, 18, '政黨政治 手段 失能', 20))
print(generate_seq(model, mapping, 16, '耐不住 口號 美麗', 20))
"""
if __name__ == "__main__":
    # Run the debug helper against the globally loaded model and mapping.
    test_generate_seq(model, mapping, 14, '台灣 長年 意識', 20)
| 30 | 73 | 0.725595 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.