**Schema**

Each row of this dump describes one source file, with the following fields and dtypes:

| field | dtype | field | dtype |
|---|---|---|---|
| hexsha | string | qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| size | int64 | qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| ext | string | qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| lang | string | qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| max_stars_repo_path | string | qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| max_stars_repo_name | string | qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| max_stars_repo_head_hexsha | string | qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| max_stars_repo_licenses | list | qsc_code_frac_chars_digital_quality_signal | float64 |
| max_stars_count | int64 | qsc_code_frac_chars_whitespace_quality_signal | float64 |
| max_stars_repo_stars_event_min_datetime | string | qsc_code_size_file_byte_quality_signal | float64 |
| max_stars_repo_stars_event_max_datetime | string | qsc_code_num_lines_quality_signal | float64 |
| max_issues_repo_path | string | qsc_code_num_chars_line_max_quality_signal | float64 |
| max_issues_repo_name | string | qsc_code_num_chars_line_mean_quality_signal | float64 |
| max_issues_repo_head_hexsha | string | qsc_code_frac_chars_alphabet_quality_signal | float64 |
| max_issues_repo_licenses | list | qsc_code_frac_chars_comments_quality_signal | float64 |
| max_issues_count | int64 | qsc_code_cate_xml_start_quality_signal | float64 |
| max_issues_repo_issues_event_min_datetime | string | qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| max_issues_repo_issues_event_max_datetime | string | qsc_code_cate_autogen_quality_signal | float64 |
| max_forks_repo_path | string | qsc_code_frac_lines_long_string_quality_signal | float64 |
| max_forks_repo_name | string | qsc_code_frac_chars_string_length_quality_signal | float64 |
| max_forks_repo_head_hexsha | string | qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| max_forks_repo_licenses | list | qsc_code_frac_lines_string_concat_quality_signal | float64 |
| max_forks_count | int64 | qsc_code_cate_encoded_data_quality_signal | float64 |
| max_forks_repo_forks_event_min_datetime | string | qsc_code_frac_chars_hex_words_quality_signal | float64 |
| max_forks_repo_forks_event_max_datetime | string | qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| content | string | qsc_code_frac_lines_assert_quality_signal | float64 |
| avg_line_length | float64 | qsc_codepython_cate_ast_quality_signal | float64 |
| max_line_length | int64 | qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| alphanum_fraction | float64 | qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_code_num_words_quality_signal | int64 | qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_code_num_chars_quality_signal | float64 | qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 | qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 | qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 | qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 | effective | string |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 | hits | int64 |

A second block of 41 columns repeats the qsc_* names without the `_quality_signal` suffix (qsc_code_num_words through qsc_codepython_frac_lines_print). All of these are typed int64 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are typed null; in every row of this dump they hold 0 (or null for those two).
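Rows in this shape are straightforward to filter on the quality signals. A minimal sketch (the `rows.jsonl` filename and the JSON-lines serialization are assumptions; the dump does not say how the rows are stored):

```python
import json

# Load rows matching the schema above from a JSON-lines file (hypothetical name).
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

# Keep syntactically valid Python files with little duplicated content.
kept = [
    r for r in rows
    if r["lang"] == "Python"
    and r["qsc_codepython_cate_ast_quality_signal"] == 1
    and r["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3
]
print(len(kept), "of", len(rows), "rows kept")
```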
**Row 1: bauerj/cibuildwheel · test/test_docker_images.py**

| field | value |
|---|---|
| hexsha | 16ea2d8be166b5650aea4af33dbde9040a41f768 |
| size | 1,438 |
| ext | py |
| lang | Python |
| repo_path | test/test_docker_images.py |
| repo_name | bauerj/cibuildwheel |
| repo_head_hexsha | b4addbf4a94daa76769d4f779e169406b0ef99ae |
| repo_licenses | ["BSD-2-Clause"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
```python
import platform
import textwrap

import pytest

from . import test_projects, utils

dockcross_only_project = test_projects.new_c_project(
    setup_py_add=textwrap.dedent(r'''
        import os, sys

        # check that we're running in the correct docker image as specified in the
        # environment options CIBW_MANYLINUX1_*_IMAGE
        if "linux" in sys.platform and not os.path.exists("/dockcross"):
            raise Exception(
                "/dockcross directory not found. Is this test running in the correct docker image?"
            )
    ''')
)


def test(tmp_path):
    if utils.platform != 'linux':
        pytest.skip('the test is only relevant to the linux build')
    if platform.machine() not in ['x86_64', 'i686']:
        pytest.skip('this test is currently only possible on x86_64/i686 due to availability of alternative images')

    project_dir = tmp_path / 'project'
    dockcross_only_project.generate(project_dir)

    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
        'CIBW_MANYLINUX_X86_64_IMAGE': 'dockcross/manylinux2010-x64',
        'CIBW_MANYLINUX_I686_IMAGE': 'dockcross/manylinux2010-x86',
        'CIBW_SKIP': 'pp* cp39-*',
    })

    # also check that we got the right wheels built
    expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                       if '-pp' not in w and '-cp39-' not in w]
    assert set(actual_wheels) == set(expected_wheels)
```
quality signals:

| signal | value | signal | value |
|---|---|---|---|
| avg_line_length | 35.073171 | qsc_code_num_chars_line_mean_quality_signal | 35.95 |
| max_line_length | 116 | qsc_code_frac_chars_alphabet_quality_signal | 0.807065 |
| alphanum_fraction | 0.672462 | qsc_code_frac_chars_comments_quality_signal | 0.031293 |
| qsc_code_num_words_quality_signal | 196 | qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_num_chars_quality_signal | 1,438 | qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_mean_word_length_quality_signal | 4.755102 | qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_words_unique_quality_signal | 0.459184 | qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.016094 | qsc_code_frac_chars_string_length_quality_signal | 0.491733 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.042918 | qsc_code_frac_chars_long_word_length_quality_signal | 0.113587 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.040773 | qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.064378 | qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.064378 | qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 | qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 | qsc_code_frac_lines_assert_quality_signal | 0.033333 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 | qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 | qsc_codepython_frac_lines_func_ratio_quality_signal | 0.033333 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 | qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_code_frac_chars_digital_quality_signal | 0.037138 | qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.232267 | qsc_codepython_frac_lines_import_quality_signal | 0.166667 |
| qsc_code_size_file_byte_quality_signal | 1,438 | qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_code_num_lines_quality_signal | 40 | qsc_codepython_score_lines_no_logic_quality_signal | 0.2 |
| qsc_code_num_chars_line_max_quality_signal | 117 | qsc_codepython_frac_lines_print_quality_signal | 0 |

The unsuffixed qsc_* duplicate columns are all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat); effective = 1, hits = 0.
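Most of the scalar signals in this row are simple functions of `content`. A rough re-computation sketch for a few of them; the pipeline's exact definitions are not given in the dump (for example, qsc_code_num_chars_line_max is 117 here against a max_line_length of 116, suggesting one of the two counts the newline, and the frac_lines_* denominators do not match the raw line count), so expect small deviations:

```python
def line_signals(content: str) -> dict:
    # Approximations of a few line-based signals; newline handling and the
    # exact line denominator used by the pipeline are assumptions.
    lines = content.splitlines()
    n = len(lines) or 1
    return {
        "num_lines": len(lines),
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "num_chars_line_mean": sum(len(line) for line in lines) / n,
        "frac_lines_assert": sum(line.lstrip().startswith("assert") for line in lines) / n,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / max(len(content), 1),
    }
```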
**Row 2: taka-mochi/cryptocurrency-autotrading · real_trade/MoveAverageTradePosition.py**

| field | value |
|---|---|
| hexsha | 16eb07b6e691db19202917b717c2ccb87df9fd9d |
| size | 32,556 |
| ext | py |
| lang | Python |
| repo_path | real_trade/MoveAverageTradePosition.py |
| repo_name | taka-mochi/cryptocurrency-autotrading |
| repo_head_hexsha | 16677018c793d7bd3fffdcd3575aecb3535dbd04 |
| repo_licenses | ["BSD-3-Clause"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | 3 | null | null |
| event_min_datetime | 2018-05-22T22:45:23.000Z | null | null |
| event_max_datetime | 2020-02-13T16:45:03.000Z | null | null |

content (Japanese comments translated to English):
```python
# coding: utf-8

import math
import dateutil
import dateutil.parser
import json

from ChartBars import Chart
from ChartUpdaterByCCWebsocket import ChartUpdaterByCoincheckWS
from Util import BitcoinUtil


def adjust_price_to_tick(price, tick):
    return price - math.fmod(price, tick)


def adjust_amount_to_tick(amount, tick):
    return amount - math.fmod(amount, tick)


# a class for one position
class OnePositionTrader(object):
    def __init__(self, price_decide_algorithm, api, pair="btc_jpy", use_leverage=True):
        self.max_total_position_price_base = 0  # total maximum position size in base currency
        self.positioned_price_base = 0          # total position price in base currency (actually paired currency)
        self.positioned_value_in_qty = 0        # used only for spot trading (genbutsu)
        self.max_free_margin_of_base_currency = 0  # max free margin. we cannot use orders that exceed this margin
        self.positions = []
        self.position_id_to_sellids = {}
        self.got_all_order_ids = []
        self.got_close_order_ids = []
        self.exist_order_info_list = None
        self.exist_close_order_info_list = None
        self.last_checked_transaction_id = 0
        self.api = api  # api: e.g. instance of CoinCheck
        self.use_leverage = use_leverage
        # contracts within this many seconds are treated as the same position (only when use_leverage == False)
        self.timelimit_to_grouping_transaction = 2
        self.__pair = pair
        self.price_decide_algorithm = price_decide_algorithm
        print("PositionTrader: inst=" + str(self) + ", pair=" + str(pair))

    @property
    def pair(self):
        return self.__pair

    def get_base_currency(self):
        return self.pair.split("_")[1].lower()

    def get_qty_currency(self):
        return self.pair.split("_")[0].lower()

    # set usable jpy (available_margin + reserved_margin + (positioned))
    def set_max_total_position_price_base(self, p):
        self.set_max_total_position_price_of_base_currency(p)

    def set_max_total_position_price_of_base_currency(self, p):
        self.max_total_position_price_base = p

    def set_max_free_margin_of_base_currency(self, p):
        self.max_free_margin_of_base_currency = p

    def get_max_total_position_price_base(self):
        return self.get_max_total_position_price_of_base_currency()

    def get_max_total_position_price_of_base_currency(self):
        return self.max_total_position_price_base

    def get_positioned_price_base(self):
        return self.positioned_price_base

    def set_timelimit_to_grouping_transaction(self, timelimit_to_grouping_transaction):
        self.timelimit_to_grouping_transaction = timelimit_to_grouping_transaction

    # check current status and make new positions according to algorithm
    # notice: this method should be called after update_status
    def update_new_orders(self, chart, do_not_create_new_order=False):
        assert (self.price_decide_algorithm is not None)
        position_type = None
        target_value = None
        stoploss_rate = None
        decide_make_ret = self.price_decide_algorithm.decide_make_position_order(chart)
        if len(decide_make_ret) == 3:
            (position_type, target_value, stoploss_rate) = decide_make_ret
        else:
            (position_type, target_value) = decide_make_ret

        if target_value is None or position_type is None:
            # algorithm says this instance should not make order. cancel all
            if self.exist_order_info_list is not None:
                for exist_order_info in self.exist_order_info_list:
                    self._cancel_order(exist_order_info["id"])
                self.exist_order_info_list = None
            return False

        # round to possible price
        tick = self.api.order.tick_price(self.pair)
        target_value = adjust_price_to_tick(target_value, tick)
        if stoploss_rate is not None:
            stoploss_rate = adjust_price_to_tick(stoploss_rate, tick)

        # !!round to possible amount
        possible_make_total_price_base_cur = self.get_max_total_position_price_of_base_currency() - self.positioned_price_base
        possible_make_total_price_base_cur = min(possible_make_total_price_base_cur, self.max_free_margin_of_base_currency)
        amount_tick = self.api.order.tick_amount(self.pair)
        possible_amount = 1.0 * possible_make_total_price_base_cur / target_value
        possible_amount = adjust_amount_to_tick(possible_amount, amount_tick)
        print("possible_create_in_base = %f, want to make amount in base = %f, possible amount = %f" %
              (self.get_max_total_position_price_of_base_currency() - self.positioned_price_base,
               possible_make_total_price_base_cur, possible_amount))
        #print("base_cur = %f, positioned = %f, others = %f" % (self.get_max_total_position_price_of_base_currency(), self.positioned_price_base, self.other_reserved_base,))
        #print("target_value = %f, possible_base = %f" % (target_value, possible_make_total_price_base_cur,))

        if possible_amount <= 0.000001:
            # too few btc
            print("want to make (price,amount) = (%f,%f) but too few amount" % (target_value, possible_amount))
            return False

        if not do_not_create_new_order:
            success, new_order_created = self._update_or_create_order(position_type, target_value, possible_amount, stop_loss_rate=stoploss_rate)
            return new_order_created
        else:
            self._cancel_exist_all_buy_orders()
            print("algorithm wants to create a new order but DO_NOT_CREATE_NEW flag = true")
            return False

    # update close orders according to current positions
    # this class should be called after update_status
    def update_close_orders(self, chart, current_time_timezone_aware):
        for position in self.positions:
            open_rate = float(position["open_rate"])
            amount = float(position["amount"])
            created_time = position["created_at_datetime"]
            target_value = None
            if self.price_decide_algorithm.market_sell_decide_algorithm(chart, open_rate, created_time, current_time_timezone_aware) is True:
                # market order close
                pass
            else:
                target_value = self.price_decide_algorithm.sell_price_decide_algorithm(open_rate)
                target_value = adjust_price_to_tick(target_value, self.api.order.tick_price(self.pair))
            self._update_or_create_close_order(position, target_value)

    # interface to update internal position & order status
    def update_status(self, valid_position_info, valid_transaction_info, valid_order_info):
        # update position/order status (assume: pagenations are already cleared)
        self._update_order_id_status(valid_order_info)
        if self.use_leverage:
            self._update_position_status(valid_position_info)
        else:
            self._update_transaction_status(valid_transaction_info)

    def _update_position_status(self, valid_position_info):
        # apply real positions status to this instance
        # for leverage trading
        if not self.use_leverage:
            return

        """
        position example (array of "data" will be passed)
        {
          "data": [
            {
              "id": 10,
              "pair": "btc_jpy",
              "status": "open",
              "created_at": "2015-12-02T05:27:53.000Z",
              "closed_at": null,
              "open_rate": "43553.0",
              "closed_rate": null,
              "amount": "1.51347797",
              "all_amount": "1.51045705",
              "side": "sell",
              "pl": "-8490.81029287",
              "new_order": {
                "id": 23104033,
                "side": "sell",
                "rate": null,
                "amount": null,
                "pending_amount": "0",
                "status": "complete",
                "created_at": "2015-12-02T05:27:52.000Z"
              },
              "close_orders": [
                {
                  "id": 23755132,
                  "side": "buy",
                  "rate": "10000.0",
                  "amount": "1.0",
                  "pending_amount": "0.0",
                  "status": "cancel",
                  "created_at": "2015-12-05T05:03:56.000Z"
                }
              ]
            }
          ]
        }
        """
        ####
        # parse positions
        ####
        self.positions = []
        self.position_id_to_sellids = {}
        all_positions = valid_position_info
        positioned_value_in_base = 0
        for position in all_positions:
            status = position["status"]
            if status != "open":
                continue
            pair = position["pair"]
            if pair != self.pair:
                continue
            position_id = position["id"]
            # check position that is created by the new_order that is self.order_id:
            new_order = position["new_order"]
            if new_order["status"] == "cancel":
                print("new order: " + str(new_order["id"]) + " state is 'cancel'. probably partially contracted and remain is canceled. this position is not ignored")
                #continue
            new_order_id = new_order["id"]
            if new_order_id in self.got_all_order_ids:
                # this position is created by this class's order
                created_time = dateutil.parser.parse(position["created_at"])
                position["created_at_datetime"] = created_time
                amount = position["amount"]
                all_amount = position["all_amount"]
                if all_amount is not None and all_amount < amount:
                    amount = all_amount
                position["amount"] = position["all_amount"] = amount
                self.positions.append(position)
                open_rate = position["open_rate"]
                positioned_value_in_base += float(amount) * float(open_rate)
                # check close orders
                self.position_id_to_sellids[position_id] = \
                    list(map(lambda x: x["id"], filter(lambda x: x["status"] != "cancel", position["close_orders"])))
        self.positioned_price_base = positioned_value_in_base

    def _update_transaction_status(self, valid_transaction_info):
        if self.use_leverage:
            return
        # for spot trading. parse the position state from transaction results.
        # basically behaves like _update_position_status, but the parsed json differs.
        # * unlike _update_position_status, state is carried over from the previous frame
        #   (spot trading has no concept of a "position")
        positions = self.positions
        position_id_to_sellids = self.position_id_to_sellids
        close_transactions = []
        all_transactions = valid_transaction_info
        positioned_value_in_qty = self.positioned_value_in_qty
        qty_cur = self.get_qty_currency()
        base_cur = self.get_base_currency()
        last_transaction_id_in_this_frame = self.last_checked_transaction_id
        for transaction in all_transactions:
            transaction_id = int(transaction["id"])  # transaction_id means position_id
            transaction["id"] = transaction_id
            # check only new id
            if self.last_checked_transaction_id >= transaction_id:
                continue
            last_transaction_id_in_this_frame = max(last_transaction_id_in_this_frame, transaction_id)
            # check pair
            this_pair = transaction["pair"]
            if this_pair != self.pair:
                continue
            # check position that is created by the new_order that is self.order_id:
            new_order_id = int(transaction["order_id"])
            transaction["order_id"] = new_order_id
            is_position_transaction = new_order_id in self.got_all_order_ids
            is_close_transaction = new_order_id in self.got_close_order_ids
            if not is_position_transaction and not is_close_transaction:
                continue
            # other pair
            if qty_cur not in transaction["funds"] or base_cur not in transaction["funds"]:
                continue
            # this position is created by this class's order
            qty_amount = float(transaction["funds"][qty_cur])
            transaction["amount"] = qty_amount
            transaction["open_rate"] = float(transaction["rate"])
            open_rate = float(transaction["open_rate"])
            positioned_value_in_qty += float(qty_amount)
            created_time = dateutil.parser.parse(transaction["created_at"])
            transaction["created_at_datetime"] = created_time
            if is_position_transaction:
                # check close orders
                # missing one would be bad (it would never get canceled), so link every close order we have for now
                position_id_to_sellids[transaction_id] = []
                transaction["close_orders"] = []
                positions.append(transaction)
            else:
                close_transactions.append(transaction)

        # in next frame, only transaction_id > self.last_checked_transaction_id will be checked
        self.last_checked_transaction_id = last_transaction_id_in_this_frame
        print("last_checked_transaction_id = ", self.last_checked_transaction_id)
        print("self.exist_close_order_info_list", self.exist_close_order_info_list)
        if self.exist_close_order_info_list is not None:
            for pos_i, position in enumerate(positions):
                transaction_id = position["id"]
                position_id_to_sellids[transaction_id] = list(map(lambda x: x["id"], self.exist_close_order_info_list))
                position["close_orders"] = self.exist_close_order_info_list
                for i, order in enumerate(position["close_orders"]):
                    order["status"] = "open"
                    order["side"] = order["order_type"]
                    if "amount" not in order:
                        order["amount"] = float(order["pending_amount"])
                    position["close_orders"][i] = order
                positions[pos_i] = position

        # round very small value
        if abs(positioned_value_in_qty) < self.api.order.min_create_amount(self.pair) * 0.1:
            positioned_value_in_qty = 0

        positions = sorted(positions, key=lambda x: -x["id"])  # order by desc
        # concat very near created_at transactions
        grouped_positions = self._group_near_transactions(positions)
        # remove closed position & update positioned_value_in_jpy
        valid_positions, positioned_value_in_base = self._remain_non_closed_transactions(grouped_positions, positioned_value_in_qty)
        if abs(positioned_value_in_base) < self.api.order.tick_price(self.pair) * self.api.order.min_create_amount(self.pair) * 0.1:
            positioned_value_in_base = 0

        # merge position_id_to_sellids
        self.position_id_to_sellids = {}
        for position in valid_positions:
            pos_id = position["id"]
            self.position_id_to_sellids[pos_id] = position_id_to_sellids[pos_id]
        self.positioned_price_base = positioned_value_in_base
        self.positioned_value_in_qty = positioned_value_in_qty
        self.position_id_to_sellids = position_id_to_sellids
        self.positions = valid_positions
        print("position_count=%d, positioned_%s=%f, positioned_%s=%f" % (len(self.positions), base_cur, self.positioned_price_base, qty_cur, self.positioned_value_in_qty,))
        # It is not practical to walk the whole transaction history to decide what was closed and how much remains.
        # Using the position state that can already be resolved at this point (if a close order id exists, the
        # opposing trade has been filled), remove already-closed positions (by amount) from the positions accumulated
        # above (including positions left over from the previous frame), keep only the surviving ones as live, and
        # consolidate them into one (this is for spot trading, so separate ids carry no meaning).
        # Keep the surviving position ID and the consumed close-order IDs, and from the next frame on only reflect
        # newer IDs. However, rounding errors accumulate over time, so once the total becomes extremely small in jpy
        # or btc terms, round it down and treat it as no position.
        # Hmm... spot and leverage need quite different bookkeeping, so one shared class may have been a mistake;
        # this has gotten messy.

    # group positions whose contract times are close together
    def _group_near_transactions(self, target_transactions):
        grouped_positions = []
        positions = target_transactions
        if len(positions) > 0:
            def grouping(desced_position_array):
                ret_pos = dict(desced_position_array[0])
                total_amount = 0
                total_jpy = 0
                for p in desced_position_array:
                    total_amount += p["amount"]
                    total_jpy += p["amount"] * p["open_rate"]
                ret_pos["amount"] = total_amount
                ret_pos["open_rate"] = total_jpy / total_amount
                return ret_pos

            concat_start_index = 0
            prev_created_at = positions[0]["created_at_datetime"]
            for idx, pos in enumerate(positions):
                cur_created_at = pos["created_at_datetime"]
                if abs((cur_created_at - prev_created_at).total_seconds()) <= self.timelimit_to_grouping_transaction:
                    # can group
                    prev_created_at = cur_created_at
                    continue
                # this position cannot be grouped. make a new group from pos[start_index] - pos[idx-1]
                grouped_positions.append(grouping(positions[concat_start_index:idx]))
                #print(grouped_positions[-1])
                concat_start_index = idx
                prev_created_at = cur_created_at
            # remaining positions not yet grouped
            grouped_positions.append(grouping(positions[concat_start_index:]))
        return grouped_positions

    # keep only transactions that have not been closed yet
    def _remain_non_closed_transactions(self, target_transactions, positioned_value_in_qty):
        valid_positions = []
        remain_qty = positioned_value_in_qty
        total_base = 0
        for position in target_transactions:
            if remain_qty <= 0:
                break
            amount = position["amount"]
            if remain_qty >= amount:
                remain_qty -= amount
            else:
                position["amount"] = remain_qty
                remain_qty = 0
            valid_positions.append(position)
            total_base += position["amount"] * position["open_rate"]
        return valid_positions, total_base

    def _update_order_id_status(self, valid_order_info):
        ####
        # parse orders
        ####
        """
        orders example (array of "orders" will be passed)
        {
          "success": true,
          "orders": [
            {
              "id": 202835,
              "order_type": "buy",
              "rate": 26890,
              "pair": "btc_jpy",
              "pending_amount": "0.5527",
              "pending_market_buy_amount": null,
              "stop_loss_rate": null,
              "created_at": "2015-01-10T05:55:38.000Z"
            },
            {
              "id": 202836,
              "order_type": "sell",
              "rate": 26990,
              "pair": "btc_jpy",
              "pending_amount": "0.77",
              "pending_market_buy_amount": null,
              "stop_loss_rate": null,
              "created_at": "2015-01-10T05:55:38.000Z"
            },
            {
              "id": 38632107,
              "order_type": "buy",
              "rate": null,
              "pair": "btc_jpy",
              "pending_amount": null,
              "pending_market_buy_amount": "10000.0",
              "stop_loss_rate": "50000.0",
              "created_at": "2016-02-23T12:14:50.000Z"
            }
          ]
        }
        """
        #exist_order_ids = list(map(lambda x: x["id"], valid_order_info))
        exist_orders = []
        exist_close_orders = []
        other_orders = []
        for idx, order in enumerate(valid_order_info):
            order_id = order["id"]
            order_pair = order["pair"]
            is_added = False
            if order_pair == self.pair:
                if order_id in self.got_all_order_ids:
                    is_added = True
                    exist_orders.append(order)
                elif order_id in self.got_close_order_ids:
                    is_added = True
                    exist_close_orders.append(order)
            if not is_added:
                other_orders.append(order)
        print("exist_create_orders", exist_orders)
        print("exist_close_orders", exist_close_orders)
        self.exist_order_info_list = exist_orders if len(exist_orders) > 0 else None
        self.exist_close_order_info_list = exist_close_orders if len(exist_close_orders) > 0 else None
        #self.other_reserved_base = 0
        #if not self.use_leverage:
        #    for o in other_orders:
        #        if o["order_type"] == "buy":
        #            self.other_reserved_base += float(o["pending_amount"]) * float(o["rate"])

    # returns: (is_success, is_new_order_created)
    def _update_or_create_order(self, position_type, target_value, possible_qty, stop_loss_rate=None):
        assert (self.api is not None)
        # the order list format is the same for spot and leverage
        if self.exist_order_info_list is not None:
            # check the same value or not
            if len(self.exist_order_info_list) == 1:
                exist_order_info = self.exist_order_info_list[0]
                cur_rate = exist_order_info["rate"] if "rate" in exist_order_info else None
                # get current stoploss
                cur_stoploss = exist_order_info["stop_loss_rate"] if "stop_loss_rate" in exist_order_info else None
                cur_stoploss_float_or_none = None
                if cur_stoploss is not None:
                    cur_stoploss_float_or_none = float(cur_stoploss)
                target_stoploss_float_or_none = None
                if stop_loss_rate is not None:
                    target_stoploss_float_or_none = float(stop_loss_rate)
                cur_amount = None
                if "amount" in exist_order_info:
                    cur_amount = exist_order_info["amount"]
                elif "pending_amount" in exist_order_info:
                    cur_amount = exist_order_info["pending_amount"]
                order_type = None
                if "order_type" in exist_order_info:
                    if exist_order_info["order_type"] == "buy" or \
                       exist_order_info["order_type"] == "leverage_buy":
                        order_type = "long"
                    if exist_order_info["order_type"] == "sell" or \
                       exist_order_info["order_type"] == "leverage_sell":
                        order_type = "short"
                if cur_rate is not None and cur_amount is not None and order_type is not None:
                    if abs(float(cur_rate) - float(target_value)) < 0.00001 and \
                       abs(float(cur_amount) - float(possible_qty)) < 0.00001 and \
                       cur_stoploss_float_or_none == target_stoploss_float_or_none and \
                       order_type == position_type:
                        # same order. do nothing
                        print("You already ordered this order: rate=%.1f, amount=%f, stoploss_rate=%s, position_type=%s" % (target_value, possible_qty, str(stop_loss_rate), position_type,))
                        return True, False

            # cancel all exist orders
            if not self._cancel_exist_all_buy_orders():
                return False, False

        # check minimum btc
        min_qty = self.api.order.min_create_amount(self.pair)
        if possible_qty < min_qty:
            print("Minimum order btc = %f, you requested = %f" % (min_qty, possible_qty,))
            return False, False

        # make new order
        """
        ret val example
        "success": true,
        "id": 12345,
        "rate": "30010.0",
        "amount": "1.3",
        "order_type": "sell",
        "stop_loss_rate": null,
        "pair": "btc_jpy",
        "created_at": "2015-01-10T05:55:38.000Z"
        """
        is_long = position_type == "long"
        order_type = 'leverage_buy' if is_long else 'leverage_sell'
        if not self.use_leverage:
            order_type = 'buy' if is_long else 'sell'
        order = {
            'rate': "%.8f" % target_value,
            'amount': "%.8f" % possible_qty,
            'order_type': order_type,
            'pair': self.pair
        }
        # not correct
        # this "stop_loss_rate" means: if a value >= stop_loss_rate, a limit order will be placed at "rate"
        if stop_loss_rate is not None:
            order["stop_loss_rate"] = stop_loss_rate
        ret_str = self.api.order.create(order)
        ret = None
        if ret_str is not None:
            try:
                ret = json.loads(ret_str)
            except:
                print("failed to parse api.order.create result")
                try:
                    print(ret_str)
                except Exception as e:
                    print("failed to show returned json str")
                    print(e)
        if ret is None or ret["success"] is not True or "id" not in ret:
            print("Failed to create order!!")
            try:
                print(ret_str)
            except Exception as e:
                print("failed to show returned json str")
                print(e)
            return False, False
        self.exist_order_info_list = [ret]
        self.got_all_order_ids.append(ret["id"])
        # remove very old orders
        if len(self.got_all_order_ids) > 500:
            self.got_all_order_ids = self.got_all_order_ids[-500:]
        print("order success!", ret_str)
        return True, True

    def _cancel_exist_all_buy_orders(self):
        failed_to_cancel = False
        exist_order_i = 0
        while exist_order_i < len(self.exist_order_info_list):
            exist_order_info = self.exist_order_info_list[exist_order_i]
            if self._cancel_order(exist_order_info["id"]) is False:
                # something error happened!!
                print("order cancel failed %d even if there is a valid order in internal state" % (exist_order_info["id"],))
                failed_to_cancel = True
                del self.exist_order_info_list[exist_order_i]
            else:
                exist_order_i += 1
        if len(self.exist_order_info_list) == 0:
            self.exist_order_info_list = None
        if failed_to_cancel:
            return False
        return True

    # target_value: limit price. if None, close with a market order
    def _update_or_create_close_order(self, position, target_value):
        position_id = position["id"]
        if position_id not in self.position_id_to_sellids:
            return False
        sell_qty = float(position["amount"])
        sell_ids = self.position_id_to_sellids[position_id]
        position_type = position["side"]
        # convert position type name
        if position_type == "buy":
            position_type = "long"
        if position_type == "sell":
            position_type = "short"
        is_close_long = True
        if position_type == "long":
            is_close_long = True
        if position_type == "short":
            is_close_long = False

        # check exist sell-orders. if target value and amount are completely same, do not pass new order
        valid_close_orders = list(filter(lambda x: x["status"] != "cancel" and x["id"] in sell_ids, position["close_orders"]))
        print("valid_close_order count = %d" % len(valid_close_orders))
        if len(valid_close_orders) == 1 and target_value is not None:
            # check the order is already created on exchanger
            valid_close_order = valid_close_orders[0]
            print("your order: rate=%f, amount=%f" % (target_value, sell_qty,))
            print("valid_close_order[0]:")
            print(valid_close_order)
            rate = None
            if "rate" in valid_close_order:
                rate = float(valid_close_order["rate"])
            amount = valid_close_order["amount"]
            is_cur_close_long = False
            if "side" in valid_close_order:
                is_cur_close_long = valid_close_order["side"] == "sell"
            elif "order_type" in valid_close_order:
                is_cur_close_long = valid_close_order["order_type"] == "sell"
            if abs(float(rate) - float(target_value)) < 0.00001 and \
               abs(float(amount) - float(sell_qty)) < 0.00001 and \
               is_close_long == is_cur_close_long:
                # completely same!!
                print("requested close order is already ordered on server:")
                print("  position id:%s, target_value:%s, amount:%s, close_long:%s" % (str(position_id), str(target_value), str(amount), str(is_cur_close_long),))
                return True

        min_qty = self.api.order.min_create_amount(self.pair)
        if sell_qty < min_qty:
            qty_cur = self.get_qty_currency()
            print("Minimum order %s = %f, you requested = %f" % (qty_cur, min_qty, sell_qty,))
            return False

        # cancel all
        for sell_id in sell_ids:
            self._cancel_order(sell_id)
        self.position_id_to_sellids[position_id] = []

        # make new order
        order = {}
        if self.use_leverage:
            order = {
                'amount': '%.8f' % BitcoinUtil.roundBTCby1satoshi(sell_qty),
                'position_id': position_id,
                'order_type': 'close_long' if is_close_long else 'close_short',
                'pair': 'btc_jpy',
            }
            if target_value is not None:
                order['rate'] = target_value
        else:
            # if not leverage order, close order is always "sell"
            if not is_close_long:
                print("normal order cannot make short position!")
                print("you passed close 'short' for normal order")
                return False
            order = {
                'amount': '%.8f' % BitcoinUtil.roundBTCby1satoshi(sell_qty),
                'order_type': 'sell',
                'pair': self.pair,
            }
            if target_value is None:
                # market_sell
                order['order_type'] = "market_sell"
            else:
                order['rate'] = target_value

        ret = self.api.order.create(order)
        ret_str = ret
        if ret is not None:
            try:
                ret = json.loads(ret)
            except:
                print("failed to parse close_long order result")
                try:
                    print(ret_str)
                except Exception as e:
                    print("failed to print error")
                    print(e)
        if ret is None or ret["success"] is not True or "id" not in ret or ret["id"] is None:
            print("sell order canceled but failed to create new sell order!!: position id: %s" % (str(position_id),))
            try:
                print(ret_str)
            except Exception as e:
                print("failed to print error")
                print(e)
            return False
        sell_ids = [ret["id"]]
        self.position_id_to_sellids[position_id] = sell_ids
        self.got_close_order_ids.append(ret["id"])
        if len(self.got_close_order_ids) > 500:
            self.got_close_order_ids = self.got_close_order_ids[-500:]
        return True

    def _cancel_order(self, order_id):
        # call apis for current orders
        if order_id is None:
            print("order is already canceled")
            return True
        # do something
        ret_str = self.api.order.cancel({"id": order_id, "pair": self.pair})
        ret = None
        if ret_str is not None:
            try:
                ret = json.loads(ret_str)
            except:
                print("failed to parse cancel order ret str")
                try:
                    print(ret_str)
                except Exception as e:
                    print("failed to print returned error json")
                    print(e)
        if ret is None or ret["success"] is not True or "id" not in ret:
            print("Failed to cancel order %s: %s" % (str(order_id), str(ret_str),))
            return False
        return True
```
quality signals:

| signal | value | signal | value |
|---|---|---|---|
| avg_line_length | 40.593516 | qsc_code_num_chars_line_mean_quality_signal | 40.644195 |
| max_line_length | 189 | qsc_code_frac_chars_alphabet_quality_signal | 0.803954 |
| alphanum_fraction | 0.593593 | qsc_code_frac_chars_comments_quality_signal | 0.138807 |
| qsc_code_num_words_quality_signal | 3,837 | qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_num_chars_quality_signal | 32,556 | qsc_code_frac_lines_dupe_lines_quality_signal | 0.240084 |
| qsc_code_mean_word_length_quality_signal | 4.708887 | qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_words_unique_quality_signal | 0.095126 | qsc_code_frac_lines_long_string_quality_signal | 0.002088 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.022415 | qsc_code_frac_chars_string_length_quality_signal | 0.09426 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.02557 | qsc_code_frac_chars_long_word_length_quality_signal | 0.003947 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.017877 | qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.394786 | qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.307394 | qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.228027 | qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.160615 | qsc_code_frac_lines_assert_quality_signal | 0.004175 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.116504 | qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.084625 | qsc_codepython_frac_lines_func_ratio_quality_signal | 0.05428 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 | qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_code_frac_chars_digital_quality_signal | 0.015373 | qsc_codepython_frac_lines_pass_quality_signal | 0.004175 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.322675 | qsc_codepython_frac_lines_import_quality_signal | 0.014614 |
| qsc_code_size_file_byte_quality_signal | 32,556 | qsc_codepython_frac_lines_simplefunc_quality_signal | 0.016701 |
| qsc_code_num_lines_quality_signal | 801 | qsc_codepython_score_lines_no_logic_quality_signal | 0.139875 |
| qsc_code_num_chars_line_max_quality_signal | 190 | qsc_codepython_frac_lines_print_quality_signal | 0.093946 |

As in row 1, the unsuffixed qsc_* duplicate columns are all 0 or null; effective = 1, hits = 0.
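This row scores far higher on the duplicated-n-gram signals than row 1 (qsc_code_frac_chars_dupe_5grams_quality_signal = 0.394786 versus 0.064378), consistent with its repetitive error-handling and getter/setter blocks. A sketch of one way such a signal can be computed over word-level n-grams; the tokenizer and the attribution of characters to overlapping n-grams are assumptions, so the reported values will not reproduce exactly:

```python
from collections import Counter


def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    # Fraction of word characters that fall inside word n-grams occurring
    # more than once. Overlapping duplicates are double-counted here, which
    # the real pipeline may handle differently.
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    dupe_chars = sum(sum(len(w) for w in g) for g in ngrams if counts[g] > 1)
    total_chars = sum(len(w) for w in words) or 1
    return min(dupe_chars / total_chars, 1.0)
```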
**Row 3: johnvictorfs/atlantisbot-rewrite · bot/cogs/clan.py**

| field | value |
|---|---|
| hexsha | 16ebce5b29644a3fdd8bee60c8ef43a322219b10 |
| size | 9,086 |
| ext | py |
| lang | Python |
| repo_path | bot/cogs/clan.py |
| repo_name | johnvictorfs/atlantisbot-rewrite |
| repo_head_hexsha | ac6887f91438206ba926be59d8fd2bedd07923ad |
| repo_licenses | ["MIT"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | null | 5 | 1 |
| event_min_datetime | null | 2018-09-28T18:01:28.000Z | 2018-10-15T22:41:47.000Z |
| event_max_datetime | null | 2019-02-12T18:49:06.000Z | 2018-10-15T22:41:47.000Z |

content (the user-facing strings are Portuguese by design and are kept as-is):
```python
import rs3clans
import discord
from discord.ext import commands

from bot.bot_client import Bot
from bot.utils.tools import separator
from bot.utils.context import Context


class Clan(commands.Cog):
    def __init__(self, bot: Bot):
        self.bot = bot

    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.bot_has_permissions(embed_links=True)
    @commands.command(aliases=['clan'])
    async def clan_detail_info(self, ctx: Context, *, clan_name: str):
        try:
            clan = rs3clans.Clan(name=clan_name, set_exp=True)
        except ConnectionError:
            return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
        except rs3clans.ClanNotFoundError:
            return await ctx.send(f"O clã '{clan_name}' não existe.")
        clan_leader = None
        for member in clan:
            if member.rank == 'Owner':
                clan_leader = member.name
        clan_url = clan.name.replace(' ', '%20')
        clan_embed = discord.Embed(
            title=clan.name,
            color=discord.Color.green(),
            url=f'http://services.runescape.com/m=clan-home/clan/{clan_url}'
        )
        clan_embed.set_author(name='RuneClan', url=f'https://runeclan.com/clan/{clan_url}')
        clan_embed.set_thumbnail(url=f'http://services.runescape.com/m=avatar-rs/{clan_url}/clanmotif.png')
        clan_embed.add_field(name="Exp Total", value=f'{clan.exp:,}')
        clan_embed.add_field(name="Membros", value=str(clan.count))
        clan_embed.add_field(name="Líder", value=clan_leader)
        clan_embed.add_field(name="Exp Média por Membro", value=f'{clan.avg_exp:,.0f}')
        return await ctx.send(embed=clan_embed)

    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.bot_has_permissions(embed_links=True)
    @commands.command(aliases=['claninfo', 'clanexp', 'claexp', 'clainfo', 'clãexp', 'clãinfo'])
    async def clan_user_info(self, ctx: Context, *, username: str):
        try:
            player = rs3clans.Player(name=username, runemetrics=True)
        except ConnectionError:
            return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
        if not player.exists:
            return await ctx.send(f"Jogador '{player.name}' não existe.")
        if not player.clan:
            return await ctx.send(f"Jogador '{player.name}' não está em um clã.")
        user_clan = rs3clans.Clan(name=player.clan)
        member = user_clan.get_member(username)
        user_clan_exp = member.exp
        user_rank = member.rank
        display_username = player.name
        if self.bot.setting.show_titles:
            if player.suffix:
                display_username = f"{player.name} {player.title}"
            else:
                display_username = f"{player.title} {player.name}"
        user_url_name = player.name.replace(" ", "%20")
        user_url_clan = player.clan.replace(" ", "%20")
        icon_url = f"https://secure.runescape.com/m=avatar-rs/{user_url_name}/chat.png"
        runeclan_url = f"https://runeclan.com/user/{user_url_name}"
        clan_banner_url = f"http://services.runescape.com/m=avatar-rs/l=3/a=869/{user_url_clan}/clanmotif.png"
        embed_title = "RuneClan"
        rank_header = "__Rank__"
        clan_header = "__Clã__"
        exp_header = "__Exp no Clã__"
        total_exp_header = "__Exp Total__"
        private_profile_header = "Indisponível - Perfil Privado"
        rank_emoji = self.bot.setting.clan_settings[user_rank]['Emoji']
        user_rank = self.bot.setting.clan_settings[user_rank]['Translation']
        clan_info_embed = discord.Embed(
            title=embed_title,
            description="",
            color=discord.Colour.dark_blue(),
            url=runeclan_url,
        )
        clan_info_embed.set_author(
            icon_url=icon_url, name=display_username
        )
        clan_info_embed.set_thumbnail(
            url=clan_banner_url
        )
        clan_info_embed.add_field(
            name=clan_header,
            value=player.clan
        )
        clan_info_embed.add_field(
            name=rank_header,
            value=f"{user_rank} {rank_emoji}"
        )
        clan_info_embed.add_field(
            name=exp_header,
            value=f"{user_clan_exp:,}"
        )
        if player.private_profile:
            clan_info_embed.add_field(
                name=total_exp_header,
                value=private_profile_header,
                inline=False
            )
        else:
            clan_info_embed.add_field(
                name=total_exp_header,
                value=f"{player.exp:,}"
            )
        return await ctx.send(content=None, embed=clan_info_embed)

    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.bot_has_permissions(embed_links=True)
    @commands.command(aliases=['ranksupdate', 'upranks', 'rank'])
    async def ranks(self, ctx: Context, *, clan: str = 'Atlantis'):
        if clan.lower() == 'atlantis argus':
            return await ctx.send('`!rank argus` irmão')
        elif clan.lower() == 'atlantis':
            exp_general = 2_000_000_000
            exp_captain = 1_000_000_000
            exp_lieutenant = 500_000_000
            exp_seargent = 250_000_000
            exp_corporal = 125_000_000
        elif clan.lower() == 'argus':
            exp_general = 500_000_000
            exp_captain = 250_000_000
            exp_lieutenant = 125_000_000
            exp_seargent = 60_000_000
            exp_corporal = 30_000_000
            clan = 'Atlantis Argus'
        else:
            return await ctx.send('Clã não reconhecido.')
        rank_emoji = {
            'Recruit': self.bot.setting.clan_settings['Recruit']['Emoji'],
            'Corporal': self.bot.setting.clan_settings['Corporal']['Emoji'],
            'Sergeant': self.bot.setting.clan_settings['Sergeant']['Emoji'],
            'Lieutenant': self.bot.setting.clan_settings['Lieutenant']['Emoji'],
            'Captain': self.bot.setting.clan_settings['Captain']['Emoji'],
            'General': self.bot.setting.clan_settings['General']['Emoji'],
        }
        ranks_embed = discord.Embed(
            title="__Ranks a Atualizar__",
            description=" ",
        )
        found = False
        clan = rs3clans.Clan(clan, set_exp=False)
        clan_members = reversed([member for member in clan])
        member: rs3clans.ClanMember
        for member in clan_members:
            if len(ranks_embed.fields) >= 20:
                await ctx.send('Muitos ranks a serem atualizados, enviando apenas os 20 primeiros.')
                break
            if member.exp >= exp_corporal and member.rank == 'Recruit':
                ranks_embed.add_field(
                    name=member.name,
                    value=f"Recruta {rank_emoji['Recruit']} ❯ Cabo {rank_emoji['Corporal']}\n"
                          f"**__Exp:__** {member.exp:,}\n{separator}",
                    inline=False)
                found = True
            elif member.exp >= exp_general and member.rank == 'Captain':
                ranks_embed.add_field(
                    name=member.name,
                    value=f"Capitão {rank_emoji['Captain']} ❯ General {rank_emoji['General']}\n"
                          f"**__Exp:__** {member.exp:,}\n{separator}",
                    inline=False)
                found = True
            elif member.exp >= exp_captain and member.rank == 'Lieutenant':
                ranks_embed.add_field(
                    name=member.name,
                    value=f"Tenente {rank_emoji['Lieutenant']} ❯ Capitão {rank_emoji['Captain']}\n"
                          f"**__Exp:__** {member.exp:,}\n{separator}",
                    inline=False)
                found = True
            elif member.exp >= exp_lieutenant and member.rank == 'Sergeant':
                ranks_embed.add_field(
                    name=member.name,
                    value=f"Sargento {rank_emoji['Sergeant']} ❯ Tenente {rank_emoji['Lieutenant']}\n"
                          f"**__Exp:__** {member.exp:,}\n{separator}",
                    inline=False)
                found = True
            elif member.exp >= exp_seargent and member.rank == 'Corporal':
                ranks_embed.add_field(
                    name=member.name,
                    value=f"Cabo {rank_emoji['Corporal']} ❯ Sargento {rank_emoji['Sergeant']}\n"
                          f"**__Exp:__** {member.exp:,}\n{separator}",
                    inline=False)
                found = True
        if not found:
            ranks_embed.add_field(
                name="Nenhum Rank a ser atualizado no momento :)",
                value=separator,
                inline=False
            )
        return await ctx.send(embed=ranks_embed)


def setup(bot):
    bot.add_cog(Clan(bot))
```
quality signals:

| signal | value | signal | value |
|---|---|---|---|
| avg_line_length | 42.064815 | qsc_code_num_chars_line_mean_quality_signal | 42.260465 |
| max_line_length | 115 | qsc_code_frac_chars_alphabet_quality_signal | 0.774344 |
| alphanum_fraction | 0.569117 | qsc_code_frac_chars_comments_quality_signal | 0 |
| qsc_code_num_words_quality_signal | 1,046 | qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_num_chars_quality_signal | 9,086 | qsc_code_frac_lines_dupe_lines_quality_signal | 0.270833 |
| qsc_code_mean_word_length_quality_signal | 4.717017 | qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_words_unique_quality_signal | 0.191205 | qsc_code_frac_lines_long_string_quality_signal | 0.005208 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.024321 | qsc_code_frac_chars_string_length_quality_signal | 0.214181 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.039522 | qsc_code_frac_chars_long_word_length_quality_signal | 0.0434 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.051682 | qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.378597 | qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.315971 | qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.270369 | qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.250709 | qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.250709 | qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.181394 | qsc_codepython_frac_lines_func_ratio_quality_signal | 0.010417 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 | qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_code_frac_chars_digital_quality_signal | 0.018992 | qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.316201 | qsc_codepython_frac_lines_import_quality_signal | 0.03125 |
| qsc_code_size_file_byte_quality_signal | 9,086 | qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_code_num_lines_quality_signal | 215 | qsc_codepython_score_lines_no_logic_quality_signal | 0.098958 |
| qsc_code_num_chars_line_max_quality_signal | 116 | qsc_codepython_frac_lines_print_quality_signal | 0 |

As in row 1, the unsuffixed qsc_* duplicate columns are all 0 or null; effective = 1, hits = 0.
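Every row in this dump has qsc_codepython_cate_ast_quality_signal = 1. A plausible reading of the flag (an assumption; the dump does not define it) is a binary does-it-parse check:

```python
import ast


def cate_ast(content: str) -> int:
    # 1 if the file parses as valid Python, else 0.
    try:
        ast.parse(content)
        return 1
    except SyntaxError:
        return 0
```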
**Row 4: zsoltn/python-otcextensions · otcextensions/tests/functional/osclient/vpc/v2/common.py**

| field | value |
|---|---|
| hexsha | 16ec4bab280bd7d838f873bdb4d147f41ca2f107 |
| size | 2,539 |
| ext | py |
| lang | Python |
| repo_path | otcextensions/tests/functional/osclient/vpc/v2/common.py |
| repo_name | zsoltn/python-otcextensions |
| repo_head_hexsha | 4c0fa22f095ebd5f9636ae72acbae5048096822c |
| repo_licenses | ["Apache-2.0"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | 10 | 208 | 15 |
| event_min_datetime | 2018-03-03T17:59:59.000Z | 2020-02-10T08:27:46.000Z | 2020-04-01T20:45:54.000Z |
| event_max_datetime | 2020-01-08T10:03:00.000Z | 2022-03-29T15:24:21.000Z | 2022-03-23T12:45:43.000Z |

content:
```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import uuid
from datetime import datetime

from openstackclient.tests.functional import base


class VpcTestCase(base.TestCase):
    """Common functional test bits for VPC commands"""

    CURR_TIME = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")

    def setUp(self):
        super(VpcTestCase, self).setUp()
        UUID = uuid.uuid4().hex[:8]
        self.LOCAL_ROUTER_NAME = 'test-local-router-otce-cli' + UUID
        self.PEER_ROUTER_NAME = 'test-peer-router-otce-cli' + UUID
        self.PEERING_NAME = 'test-peering-otce-cli-' + UUID
        self.LOCAL_ROUTER_ID = None
        self.PEER_ROUTER_ID = None
        self.PEERING_ID = None

    def create_vpc_peering(self, name=None):
        self._create_routers()
        name = name or self.PEERING_NAME
        json_output = json.loads(self.openstack(
            'vpc peering create '
            '{name} '
            '--local-router-id "{local_router_id}" '
            '--peer-router-id "{peer_router_id}" '
            '-f json'.format(
                name=name,
                local_router_id=self.LOCAL_ROUTER_ID,
                peer_router_id=self.PEER_ROUTER_ID)
        ))
        self.assertIsNotNone(json_output)
        self.PEERING_ID = json_output['id']
        return json_output

    def delete_vpc_peering(self):
        self.addCleanup(self._delete_routers)
        self.openstack('vpc peering delete {}'.format(self.PEERING_ID))

    def _create_routers(self):
        local_router = json.loads(self.openstack(
            'router create -f json ' + self.LOCAL_ROUTER_NAME
        ))
        self.LOCAL_ROUTER_ID = local_router['id']
        peer_router = json.loads(self.openstack(
            'router create -f json ' + self.PEER_ROUTER_NAME
        ))
        self.PEER_ROUTER_ID = peer_router['id']

    def _delete_routers(self):
        self.openstack(
            'router delete {} {}'.format(
                self.LOCAL_ROUTER_ID, self.PEER_ROUTER_ID
            ))
```
quality signals:

| signal | value | signal | value |
|---|---|---|---|
| avg_line_length | 33.853333 | qsc_code_num_chars_line_mean_quality_signal | 34.310811 |
| max_line_length | 77 | qsc_code_frac_chars_alphabet_quality_signal | 0.828374 |
| alphanum_fraction | 0.639228 | qsc_code_frac_chars_comments_quality_signal | 0.2308 |
| qsc_code_num_words_quality_signal | 329 | qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_num_chars_quality_signal | 2,539 | qsc_code_frac_lines_dupe_lines_quality_signal | 0.083333 |
| qsc_code_mean_word_length_quality_signal | 4.756839 | qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_words_unique_quality_signal | 0.340426 | qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.081789 | qsc_code_frac_chars_string_length_quality_signal | 0.149793 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.066454 | qsc_code_frac_chars_long_word_length_quality_signal | 0.037707 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.057508 | qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.197444 | qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.170607 | qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.104792 | qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.104792 | qsc_code_frac_lines_assert_quality_signal | 0.020833 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.06262 | qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.06262 | qsc_codepython_frac_lines_func_ratio_quality_signal | 0.104167 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 | qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_code_frac_chars_digital_quality_signal | 0.003188 | qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.258763 | qsc_codepython_frac_lines_import_quality_signal | 0.083333 |
| qsc_code_size_file_byte_quality_signal | 2,539 | qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_code_num_lines_quality_signal | 74 | qsc_codepython_score_lines_no_logic_quality_signal | 0.25 |
| qsc_code_num_chars_line_max_quality_signal | 78 | qsc_codepython_frac_lines_print_quality_signal | 0 |

As in row 1, the unsuffixed qsc_* duplicate columns are all 0 or null; effective = 1, hits = 0.
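Row 4's qsc_code_frac_chars_comments_quality_signal (0.2308) is dominated by its Apache license header. A sketch of a comment-fraction computation using the stdlib tokenizer; whether the pipeline counts the `#` characters themselves, includes docstrings, or uses a tokenizer at all is an assumption:

```python
import io
import tokenize


def frac_chars_comments(content: str) -> float:
    # Fraction of characters inside '#' comments, per the stdlib tokenizer.
    comment_chars = 0
    try:
        for tok in tokenize.generate_tokens(io.StringIO(content).readline):
            if tok.type == tokenize.COMMENT:
                comment_chars += len(tok.string)
    except (tokenize.TokenError, IndentationError, SyntaxError):
        pass  # keep whatever was counted before the tokenizer gave up
    return comment_chars / max(len(content), 1)
```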
**Row 5: aisk/pyston · test/cpython/test_base64.py**

| field | value |
|---|---|
| hexsha | 16f050092210b638486f36ba124add5847de3ce7 |
| size | 9,390 |
| ext | py |
| lang | Python |
| repo_path | test/cpython/test_base64.py |
| repo_name | aisk/pyston |
| repo_head_hexsha | ac69cfef0621dbc8901175e84fa2b5cb5781a646 |
| repo_licenses | ["BSD-2-Clause", "Apache-2.0"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | 1 | null | 1 |
| event_min_datetime | 2020-02-06T14:28:45.000Z | null | 2020-02-06T14:29:00.000Z |
| event_max_datetime | 2020-02-06T14:28:45.000Z | null | 2020-02-06T14:29:00.000Z |

content (Python 2 test code, kept as-is):
```python
import unittest
from test import test_support
import base64


class LegacyBase64TestCase(unittest.TestCase):
    def test_encodestring(self):
        eq = self.assertEqual
        eq(base64.encodestring("www.python.org"), "d3d3LnB5dGhvbi5vcmc=\n")
        eq(base64.encodestring("a"), "YQ==\n")
        eq(base64.encodestring("ab"), "YWI=\n")
        eq(base64.encodestring("abc"), "YWJj\n")
        eq(base64.encodestring(""), "")
        eq(base64.encodestring("abcdefghijklmnopqrstuvwxyz"
                               "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                               "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
        # Non-bytes
        eq(base64.encodestring(bytearray('abc')), 'YWJj\n')

    def test_decodestring(self):
        eq = self.assertEqual
        eq(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n"), "www.python.org")
        eq(base64.decodestring("YQ==\n"), "a")
        eq(base64.decodestring("YWI=\n"), "ab")
        eq(base64.decodestring("YWJj\n"), "abc")
        eq(base64.decodestring("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                               "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
                               "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        eq(base64.decodestring(''), '')
        # Non-bytes
        eq(base64.decodestring(bytearray("YWJj\n")), "abc")

    def test_encode(self):
        eq = self.assertEqual
        from cStringIO import StringIO
        infp = StringIO('abcdefghijklmnopqrstuvwxyz'
                        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                        '0123456789!@#0^&*();:<>,. []{}')
        outfp = StringIO()
        base64.encode(infp, outfp)
        eq(outfp.getvalue(),
           'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
           'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
           'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')

    def test_decode(self):
        from cStringIO import StringIO
        infp = StringIO('d3d3LnB5dGhvbi5vcmc=')
        outfp = StringIO()
        base64.decode(infp, outfp)
        self.assertEqual(outfp.getvalue(), 'www.python.org')


class BaseXYTestCase(unittest.TestCase):
    def test_b64encode(self):
        eq = self.assertEqual
        # Test default alphabet
        eq(base64.b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
        eq(base64.b64encode('\x00'), 'AA==')
        eq(base64.b64encode("a"), "YQ==")
        eq(base64.b64encode("ab"), "YWI=")
        eq(base64.b64encode("abc"), "YWJj")
        eq(base64.b64encode(""), "")
        eq(base64.b64encode("abcdefghijklmnopqrstuvwxyz"
                            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                            "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
        # Test with arbitrary alternative characters
        eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd')
        # Non-bytes
        eq(base64.b64encode(bytearray('abcd')), 'YWJjZA==')
        self.assertRaises(TypeError, base64.b64encode,
                          '\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$'))
        # Test standard alphabet
        eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
        eq(base64.standard_b64encode("a"), "YQ==")
        eq(base64.standard_b64encode("ab"), "YWI=")
        eq(base64.standard_b64encode("abc"), "YWJj")
        eq(base64.standard_b64encode(""), "")
        eq(base64.standard_b64encode("abcdefghijklmnopqrstuvwxyz"
                                     "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                                     "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
        # Non-bytes
        eq(base64.standard_b64encode(bytearray('abcd')), 'YWJjZA==')
        # Test with 'URL safe' alternative characters
        eq(base64.urlsafe_b64encode('\xd3V\xbeo\xf7\x1d'), '01a-b_cd')
        # Non-bytes
        eq(base64.urlsafe_b64encode(bytearray('\xd3V\xbeo\xf7\x1d')), '01a-b_cd')

    def test_b64decode(self):
        eq = self.assertEqual
        eq(base64.b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
        eq(base64.b64decode('AA=='), '\x00')
        eq(base64.b64decode("YQ=="), "a")
        eq(base64.b64decode("YWI="), "ab")
        eq(base64.b64decode("YWJj"), "abc")
        eq(base64.b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                            "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
                            "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        eq(base64.b64decode(''), '')
        # Test with arbitrary alternative characters
        eq(base64.b64decode('01a*b$cd', altchars='*$'), '\xd3V\xbeo\xf7\x1d')
        # Non-bytes
        eq(base64.b64decode(bytearray("YWJj")), "abc")
        # Test standard alphabet
        eq(base64.standard_b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
        eq(base64.standard_b64decode("YQ=="), "a")
        eq(base64.standard_b64decode("YWI="), "ab")
        eq(base64.standard_b64decode("YWJj"), "abc")
        eq(base64.standard_b64decode(""), "")
        eq(base64.standard_b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                                     "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
                                     "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        # Non-bytes
        eq(base64.standard_b64decode(bytearray("YWJj")), "abc")
        # Test with 'URL safe' alternative characters
        eq(base64.urlsafe_b64decode('01a-b_cd'), '\xd3V\xbeo\xf7\x1d')
        # Non-bytes
        eq(base64.urlsafe_b64decode(bytearray('01a-b_cd')), '\xd3V\xbeo\xf7\x1d')

    def test_b64decode_error(self):
        self.assertRaises(TypeError, base64.b64decode, 'abc')

    def test_b32encode(self):
        eq = self.assertEqual
        eq(base64.b32encode(''), '')
        eq(base64.b32encode('\x00'), 'AA======')
        eq(base64.b32encode('a'), 'ME======')
        eq(base64.b32encode('ab'), 'MFRA====')
        eq(base64.b32encode('abc'), 'MFRGG===')
        eq(base64.b32encode('abcd'), 'MFRGGZA=')
        eq(base64.b32encode('abcde'), 'MFRGGZDF')
        # Non-bytes
        eq(base64.b32encode(bytearray('abcd')), 'MFRGGZA=')

    def test_b32decode(self):
        eq = self.assertEqual
        eq(base64.b32decode(''), '')
        eq(base64.b32decode('AA======'), '\x00')
        eq(base64.b32decode('ME======'), 'a')
        eq(base64.b32decode('MFRA===='), 'ab')
        eq(base64.b32decode('MFRGG==='), 'abc')
        eq(base64.b32decode('MFRGGZA='), 'abcd')
        eq(base64.b32decode('MFRGGZDF'), 'abcde')
        # Non-bytes
        self.assertRaises(TypeError, base64.b32decode, bytearray('MFRGG==='))

    def test_b32decode_casefold(self):
        eq = self.assertEqual
        eq(base64.b32decode('', True), '')
        eq(base64.b32decode('ME======', True), 'a')
        eq(base64.b32decode('MFRA====', True), 'ab')
        eq(base64.b32decode('MFRGG===', True), 'abc')
        eq(base64.b32decode('MFRGGZA=', True), 'abcd')
        eq(base64.b32decode('MFRGGZDF', True), 'abcde')
        # Lower cases
        eq(base64.b32decode('me======', True), 'a')
        eq(base64.b32decode('mfra====', True), 'ab')
        eq(base64.b32decode('mfrgg===', True), 'abc')
        eq(base64.b32decode('mfrggza=', True), 'abcd')
        eq(base64.b32decode('mfrggzdf', True), 'abcde')
        # Expected exceptions
        self.assertRaises(TypeError, base64.b32decode, 'me======')
        # Mapping zero and one
        eq(base64.b32decode('MLO23456'), 'b\xdd\xad\xf3\xbe')
        eq(base64.b32decode('M1023456', map01='L'), 'b\xdd\xad\xf3\xbe')
        eq(base64.b32decode('M1023456', map01='I'), 'b\x1d\xad\xf3\xbe')

    def test_b32decode_error(self):
        self.assertRaises(TypeError, base64.b32decode, 'abc')
        self.assertRaises(TypeError, base64.b32decode, 'ABCDEF==')

    def test_b16encode(self):
        eq = self.assertEqual
        eq(base64.b16encode('\x01\x02\xab\xcd\xef'), '0102ABCDEF')
        eq(base64.b16encode('\x00'), '00')
        # Non-bytes
        eq(base64.b16encode(bytearray('\x01\x02\xab\xcd\xef')), '0102ABCDEF')

    def test_b16decode(self):
        eq = self.assertEqual
        eq(base64.b16decode('0102ABCDEF'), '\x01\x02\xab\xcd\xef')
        eq(base64.b16decode('00'), '\x00')
        # Lower case is not allowed without a flag
        self.assertRaises(TypeError, base64.b16decode, '0102abcdef')
        # Case fold
        eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
        # Non-bytes
        eq(base64.b16decode(bytearray("0102ABCDEF")), '\x01\x02\xab\xcd\xef')


def test_main():
    test_support.run_unittest(__name__)


if __name__ == '__main__':
    test_main()
```
quality signals:

| signal | value | signal | value |
|---|---|---|---|
| avg_line_length | 43.271889 | qsc_code_num_chars_line_mean_quality_signal | 43.472222 |
| max_line_length | 81 | qsc_code_frac_chars_alphabet_quality_signal | 0.672358 |
| alphanum_fraction | 0.587859 | qsc_code_frac_chars_comments_quality_signal | 0.049521 |
| qsc_code_num_words_quality_signal | 865 | qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_num_chars_quality_signal | 9,390 | qsc_code_frac_lines_dupe_lines_quality_signal | 0.245614 |
| qsc_code_mean_word_length_quality_signal | 6.317919 | qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_words_unique_quality_signal | 0.139884 | qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.125892 | qsc_code_frac_chars_string_length_quality_signal | 0.284157 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.065325 | qsc_code_frac_chars_long_word_length_quality_signal | 0.155169 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.032205 | qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.609515 | qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.46441 | qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.262397 | qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.16505 | qsc_code_frac_lines_assert_quality_signal | 0.105263 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.154071 | qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.069167 | qsc_codepython_frac_lines_func_ratio_quality_signal | 0.081871 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 | qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_code_frac_chars_digital_quality_signal | 0.098772 | qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.245261 | qsc_codepython_frac_lines_import_quality_signal | 0.02924 |
| qsc_code_size_file_byte_quality_signal | 9,390 | qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_code_num_lines_quality_signal | 216 | qsc_codepython_score_lines_no_logic_quality_signal | 0.122807 |
| qsc_code_num_chars_line_max_quality_signal | 82 | qsc_codepython_frac_lines_print_quality_signal | 0 |

As in row 1, the unsuffixed qsc_* duplicate columns are all 0 or null; effective = 1, hits = 0.
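The word-level statistics for this row (qsc_code_num_words_quality_signal = 865, mean word length 6.317919, unique fraction 0.139884) suggest a simple tokenization. A sketch under that assumption; the actual splitting rule is not documented in the dump, and plain whitespace splitting does not reproduce the reported numbers exactly:

```python
def word_signals(content: str) -> dict:
    # Whitespace tokenization; an assumption, not confirmed by the dump.
    words = content.split()
    n = len(words) or 1
    return {
        "num_words": len(words),
        "mean_word_length": sum(map(len, words)) / n,
        "frac_words_unique": len(set(words)) / n,
    }
```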
**Row 6: gabbonj/Workbench · src/Path.py**

| field | value |
|---|---|
| hexsha | 16f095ebea3707b39efe449bdb8d248fee8a8b6e |
| size | 7,154 |
| ext | py |
| lang | Python |
| repo_path | src/Path.py |
| repo_name | gabbonj/Workbench |
| repo_head_hexsha | 86bbb2e3184e0f2fc5e9ac6dc7cfec86473fb7b9 |
| repo_licenses | ["MIT"] |

(repo fields are identical across the max_stars / max_issues / max_forks views)

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| count | 2 | null | null |
| event_min_datetime | 2020-08-06T12:20:24.000Z | null | null |
| event_max_datetime | 2020-08-06T12:20:43.000Z | null | null |

content:
import numpy as np
from ctypes import c_void_p
from .Shader import Shader
from .transforms import *
from OpenGL.GL import *
class Path:
# position=[x1, y1, z1, ..., xn, yn, zn] ; rotation = [[Rx1, Ry1, Rz1], ..., [Rxn, Ryn, Rzn]]
def __init__(self, position, rotation=None):
self.loadPath(position)
if rotation:
assert len(position) == len(rotation) * 3
self.loadRotation(rotation)
else:
self.rotation = 'Pio è un figo'
def loadPath(self, position):
# compiling shader
self.path_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\pathfrag.glsl').shaderProgram
# setting path buffer
self.vertices = position
self.patharray = glGenVertexArrays(1)
glBindVertexArray(self.patharray)
self.lineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.lineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.vertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
def loadRotation(self, rotation):
self.rotation = rotation
# compiling shader
self.xpath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\xpathfrag.glsl').shaderProgram
self.ypath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\ypathfrag.glsl').shaderProgram
self.zpath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\zpathfrag.glsl').shaderProgram
# setting versors
self.xvertices = []
self.yvertices = []
self.zvertices = []
for pos in range(len(rotation)):
xversor = self.getVersorAtTime(np.array([1, 0, 0, 1], dtype='float32'), pos)
yversor = self.getVersorAtTime(np.array([0, 1, 0, 1], dtype='float32'), pos)
zversor = self.getVersorAtTime(np.array([0, 0, 1, 1], dtype='float32'), pos)
pos = [self.vertices[pos*3], self.vertices[pos*3 + 1], self.vertices[pos*3 + 2]]
self.xvertices.extend(pos)
self.xvertices.extend([xversor[0], xversor[1], xversor[2]])
self.yvertices.extend(pos)
self.yvertices.extend([yversor[0], yversor[1], yversor[2]])
self.zvertices.extend(pos)
self.zvertices.extend([zversor[0], zversor[1], zversor[2]])
#setting xline bufer
self.xpatharray = glGenVertexArrays(1)
glBindVertexArray(self.xpatharray)
self.xlineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.xlineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.xvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
# setting yline buffer
self.ypatharray = glGenVertexArrays(1)
glBindVertexArray(self.ypatharray)
self.ylineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.ylineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.yvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
#setting xline bufer
self.zpatharray = glGenVertexArrays(1)
glBindVertexArray(self.zpatharray)
self.zlineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.zlineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.zvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
def getVersorAtTime(self, versor, index):
r_versor = np.dot(get_rot(self.rotation[index][0], 0), versor)
r_versor = np.dot(get_rot(self.rotation[index][1], 1), r_versor)
r_versor = np.dot(get_rot(self.rotation[index][2], 2), r_versor)
t_versor = np.dot(get_traslation(self.vertices[index*3], self.vertices[index*3 + 1], self.vertices[index*3 + 2]), r_versor)
return t_versor
def renderPath(self, camera):
model = np.identity(4)
view = camera.view
proj = camera.proj
# rendering the path
glBindVertexArray(self.patharray)
glUseProgram(self.path_shader)
modelLocation = glGetUniformLocation(self.path_shader, 'model')
viewLocation = glGetUniformLocation(self.path_shader, 'view')
projectionLocation = glGetUniformLocation(self.path_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINE_STRIP, 0, int(len(self.vertices)/3))
glDisableVertexAttribArray(0)
# rendering the xlines
        if self.rotation is not None:
glBindVertexArray(self.xpatharray)
glUseProgram(self.xpath_shader)
modelLocation = glGetUniformLocation(self.xpath_shader, 'model')
viewLocation = glGetUniformLocation(self.xpath_shader, 'view')
projectionLocation = glGetUniformLocation(self.xpath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINES, 0, int(len(self.xvertices)/3))
glDisableVertexAttribArray(0)
# rendering the ylines
glBindVertexArray(self.ypatharray)
glUseProgram(self.ypath_shader)
modelLocation = glGetUniformLocation(self.ypath_shader, 'model')
viewLocation = glGetUniformLocation(self.ypath_shader, 'view')
projectionLocation = glGetUniformLocation(self.ypath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
            glDrawArrays(GL_LINES, 0, int(len(self.yvertices)/3))
glDisableVertexAttribArray(0)
# rendering the zlines
glBindVertexArray(self.zpatharray)
glUseProgram(self.zpath_shader)
modelLocation = glGetUniformLocation(self.zpath_shader, 'model')
viewLocation = glGetUniformLocation(self.zpath_shader, 'view')
projectionLocation = glGetUniformLocation(self.zpath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
            glDrawArrays(GL_LINES, 0, int(len(self.zvertices)/3))
glDisableVertexAttribArray(0)
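# Usage sketch (hypothetical; assumes a camera object exposing `view` and
# `proj` matrices, as renderPath expects):
# path = Path(position=[0, 0, 0, 1, 0, 0], rotation=[[0, 0, 0], [0, 90, 0]])
# path.renderPath(camera)  # draws the line strip plus per-point axis lines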
| 46.75817
| 131
| 0.643836
| 756
| 7,154
| 5.966931
| 0.179894
| 0.063844
| 0.024828
| 0.019508
| 0.5409
| 0.42474
| 0.414986
| 0.345821
| 0.345821
| 0.338063
| 0
| 0.023588
| 0.247414
| 7,154
| 152
| 132
| 47.065789
| 0.814264
| 0.042354
| 0
| 0.29661
| 0
| 0
| 0.061129
| 0.039047
| 0
| 0
| 0
| 0
| 0.008475
| 1
| 0.042373
| false
| 0
| 0.042373
| 0
| 0.101695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc3fd4c771ec63f015857f770191b3f22d0f45f1
| 1,406
|
py
|
Python
|
icfree/echo_instructor/args.py
|
brsynth/icfree-ml
|
7f6c67f26bf60e9cadd59855aebb6bdb5bd64fda
|
[
"MIT"
] | 1
|
2022-01-13T17:54:12.000Z
|
2022-01-13T17:54:12.000Z
|
icfree/echo_instructor/args.py
|
brsynth/icfree-ml
|
7f6c67f26bf60e9cadd59855aebb6bdb5bd64fda
|
[
"MIT"
] | null | null | null |
icfree/echo_instructor/args.py
|
brsynth/icfree-ml
|
7f6c67f26bf60e9cadd59855aebb6bdb5bd64fda
|
[
"MIT"
] | null | null | null |
from argparse import (
ArgumentParser
)
from os import getcwd as os_getcwd
DEFAULT_OUTPUT_FOLDER = os_getcwd()
DEFAULT_SAMPLE_VOLUME = 10000
def build_args_parser(
program,
description):
    parser = ArgumentParser(
        prog=program,
        description=description,
    )
parser = add_arguments(parser)
return parser
def add_arguments(parser):
parser.add_argument(
'cfps',
type=str,
help='Path to a .tsv file containing CFPS parameters and features',
)
parser.add_argument(
'init_tset',
type=str,
help='Path to a .tsv file containing initial training set',
)
parser.add_argument(
'norm_set',
type=str,
help='Path to a .tsv file containing normalizer set',
)
parser.add_argument(
'autofluo_set',
type=str,
help='Path to a .tsv file containing autofluorescence set',
)
parser.add_argument(
'-v', '--sample_volume',
type=int,
default=DEFAULT_SAMPLE_VOLUME,
help=('Final sample volume in each well in nL'
f' (default: {DEFAULT_SAMPLE_VOLUME})')
)
parser.add_argument(
'-of', '--output-folder',
type=str,
default=DEFAULT_OUTPUT_FOLDER,
help=('Output folder to write output files'
f' (default: {DEFAULT_OUTPUT_FOLDER})')
)
return parser
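# Usage sketch (hypothetical program name, description, and file paths):
# parser = build_args_parser('echo_instructor', 'Generate ECHO instructions')
# args = parser.parse_args(['cfps.tsv', 'tset.tsv', 'norm.tsv', 'autofluo.tsv'])
# args.sample_volume  # -> 10000 unless -v/--sample_volume is given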
| 20.985075
| 75
| 0.600996
| 158
| 1,406
| 5.170886
| 0.35443
| 0.077111
| 0.124847
| 0.073439
| 0.178703
| 0.178703
| 0.178703
| 0.178703
| 0.178703
| 0.093023
| 0
| 0.005128
| 0.306543
| 1,406
| 66
| 76
| 21.30303
| 0.832821
| 0
| 0
| 0.294118
| 0
| 0
| 0.296586
| 0.034139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.039216
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc4085bfce6da5fce4ce47af500b1138fc887137
| 246
|
py
|
Python
|
ex1_01.py
|
sitdh/59.com-prog
|
24f536a72b0467ff3ee1615f515ecff9fbf36bb3
|
[
"MIT"
] | 1
|
2021-04-25T14:46:12.000Z
|
2021-04-25T14:46:12.000Z
|
ex1_01.py
|
sitdh/com-prog
|
24f536a72b0467ff3ee1615f515ecff9fbf36bb3
|
[
"MIT"
] | null | null | null |
ex1_01.py
|
sitdh/com-prog
|
24f536a72b0467ff3ee1615f515ecff9fbf36bb3
|
[
"MIT"
] | null | null | null |
import math
x = float(input())
# first Taylor-series terms of cos(x) after the leading 1:
# cos(x) ~= 1 - x^2/2! + x^4/4! - x^6/6!
prop_2 = -(x**2) / math.factorial(2)
prop_3 = (x**4) / math.factorial(4)
prop_4 = -(x**6) / math.factorial(6)
cos_x = float(1 + prop_2 + prop_3 + prop_4)
print(prop_2)
print(prop_3)
print(prop_4)
print(cos_x)
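# Sanity check (sketch, not in the original): the truncated series should be
# close to math.cos(x) for small |x|
# print(abs(cos_x - math.cos(x)))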
| 14.470588
| 43
| 0.646341
| 48
| 246
| 3.083333
| 0.291667
| 0.101351
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077295
| 0.158537
| 246
| 16
| 44
| 15.375
| 0.637681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc41a5fa588f792a592b96d3c6500dbf29045ec5
| 3,211
|
py
|
Python
|
test/datagateway_api/icat/filters/test_where_filter.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | 2
|
2022-02-10T17:47:53.000Z
|
2022-02-10T19:04:02.000Z
|
test/datagateway_api/icat/filters/test_where_filter.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | 183
|
2020-12-02T11:34:18.000Z
|
2022-03-29T15:19:23.000Z
|
test/datagateway_api/icat/filters/test_where_filter.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | 7
|
2021-04-13T17:26:05.000Z
|
2021-11-22T14:24:24.000Z
|
import pytest
from datagateway_api.src.common.exceptions import BadRequestError, FilterError
from datagateway_api.src.datagateway_api.filter_order_handler import FilterOrderHandler
from datagateway_api.src.datagateway_api.icat.filters import PythonICATWhereFilter
class TestICATWhereFilter:
@pytest.mark.parametrize(
"operation, value, expected_condition_value",
[
pytest.param("eq", 5, ["%s = '5'"], id="equal"),
pytest.param("ne", 5, ["%s != 5"], id="not equal"),
pytest.param("like", 5, ["%s like '%%5%%'"], id="like"),
pytest.param("ilike", 5, ["UPPER(%s) like UPPER('%%5%%')"], id="ilike"),
pytest.param("nlike", 5, ["%s not like '%%5%%'"], id="not like"),
pytest.param(
"nilike", 5, ["UPPER(%s) not like UPPER('%%5%%')"], id="not ilike",
),
pytest.param("lt", 5, ["%s < '5'"], id="less than"),
pytest.param("lte", 5, ["%s <= '5'"], id="less than or equal"),
pytest.param("gt", 5, ["%s > '5'"], id="greater than"),
pytest.param("gte", 5, ["%s >= '5'"], id="greater than or equal"),
pytest.param("in", [1, 2, 3, 4], ["%s in (1, 2, 3, 4)"], id="in a list"),
pytest.param("in", [], ["%s in (NULL)"], id="empty list"),
],
)
def test_valid_operations(
self, icat_query, operation, value, expected_condition_value,
):
test_filter = PythonICATWhereFilter("id", value, operation)
test_filter.apply_filter(icat_query)
assert icat_query.conditions == {"id": expected_condition_value}
def test_invalid_in_operation(self, icat_query):
with pytest.raises(BadRequestError):
PythonICATWhereFilter("id", "1, 2, 3, 4, 5", "in")
def test_invalid_operation(self, icat_query):
test_filter = PythonICATWhereFilter("id", 10, "non")
with pytest.raises(FilterError):
test_filter.apply_filter(icat_query)
def test_valid_internal_icat_value(self, icat_query):
"""Check that values that point to other values in the schema are applied"""
test_filter = PythonICATWhereFilter("startDate", "o.endDate", "lt")
test_filter.apply_filter(icat_query)
assert icat_query.conditions == {"startDate": ["%s < o.endDate"]}
def test_valid_field(self, icat_query):
test_filter = PythonICATWhereFilter("title", "Investigation Title", "eq")
test_filter.apply_filter(icat_query)
assert icat_query.conditions == {"title": ["%s = 'Investigation Title'"]}
def test_invalid_field(self, icat_query):
test_filter = PythonICATWhereFilter("random_field", "my_value", "eq")
with pytest.raises(FilterError):
test_filter.apply_filter(icat_query)
def test_multiple_conditions_per_field(self, icat_query):
lt_filter = PythonICATWhereFilter("id", 10, "lt")
gt_filter = PythonICATWhereFilter("id", 5, "gt")
filter_handler = FilterOrderHandler()
filter_handler.add_filters([lt_filter, gt_filter])
filter_handler.apply_filters(icat_query)
assert icat_query.conditions == {"id": ["%s < '10'", "%s > '5'"]}
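# Sketch of what these tests exercise (assumes the icat_query fixture used
# above):
# test_filter = PythonICATWhereFilter("id", 5, "lt")
# test_filter.apply_filter(icat_query)
# icat_query.conditions  # -> {"id": ["%s < '5'"]}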
| 43.391892
| 87
| 0.617253
| 387
| 3,211
| 4.927649
| 0.235142
| 0.080231
| 0.047719
| 0.015732
| 0.378081
| 0.315155
| 0.224961
| 0.153644
| 0.153644
| 0.153644
| 0
| 0.016413
| 0.222049
| 3,211
| 73
| 88
| 43.986301
| 0.746998
| 0.0218
| 0
| 0.125
| 0
| 0
| 0.17512
| 0.007656
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.125
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc441b2065c8199c0dd4d1448231c084f1b1cfa3
| 7,160
|
py
|
Python
|
codetools/contexts/multi_context.py
|
enthought/codetools
|
20d8bb1eba68145750a1b689655b839078121474
|
[
"BSD-3-Clause"
] | 29
|
2015-08-10T20:25:00.000Z
|
2021-11-30T23:34:24.000Z
|
codetools/contexts/multi_context.py
|
enthought/codetools
|
20d8bb1eba68145750a1b689655b839078121474
|
[
"BSD-3-Clause"
] | 40
|
2015-01-05T15:01:37.000Z
|
2022-03-11T13:47:06.000Z
|
codetools/contexts/multi_context.py
|
enthought/codetools
|
20d8bb1eba68145750a1b689655b839078121474
|
[
"BSD-3-Clause"
] | 4
|
2015-04-14T10:06:26.000Z
|
2021-01-19T16:46:48.000Z
|
#
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
""" Context holding multiple subcontexts.
"""
from __future__ import absolute_import
from itertools import chain
try:
    from collections.abc import MutableMapping as DictMixin
except ImportError:  # Python 2 fallback
    from collections import MutableMapping as DictMixin
from traits.api import (Bool, List, Str, Undefined, Supports,
adapt, provides, on_trait_change)
from .data_context import DataContext, ListenableMixin, PersistableMixin
from .i_context import ICheckpointable, IDataContext, IRestrictedContext
from .utils import safe_repr
@provides(IDataContext)
class MultiContext(ListenableMixin, PersistableMixin, DictMixin):
""" Wrap several subcontexts.
"""
#: The name of the context.
name = Str("multidummy")
#: The underlying dictionary.
subcontexts = List(Supports(IRestrictedContext, factory=DataContext))
#: Suppress subcontext modified events
veto_subcontext_modified = Bool(True)
def __init__(self, *subcontexts, **traits):
subcontexts = list(subcontexts)
super(MultiContext, self).__init__(subcontexts=subcontexts, **traits)
#### IContext interface ####################################################
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(list(self.keys()))
def __contains__(self, key):
for c in self.subcontexts:
if key in c:
return True
return False
def __delitem__(self, key):
""" Remove the given key with [] access.
Only deletes the first instance of the key.
Parameters
----------
key : str
Raises
------
        KeyError if the key is not available in the context.
"""
for c in self.subcontexts:
try:
del c[key]
return
except KeyError:
continue
raise KeyError(key)
def __getitem__(self, key):
for c in self.subcontexts:
try:
return c[key]
except KeyError:
continue
raise KeyError(key)
def __setitem__(self, key, value):
""" Set item with [] access.
The first subcontext which allows the key/value pair will get it. If an
earlier subcontext has the key, but does not allow the assignment, then
that key will be deleted. Later contexts with the key will be untouched.
If the key/value pair cannot be assigned to anything, no deletion will
take place.
Parameters
----------
key : str
value : object
Raises
------
ValueError if the key is not permitted to be assigned that value.
"""
# Let subtypes dictate compatibility independently of contained contexts
if not self.allows(value, key):
raise ValueError('Disallowed mapping: %s = %s' % (key, safe_repr(value)))
set = False
blocking_contexts = []
for c in self.subcontexts:
if not set:
if c.allows(value, key):
if key in c:
added = []
current_value = c[key]
try:
is_modified = bool(current_value != value)
except Exception:
is_modified = current_value is not value
if is_modified:
modified = [key]
c[key] = value
else:
modified = []
else:
added = [key]
modified = []
c[key] = value
set = True
break
elif key in c:
# Record this context as blocking access to the final
# location of the value.
blocking_contexts.append(c)
# Remove all blocking instances.
for c in blocking_contexts:
del c[key]
if not set:
raise ValueError('Disallowed mapping: %s = %s' % (key, safe_repr(value)))
def keys(self):
return list(set(chain(*[list(c.keys()) for c in self.subcontexts])))
# Expose DictMixin's get method over HasTraits'.
get = DictMixin.get
def __str__(self):
# Maybe a good default string
subcontext_str = '[%s]' % ', '.join([str(x) for x in self.subcontexts])
return '%s(name=%r, subcontexts=%s)' % (type(self).__name__, self.name,
subcontext_str)
def __repr__(self):
# Maybe a good default representation
return '%s(name=%r)' % (type(self).__name__, self.name)
#### IRestrictedContext interface ##########################################
def allows(self, value, name=None):
for c in self.subcontexts:
if c.allows(value, name=name):
return True
return False
#### Trait Event Handlers ##################################################
@on_trait_change('subcontexts:items_modified')
def subcontexts_items_modified(self, event):
""" Pass events up.
"""
if event is Undefined:
# Nothing to do.
return
event.veto = self.veto_subcontext_modified
self._fire_event(added=event.added, removed=event.removed,
modified=event.modified, context=event.context)
def _subcontexts_items_changed(self, event):
""" Trait listener for items of subcontexts list.
"""
added = []
removed = []
# Add to the list of items added
if len(event.added):
for context in event.added:
added.extend(list(context.keys()))
# Add to the list of items removed
if len(event.removed):
for context in event.removed:
removed.extend(list(context.keys()))
self._fire_event(added=added, removed=removed)
#### ICheckpointable interface ############################################
def checkpoint(self):
""" Make a shallow copy of the context.
Technically, this is actually a fairly deep copy. All of the object
structure should be replicated, but the actual dictionary storage will
be shallowly copied::
copy = context.shallow_copy()
copy[key] is context[key] for key in context.keys()
These semantics are useful for saving out checkpointed versions of the
context for implementing an undo/redo stack. They may not be useful for
other purposes.
Returns
-------
copy : IContext
"""
copy = self.clone_traits()
new_subcontexts = []
for context in self.subcontexts:
checkpointable_subcontext = adapt(context, ICheckpointable)
new_subcontexts.append(checkpointable_subcontext.checkpoint())
copy.subcontexts = new_subcontexts
return copy
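# Usage sketch (hypothetical; assumes DataContext instances act as dict-like
# subcontexts):
# ctx = MultiContext(DataContext(name='a'), DataContext(name='b'))
# ctx['x'] = 1                 # lands in the first subcontext that allows it
# snapshot = ctx.checkpoint()  # shallow copy, e.g. for an undo stack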
| 30.468085
| 85
| 0.557682
| 757
| 7,160
| 5.150594
| 0.305152
| 0.034624
| 0.034881
| 0.015389
| 0.11644
| 0.090023
| 0.061041
| 0.025648
| 0.025648
| 0.025648
| 0
| 0.000841
| 0.335754
| 7,160
| 234
| 86
| 30.598291
| 0.818966
| 0.276117
| 0
| 0.324324
| 0
| 0
| 0.028836
| 0.005595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126126
| false
| 0
| 0.063063
| 0.036036
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc447d214c0f2c389991fd5918f6f13fed4aaf6b
| 634
|
py
|
Python
|
line_counter/TestCodes/python_test.py
|
FMoller/coding-auxiliary-tools
|
21784f01731404f33059f3a8c4e73a104709ffe9
|
[
"MIT"
] | null | null | null |
line_counter/TestCodes/python_test.py
|
FMoller/coding-auxiliary-tools
|
21784f01731404f33059f3a8c4e73a104709ffe9
|
[
"MIT"
] | null | null | null |
line_counter/TestCodes/python_test.py
|
FMoller/coding-auxiliary-tools
|
21784f01731404f33059f3a8c4e73a104709ffe9
|
[
"MIT"
] | null | null | null |
"""A simple file to test the line_counter performance in python
This is a multiline doctest
"""
__author__ = "Frederico Moeller"
__copyright__ = ""
__credits__ = ["Frederico Moeller"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Frederico Moeller"
__email__ = ""
__status__ = ""
#import things
import math
#define things
def some_function(var_one, var_two,
var_three):
"""This is a function that do things"""
if var_one > var_two:
if var_two*var_three > var_one:
return "blab" #this happens
else:
return "blob"
else:
return "fish"
| 21.133333
| 63
| 0.641956
| 78
| 634
| 4.679487
| 0.615385
| 0.131507
| 0.038356
| 0.065753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00641
| 0.26183
| 634
| 29
| 64
| 21.862069
| 0.773504
| 0.253943
| 0
| 0.111111
| 0
| 0
| 0.154684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc44b8524b66c7a720d547f156846ae7572f5832
| 4,602
|
py
|
Python
|
causal_attribution/data.py
|
VaniW/deconfounded-lexicon-induction
|
419ecf717f51cfd1741732ca3191b36b565bd1a4
|
[
"MIT"
] | 25
|
2020-11-03T16:38:51.000Z
|
2022-03-28T11:53:08.000Z
|
causal_attribution/data.py
|
VaniW/deconfounded-lexicon-induction
|
419ecf717f51cfd1741732ca3191b36b565bd1a4
|
[
"MIT"
] | 1
|
2019-12-15T08:33:47.000Z
|
2019-12-16T17:33:15.000Z
|
causal_attribution/data.py
|
VaniW/deconfounded-lexicon-induction
|
419ecf717f51cfd1741732ca3191b36b565bd1a4
|
[
"MIT"
] | 7
|
2021-05-03T01:01:28.000Z
|
2022-02-19T04:06:20.000Z
|
"""Data pipelines."""
from collections import defaultdict, OrderedDict
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import torch
def get_info(examples, vocab=None, max_seq_len=256):
"""Gathers info on and creats a featurized example generator for a list of raw examples.
Args:
examples: list(list, float, or string). Examples to create generator for.
vocab: list(str). A vocabulary for discrete datatypes (e.g. text or categorical).
max_seq_len: int. maximum sequence length for text examples.
Returns:
A dict of info about this variable as well as a generator over featurized examples.
"""
assert isinstance(examples, list), 'examples must be list; got ' + str(type(examples))
assert len(examples) > 0, 'Empty example list!'
# Text
if isinstance(examples[0], list):
assert vocab is not None, 'ERROR: must provide a vocab.'
example_type = 'input'
vocab = ['UNK', 'PAD'] + vocab
tok2id = {tok: i for i, tok in enumerate(vocab)}
ngrams = max(len(x.split()) for x in vocab)
unk_id = 0
def featurizer(example):
ids = []
for n in range(1, ngrams + 1):
toks = [' '.join(example[i: i + n]) for i in range(len(example) - n + 1)]
ids += [tok2id.get(x, 0) for x in toks]
ids = ids[:max_seq_len]
padded_ids = ids + ([1] * (max_seq_len - len(ids))) # pad idx = 1
return padded_ids
# Continuous
elif isinstance(examples[0], float) or isinstance(examples[0], int):
example_type = 'continuous'
vocab = ['N/A']
if isinstance(examples[0], int):
featurizer = lambda ex: float(ex)
else:
featurizer = lambda ex: ex
# Categorical
elif isinstance(examples[0], str):
example_type = 'categorical'
if not vocab:
vocab = ['UNK'] + sorted(list(set(examples)))
tok2id = {tok: i for i, tok in enumerate(vocab)}
featurizer = lambda ex: tok2id.get(ex, 0) # 0 is the unk id.
else:
print("ERROR: unrecognized example type: ", examples[0])
quit()
return featurizer, example_type, vocab
def get_iterator(vocab, df, name_to_type, batch_size=32, max_seq_len=256):
"""Builds a data iterator for text, confounds, and outcomes.
Args:
vocab: list(str). The vocabulary to use.
df: pandas.df. The data we want to iterate over. The columns of
these data should be a superset of the keys in name_to_type.
name_to_type: dict. A mapping from variable names to whether they are
"input", "predict", or "control" variables.
batch_size: int. The batch size to use.
max_seq_len: int. Maximum length of text sequences.
Returns:
A generator which yields dictionaries where variable names are mapped
to tensors of batched data.
"""
def featurize(featurizer):
return [featurizer(ex) for ex in examples]
var_info = defaultdict(lambda: OrderedDict())
featurized_data = defaultdict(list)
for var_name, var_type in name_to_type.items():
examples = list(df[var_name])
if var_type == 'input':
examples = [x.split() for x in examples]
featurizer, _, vocab = get_info(examples, vocab, max_seq_len)
var_info[var_name] = {
'control': False, 'name': var_name,
'type': var_type, 'vocab': vocab
}
else:
featurizer, varType, vocab = get_info(examples)
var_info[var_name] = {
'control': var_type == 'control',
'name': var_name, 'type': varType, 'vocab': vocab
}
featurized_data[var_name] = [featurizer(ex) for ex in examples]
def to_tensor(var_name):
dtype = torch.float
if var_info[var_name]['type'] in {'categorical', 'input'}:
dtype = torch.long
return torch.tensor(featurized_data[var_name], dtype=dtype)
feature_names = sorted(featurized_data.keys())
data = TensorDataset(*[to_tensor(name) for name in feature_names])
dataloader = DataLoader(
dataset=data,
sampler=RandomSampler(data),
collate_fn=lambda batch: [torch.stack(x) for x in zip(*batch)], # group by datatype.
batch_size=batch_size)
def iterator():
for batch in dataloader:
yield dict(zip(feature_names, batch))
return iterator, var_info
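# Usage sketch (hypothetical columns and vocabulary; assumes pandas):
# import pandas as pd
# df = pd.DataFrame({'text': ['a b', 'b c'], 'score': [0.1, 0.9]})
# iterator, var_info = get_iterator(
#     ['a', 'b', 'c'], df, {'text': 'input', 'score': 'predict'}, batch_size=2)
# for batch in iterator():
#     ...  # dict mapping 'text'/'score' to batched tensors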
| 35.674419
| 93
| 0.612994
| 596
| 4,602
| 4.620805
| 0.271812
| 0.025418
| 0.022876
| 0.015251
| 0.081336
| 0.043573
| 0.023965
| 0.023965
| 0.023965
| 0
| 0
| 0.0085
| 0.284224
| 4,602
| 128
| 94
| 35.953125
| 0.827565
| 0.240113
| 0
| 0.092105
| 0
| 0
| 0.064469
| 0
| 0
| 0
| 0
| 0
| 0.039474
| 1
| 0.078947
| false
| 0
| 0.052632
| 0.013158
| 0.197368
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc498b7d39a14ae7cd3ad1e6341af40bb6279e72
| 5,144
|
py
|
Python
|
image_store_processing.py
|
olubiyiontheweb/digid_websearch_flask
|
181107eaa60faff9429b754236406eed56e3c1ec
|
[
"MIT"
] | 1
|
2021-12-15T18:56:05.000Z
|
2021-12-15T18:56:05.000Z
|
image_store_processing.py
|
olubiyiontheweb/similar_image_websearch
|
ddb79a3e627c1143ff7f64e6d82f0d8b9dcd8047
|
[
"MIT"
] | null | null | null |
image_store_processing.py
|
olubiyiontheweb/similar_image_websearch
|
ddb79a3e627c1143ff7f64e6d82f0d8b9dcd8047
|
[
"MIT"
] | null | null | null |
from skimage.metrics import structural_similarity as ssim
from glob import glob
from PIL import Image
import numpy as np
import ntpath
import dhash
import cv2
from database_structure import database_migrations
IMAGE_FOLDER = "./image_store"
ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg']
image_store_hash = dict()
db_ops = database_migrations()
class preprocess:
def __init__(self):
# image table values to insert in database
self.images_list = dict()
self.image_store = list()
def load_images_into_to_db(self):
for img_type in ALLOWED_EXTENSIONS:
images = glob(IMAGE_FOLDER + "/*" + img_type)
for img in images:
imgname = ntpath.basename(img)
values = imgname, IMAGE_FOLDER, "local"
print(values)
db_ops.image_store_migrations()
# TODO Abstract requests and insert from database
db_ops.insert_operations("image_store", values)
def request_list_of_images_in_db(self):
# images = glob(IMAGE_FOLDER + "/*" + img_type)
images = db_ops.request_matches("image_store")
print("list from database" + str(images))
self.image_store.clear()
self.images_list.clear()
for img in images:
# get image name
print("current list" + str(self.image_store))
self.images_list["image_id"] = img[0]
self.images_list["image_name"] = img[1]
print("Check the values" + str(self.images_list))
self.image_store.append(self.images_list.copy())
print("Check the images" + str(self.image_store))
print("We have" + str(len(self.image_store)) + "images in the store")
print(self.image_store)
return self.image_store
def generate_hash(self):
images_in_db = self.request_list_of_images_in_db()
print(images_in_db)
for img in images_in_db:
image = Image.open(IMAGE_FOLDER + "\\" + img["image_name"])
row, col = dhash.dhash_row_col(image)
img_hash = dhash.format_hex(row, col)
values = img_hash, img["image_id"]
db_ops.image_store_migrations()
print(values)
db_ops.insert_operations("image_store_hash", values)
class compare_files:
def __init__(self):
# image table values to insert in database
self.images_list = dict()
self.image_store = list()
def request_image_hashes(self):
images = db_ops.request_matches("image_store_hash")
print("list from database" + str(images))
self.image_store.clear()
self.images_list.clear()
for img in images:
# get image name
print("current list" + str(img))
self.images_list["image_hash"] = img[1]
# request image name from image store database
img_name = db_ops.conditional_request_matches(
"image_store", img[2], "image_name", "image_id")
self.images_list["image_name"] = img_name[0][0]
print("Check the values" + str(self.images_list))
self.image_store.append(self.images_list.copy())
print("Check the images" + str(self.image_store))
print("We have" + str(len(self.image_store)) + "images in the store")
print(self.image_store)
return self.image_store
    def calculate_hamming_dist(self, uploaded_hash, db_store_hash):
        # count the positions at which the two hash strings differ
        return sum(1 for a, b in zip(uploaded_hash, db_store_hash) if a != b)
def mean_squared_error(self, uploaded_image, db_store_image):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((uploaded_image.astype("float") -
db_store_image.astype("float"))**2)
err /= float(uploaded_image.shape[0] * uploaded_image.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
def structural_similarity_index(self, uploaded_image, db_store_image):
ssim_index = ssim(uploaded_image, db_store_image)
return ssim_index
def convert_and_resize_compare(self, uploaded_image, db_store_image):
        # TODO: make structural similarity and mean squared error functionals
        uploaded_image = cv2.imread(uploaded_image)
        db_store_image = cv2.imread(db_store_image)
        # NOTE: the original called cv2.resize() with no arguments, which raises
        # a TypeError; a common target size is assumed here so both images share
        # the same dimensions, as mean_squared_error requires.
        uploaded_image = cv2.resize(uploaded_image, (256, 256))
        db_store_image = cv2.resize(db_store_image, (256, 256))
uploaded_image = cv2.cvtColor(uploaded_image, cv2.COLOR_BGR2GRAY)
db_store_image = cv2.cvtColor(db_store_image, cv2.COLOR_BGR2GRAY)
mean_sq_error = self.mean_squared_error(uploaded_image, db_store_image)
ssim_index = self.structural_similarity_index(uploaded_image,
db_store_image)
return ssim_index, mean_sq_error
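# Usage sketch (hypothetical paths and hashes):
# cmp = compare_files()
# cmp.calculate_hamming_dist('0f3a', '0f7a')  # -> 1
# ssim_index, mse = cmp.convert_and_resize_compare('up.jpg', 'db.jpg')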
| 36.742857
| 79
| 0.631221
| 661
| 5,144
| 4.633888
| 0.189107
| 0.081619
| 0.06856
| 0.045707
| 0.444989
| 0.420503
| 0.329742
| 0.284688
| 0.25857
| 0.25857
| 0
| 0.006452
| 0.276827
| 5,144
| 139
| 80
| 37.007194
| 0.816935
| 0.107309
| 0
| 0.313131
| 0
| 0
| 0.077982
| 0
| 0
| 0
| 0
| 0.007194
| 0
| 1
| 0.10101
| false
| 0
| 0.080808
| 0
| 0.262626
| 0.151515
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc49a143fb9688648101a0602142d480263709b3
| 8,823
|
py
|
Python
|
cogs/jpserv.py
|
elthorito/Rai
|
a6f05567a0d4ed98a09676e507c478a27630bf1c
|
[
"MIT"
] | null | null | null |
cogs/jpserv.py
|
elthorito/Rai
|
a6f05567a0d4ed98a09676e507c478a27630bf1c
|
[
"MIT"
] | null | null | null |
cogs/jpserv.py
|
elthorito/Rai
|
a6f05567a0d4ed98a09676e507c478a27630bf1c
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import os
import json
from datetime import date, datetime, timedelta
from .utils import helper_functions as hf
from copy import deepcopy
dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))).replace('\\', '/')
class Jpserv(commands.Cog):
"""Modules unique for the Japanese server"""
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
if not ctx.guild:
return
return ctx.guild.id == 189571157446492161 or ctx.guild.id == 275146036178059265
# these commands are only useable on Japanese server or my testing server
@commands.command()
@hf.is_admin()
async def swap(self, ctx):
"""Swaps JHO/JHO2's names and positions in the lists, for if we temporarily want welcome messages to go to
JHO2"""
jpJHO = self.bot.get_channel(189571157446492161)
jpJHO2 = self.bot.get_channel(326263874770829313)
if jpJHO.position == 4:
await jpJHO.edit(position=5, name='just_hanging_out_2')
await jpJHO2.edit(position=4, name='just_hanging_out')
else:
await jpJHO.edit(position=4, name='just_hanging_out')
await jpJHO2.edit(position=5, name='just_hanging_out_2')
@commands.group(invoke_without_command=True, aliases=['uhc'])
async def ultrahardcore(self, ctx, *, member=None):
"""Irreversible hardcore mode. Must talk to an admin to have this undone."""
# if ctx.guild.id != 189571157446492161:
# return
role = ctx.guild.get_role(486851965121331200)
config = self.bot.db['ultraHardcore']['users']
if member: # if you specified someone else's ID, then remove UHC from them
member = await hf.member_converter(ctx, member)
if not member:
return
if hf.admin_check(ctx) and ctx.author.id != member.id:
if str(member.id) in config:
if config[str(member.id)][0]:
config[str(member.id)][0] = False
else:
await ctx.send("That user is not in UHC")
return
else:
await ctx.send("That user is not in UHC mode.")
return
await hf.dump_json()
try:
await member.remove_roles(role)
except discord.errors.Forbidden:
await ctx.send("I couldn't remove the ultra hardcore role")
await ctx.send(f'Undid ultra hardcore mode for {member.name}')
else:
await ctx.send("You can not remove UHC. Ask a mod/admin to help you.")
else:
if str(ctx.author.id) in config:
if config[str(ctx.author.id)][0]:
await ctx.invoke(self.explanation)
return
await ctx.send(f"This is ultra hardcore mode. It means you must speak in the language you are learning"
f" (for example, if you are learning Japanese, any messages in English will be deleted)."
f" This can not be undone unless you ask a mod to remove it for you. \n\n"
f"To enable ultra hardcore mode, type `;uhc on` or `;uhc enable`. ")
@ultrahardcore.command(aliases=['enable'])
async def on(self, ctx):
"""Enables UHC"""
if ctx.guild.id != 189571157446492161:
return
role = ctx.guild.get_role(486851965121331200)
config = self.bot.db['ultraHardcore']['users']
if str(ctx.author.id) in config: # if not enabled
user = config[str(ctx.author.id)]
if user[0]:
await ctx.send("You're already in ultra hardcore mode.")
return
else:
user[0] = True
else:
config[str(ctx.author.id)] = [True, date.today().strftime("%Y/%m/%d"), 0]
await hf.dump_json()
try:
await ctx.author.add_roles(role)
except discord.errors.Forbidden:
await ctx.send("I couldn't add the ultra hardcore role")
await ctx.send(f"{ctx.author.name} has chosen to enable ultra hardcore mode. It works the same as "
"normal hardcore mode except that you can't undo it and asterisks don't change "
"anything. Talk to a mod to undo this.")
@ultrahardcore.command()
async def list(self, ctx):
"""Lists the people currently in ultra hardcore mode"""
if ctx.guild.id != 189571157446492161:
return
string = 'The members in ultra hardcore mode right now are '
guild = self.bot.get_guild(189571157446492161)
members = []
config = self.bot.db['ultraHardcore']['users']
for member_id in config.copy():
if config[member_id][0]:
member = guild.get_member(int(member_id))
if member is not None: # in case a member leaves
members.append(member.name)
else:
del config[member_id]
await ctx.send(f'Removed <@{member_id}> from the list, as they seem to have left the server')
await ctx.send(string + ', '.join(members))
@ultrahardcore.command()
async def explanation(self, ctx):
"""Explains ultra hardcore mode for those who are using it and can't explain it"""
if ctx.guild.id != 189571157446492161:
return
if str(ctx.author.id) in self.bot.db['ultraHardcore']['users']:
if self.bot.db['ultraHardcore']['users'][str(ctx.author.id)][0]:
await ctx.send(f"{ctx.author.mention} is currently using ultra hardcore mode. In this mode, they can't"
f" speak their native language, and they also cannot undo this mode themselves.")
return
await ctx.send(f"{ctx.author.mention} is currently NOT using hardcore mode, so I don't know why "
f"they're trying to use this command. But, ultra hardcore mode means a user can't speak "
f"any English, and can't undo this mode themselves no matter what.")
@ultrahardcore.command(aliases=['lb'])
async def leaderboard(self, ctx):
"""Shows a leaderboard of who has had UHC on for the longest"""
if ctx.guild.id != 189571157446492161:
return
time_dict = deepcopy(self.bot.db['ultraHardcore']['users'])
for i in time_dict:
if time_dict[i][0]:
time_dict[i][2] += (datetime.today() - datetime.strptime(time_dict[i][1], "%Y/%m/%d")).days
# {('243703909166612480', [True, '2019/02/14', 124]),
# ('219617844973797376', [False, '2018/11/30', 122]), ...}
to_sort = [[i[0], i[1][0], i[1][2]] for i in list(time_dict.items())]
# to_sort: [['243703909166612480', True, 162], ['219617844973797376', False, 122], ...]
sorted_dict = sorted(to_sort, key=lambda x: x[2], reverse=True)
leaderboard = f"The number of days each user has had UHC enabled " \
f"(Bold = This user currently has UHC enabled)\n\n"
for i in sorted_dict:
user = ctx.guild.get_member(int(i[0]))
if (i[2] < 10 and not i[1]) or (not user):
continue
if user.nick:
name_str = f"{user.mention} ({user.name})"
else:
name_str = f"{user.name}"
if i[1]:
leaderboard += f"**{i[2]}: {name_str}**\n"
else:
leaderboard += f"{i[2]}: {name_str}\n"
emb = discord.Embed(title="UHC Leaderboard", description=leaderboard,
color=discord.Color(int('ff5500', 16)))
await ctx.send(embed=emb)
@ultrahardcore.command()
@hf.is_admin()
async def ignore(self, ctx):
"""Ignores a channel for UHC"""
if ctx.guild.id != 189571157446492161:
return
config = self.bot.db['ultraHardcore']
try:
if ctx.channel.id not in config['ignore']:
config['ignore'].append(ctx.channel.id)
await ctx.send(f"Added {ctx.channel.name} to list of ignored channels for UHC")
else:
config['ignore'].remove(ctx.channel.id)
await ctx.send(f"Removed {ctx.channel.name} from list of ignored channels for UHC")
except KeyError:
config['ignore'] = [ctx.channel.id]
await ctx.send(f"Added {ctx.channel.name} to list of ignored channels for UHC")
await hf.dump_json()
def setup(bot):
bot.add_cog(Jpserv(bot))
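# Loading sketch (assumes a discord.py bot instance; the extension name is
# hypothetical and depends on the project layout; newer discord.py versions
# require awaiting this call):
# bot.load_extension('cogs.jpserv')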
| 45.715026
| 120
| 0.572821
| 1,142
| 8,823
| 4.37303
| 0.232925
| 0.030437
| 0.040849
| 0.023428
| 0.330597
| 0.284742
| 0.203244
| 0.160993
| 0.12495
| 0.108931
| 0
| 0.062438
| 0.315652
| 8,823
| 192
| 121
| 45.953125
| 0.764657
| 0.051796
| 0
| 0.314465
| 0
| 0.018868
| 0.242631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012579
| false
| 0
| 0.044025
| 0
| 0.138365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc4d5b7bde1ce5d45b97c67684a8f6c61429eb5b
| 5,144
|
py
|
Python
|
keras/layers/pooling/base_pooling3d.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/pooling/base_pooling3d.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/pooling/base_pooling3d.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for pooling 3D layers."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(
self,
pool_function,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
super().__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, "pool_size")
self.strides = conv_utils.normalize_tuple(
strides, 3, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == "channels_first":
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
)
if self.data_format == "channels_first":
outputs = tf.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(
len_dim1, self.pool_size[0], self.padding, self.strides[0]
)
len_dim2 = conv_utils.conv_output_length(
len_dim2, self.pool_size[1], self.padding, self.strides[1]
)
len_dim3 = conv_utils.conv_output_length(
len_dim3, self.pool_size[2], self.padding, self.strides[2]
)
if self.data_format == "channels_first":
return tf.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]
)
else:
return tf.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]
)
def get_config(self):
config = {
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
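# Sketch (not part of this file): a concrete subclass in the style of
# keras.layers.MaxPooling3D, showing how pool_function is supplied:
# class MaxPooling3D(Pooling3D):
#     def __init__(self, pool_size=(2, 2, 2), strides=None, padding="valid",
#                  data_format=None, **kwargs):
#         super().__init__(tf.nn.max_pool3d, pool_size, strides,
#                          padding=padding, data_format=data_format, **kwargs)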
| 37.823529
| 80
| 0.623639
| 662
| 5,144
| 4.670695
| 0.297583
| 0.045278
| 0.023286
| 0.020699
| 0.189198
| 0.179819
| 0.060802
| 0.060802
| 0.04075
| 0.04075
| 0
| 0.017943
| 0.274106
| 5,144
| 135
| 81
| 38.103704
| 0.810123
| 0.383554
| 0
| 0.101266
| 0
| 0
| 0.040642
| 0
| 0
| 0
| 0
| 0.007407
| 0
| 1
| 0.050633
| false
| 0
| 0.063291
| 0
| 0.177215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc4d689703a555cde99de572dc764b14b5f45f70
| 726
|
py
|
Python
|
main.py
|
lucastan96/video2bitmap
|
8a54f33af92b5088d29322abf936a6ce2ecc0ac4
|
[
"MIT"
] | 1
|
2020-12-30T00:57:38.000Z
|
2020-12-30T00:57:38.000Z
|
main.py
|
lucastan96/video2bitmap
|
8a54f33af92b5088d29322abf936a6ce2ecc0ac4
|
[
"MIT"
] | null | null | null |
main.py
|
lucastan96/video2bitmap
|
8a54f33af92b5088d29322abf936a6ce2ecc0ac4
|
[
"MIT"
] | null | null | null |
import moviepy.editor as mpy
import moviepy.video.fx.all as vfx
import subprocess as sp
from PIL import Image  # needed below for Image.open
# Crop and resize video
clip = mpy.VideoFileClip("smoke.mp4")
(w, h) = clip.size
cropped_clip = vfx.crop(clip, width=(h/128)*64, height=h, x1=w/4*3-100, y1=0).resize((64, 128))
cropped_clip.write_videofile('smoke-cropped.mp4')
# Convert video to frames
# Make sure to install ffmpeg on machine
cmd='ffmpeg -i /path/to/smoke-cropped.mp4 /path/to/frames_temp/%d.bmp'
sp.call(cmd,shell=True)
# Convert image to black and white bitmap
for i in range(202):
col = Image.open("frames_temp/" + str(i + 1) + ".bmp")
gray = col.convert('L')
bw = gray.point(lambda x: 0 if x<128 else 255, '1')
bw.save("frames/" + str(i) + ".bmp")
| 34.571429
| 95
| 0.688705
| 130
| 726
| 3.807692
| 0.569231
| 0.052525
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053398
| 0.14876
| 726
| 21
| 96
| 34.571429
| 0.747573
| 0.170799
| 0
| 0
| 0
| 0.071429
| 0.198997
| 0.088629
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc4f5018d00b3586d20735c150c38e4b306f48f3
| 325
|
py
|
Python
|
models/minimize_model.py
|
MichalBusta/OpenCitiesAIC
|
2358118a782edde27a588d6adaf79941cbd90de6
|
[
"MIT"
] | 7
|
2020-03-23T21:43:32.000Z
|
2021-03-30T09:11:45.000Z
|
models/minimize_model.py
|
MichalBusta/OpenCitiesAIC
|
2358118a782edde27a588d6adaf79941cbd90de6
|
[
"MIT"
] | 4
|
2020-05-09T01:13:24.000Z
|
2022-01-13T02:24:14.000Z
|
models/minimize_model.py
|
MichalBusta/OpenCitiesAIC
|
2358118a782edde27a588d6adaf79941cbd90de6
|
[
"MIT"
] | 4
|
2020-04-17T15:06:36.000Z
|
2021-03-30T09:11:47.000Z
|
'''
Created on Mar 22, 2020
@author: Michal.Busta at gmail.com
'''
#get rid of the optimizer state ...
import torch
MODEL_PATH = '/models/model-b2-2.pth'
state = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)
state_out = {
"state_dict": state["state_dict"],
}
torch.save(state_out, 'model-b2-2.pth')
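# Sanity check (sketch): the stripped checkpoint should now contain only the
# weights
# slim = torch.load('model-b2-2.pth', map_location='cpu')
# assert set(slim.keys()) == {'state_dict'}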
| 20.3125
| 73
| 0.707692
| 52
| 325
| 4.288462
| 0.653846
| 0.080717
| 0.071749
| 0.098655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035587
| 0.135385
| 325
| 16
| 74
| 20.3125
| 0.758007
| 0.289231
| 0
| 0
| 0
| 0
| 0.251121
| 0.098655
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc4fb0ed6bbdc4f3f5e43225548f14915b084779
| 1,125
|
py
|
Python
|
setup.py
|
thomas-kloeber/braumeister
|
1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7
|
[
"MIT"
] | 6
|
2018-02-09T15:03:12.000Z
|
2021-02-18T07:21:34.000Z
|
setup.py
|
thomas-kloeber/braumeister
|
1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7
|
[
"MIT"
] | 17
|
2018-03-20T09:28:32.000Z
|
2022-01-27T08:48:41.000Z
|
setup.py
|
thomas-kloeber/braumeister
|
1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7
|
[
"MIT"
] | 7
|
2018-02-09T15:06:11.000Z
|
2020-03-02T10:23:10.000Z
|
import os
import re
from setuptools import setup
version = re.search(
    r'^__version__\s*=\s*"(.*)"',  # raw string avoids invalid-escape warnings
    open('braumeister/braumeister.py').read(),
    re.M
).group(1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="braumeister",
packages=["braumeister", "braumeister.actions"],
version=version,
author="Marcel Steffen",
author_email="marcel@talentsconnect.com",
description="Easy release bulding, combining JIRA and git",
long_description=read('README.md'),
license="MIT",
keywords="git jira release",
url="https://www.talentsconnect.com",
include_package_data=True,
install_requires=['requests', 'colorama'],
entry_points={
'console_scripts': ["braumeister = braumeister.braumeister:main"]
},
python_requires='!=2.7, !=3.4, >=3.5',
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Utilities",
"Topic :: Software Development :: Version Control :: Git"
],
)
| 26.162791
| 73
| 0.639111
| 123
| 1,125
| 5.707317
| 0.674797
| 0.125356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008879
| 0.199111
| 1,125
| 42
| 74
| 26.785714
| 0.770255
| 0
| 0
| 0
| 0
| 0
| 0.427556
| 0.092444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.083333
| 0.027778
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc500a982cb78bb46e6aa705ee116eae36444405
| 3,493
|
py
|
Python
|
modules/inference.py
|
rubelchowdhury20/wuton-with-densepose
|
5485f1f311724d8f8b887d669a8b55c73849eb98
|
[
"MIT"
] | 12
|
2020-11-13T01:51:24.000Z
|
2022-03-17T03:14:27.000Z
|
modules/inference.py
|
rubelchowdhury20/wuton-with-densepose
|
5485f1f311724d8f8b887d669a8b55c73849eb98
|
[
"MIT"
] | 1
|
2021-10-12T06:10:22.000Z
|
2021-10-12T06:10:22.000Z
|
modules/inference.py
|
rubelchowdhury20/wuton-with-densepose
|
5485f1f311724d8f8b887d669a8b55c73849eb98
|
[
"MIT"
] | 2
|
2021-01-10T17:51:34.000Z
|
2022-03-02T10:53:11.000Z
|
# standard library imports
import os
# third party imports
import numpy as np
from PIL import Image
import torch.nn as nn
from torchvision import transforms
# local imports
import config
from . import utils
from . import geometric_transformer
class GeoTransformationInfer(nn.Module):
def __init__(self, output_dir="./output/results"):
super(GeoTransformationInfer, self).__init__()
self.output_dir = output_dir
utils.ensure_folder(self.output_dir)
def forward(self, model_apparel, warped_image, model_image, warped_model_image, random_product_image, random_product_image_warped, output_on_random_product, batch_index, epoch):
batch_size = warped_image.shape[0]
model_apparel = model_apparel.cpu().numpy()
warped_image = warped_image.cpu().numpy()
model_image = model_image.cpu().numpy()
warped_model_image = warped_model_image.cpu().numpy()
random_product_image = random_product_image.cpu().numpy()
random_product_image_warped = random_product_image_warped.cpu().numpy()
output_on_random_product = output_on_random_product.cpu().numpy()
for i in range(batch_size):
self._save_image_sheet(
batch_index*config.PARAMS["batch_size"] + i,
model_apparel[i],
warped_image[i],
model_image[i],
warped_model_image[i],
random_product_image[i],
random_product_image_warped[i],
output_on_random_product[i],
epoch)
def _save_image_sheet(self,
idx,
model_apparel,
warped_image,
model_image,
warped_model_image,
random_product_image,
random_product_image_warped,
output_on_random_product,
epoch):
# inverse normalization of the images along with channel first to channel last steps and finally converting np array to pillow format for saving
model_apparel = np.moveaxis(model_apparel, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
model_apparel = Image.fromarray(np.uint8(model_apparel * 255))
warped_image = np.moveaxis(warped_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
warped_image = Image.fromarray(np.uint8(warped_image * 255))
model_image = np.moveaxis(model_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
model_image = Image.fromarray(np.uint8(model_image * 255))
warped_model_image = np.moveaxis(warped_model_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
warped_model_image = Image.fromarray(np.uint8(warped_model_image * 255))
random_product_image = np.moveaxis(random_product_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
random_product_image = Image.fromarray(np.uint8(random_product_image * 255))
        random_product_image_warped = np.moveaxis(random_product_image_warped, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
random_product_image_warped = Image.fromarray(np.uint8(random_product_image_warped * 255))
output_on_random_product = np.moveaxis(output_on_random_product, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
output_on_random_product = Image.fromarray(np.uint8(output_on_random_product * 255))
sheet = Image.new('RGB', (1568, 224), 'white')
sheet.paste(model_apparel, (0, 0))
sheet.paste(warped_image, (224, 0))
sheet.paste(model_image, (448, 0))
sheet.paste(warped_model_image, (672, 0))
sheet.paste(random_product_image, (896, 0))
sheet.paste(random_product_image_warped, (1120, 0))
sheet.paste(output_on_random_product, (1344, 0))
sheet.save(os.path.join(self.output_dir, "image_sheet_{}-epoch{}".format(idx, str(epoch).zfill(3)) + ".jpg"))
| 41.094118
| 178
| 0.742056
| 543
| 3,493
| 4.480663
| 0.187845
| 0.160296
| 0.155364
| 0.098644
| 0.427045
| 0.328812
| 0.235101
| 0.203042
| 0.203042
| 0.203042
| 0
| 0.081303
| 0.130261
| 3,493
| 84
| 179
| 41.583333
| 0.719552
| 0.057544
| 0
| 0
| 0
| 0
| 0.018259
| 0.006695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.121212
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc50340e05b5a45da8fec5c4d61ac3cccc89e3f0
| 6,577
|
py
|
Python
|
imggen/fonts.py
|
p-lambda/unlabeled_outputs
|
18cda9e922591ec99d70caaa173abbb049ef274b
|
[
"MIT"
] | 4
|
2021-07-02T03:08:29.000Z
|
2022-03-12T07:13:13.000Z
|
imggen/fonts.py
|
p-lambda/unlabeled_outputs
|
18cda9e922591ec99d70caaa173abbb049ef274b
|
[
"MIT"
] | 1
|
2021-12-25T21:24:23.000Z
|
2021-12-25T21:24:23.000Z
|
imggen/fonts.py
|
p-lambda/unlabeled_outputs
|
18cda9e922591ec99d70caaa173abbb049ef274b
|
[
"MIT"
] | 1
|
2021-12-26T07:33:45.000Z
|
2021-12-26T07:33:45.000Z
|
from pathlib import Path
import h5py
import numpy as np
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import requests
import zipfile
from tqdm import tqdm
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(CHUNK_SIZE)):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
class Fonts(VisionDataset):
url_id = '0B0GtwTQ6IF9AU3NOdzFzUWZ0aDQ'
base_folder = 'fonts'
def __init__(self, root, split='train',
transform=None, target_transform=None, download=True,
denoise=False, denoise_transform=None, num_fonts_pi=None,
num_examples=2500):
'''
Args:
root (str): path
num_train_domains (int): number of train domains up to 41443
test_mean_chars (bool): Use the mean characters as test set
split (str): 'train', 'val', 'test'
transform: input transformation
target_transform: target transformation
download (bool): download or not
'''
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = split
self.transform = transform
self.target_transform = target_transform
self.denoise = denoise
self.denoise_transform = denoise_transform
self.path = Path(self.root) / self.base_folder
self.path.mkdir(parents=True, exist_ok=True)
self.download_path = self.path / 'fonts.hdf5'
if download:
self.download()
with h5py.File(str(self.download_path), 'r') as f:
data_by_domain = f['fonts'][()]
np.random.seed(484347)
# limit the number of fonts
num_fonts = 100
font_idxs = np.arange(len(data_by_domain))
np.random.shuffle(font_idxs)
if not denoise:
data_by_domain = data_by_domain[font_idxs[:num_fonts]]
print(f"NUM FONTS: {num_fonts}")
print(f"NUM CHARS: {data_by_domain.shape[1]}")
num_classes = data_by_domain.shape[1]
self.all_targets = np.concatenate(
[np.arange(num_classes)]*num_fonts, axis=0)
self.all_domain_labels = np.repeat(np.arange(num_fonts), num_classes)
self.all_data = data_by_domain.reshape(data_by_domain.shape[0]*data_by_domain.shape[1], data_by_domain.shape[2], data_by_domain.shape[3])
idxs = np.arange(len(self.all_data))
np.random.shuffle(idxs)
train_val_max = 2600
if num_examples > train_val_max:
# to be able to heuristically test what happens if we have more training data
train_val_max = 5000
if split == 'train':
idxs = idxs[:num_examples]
elif split == 'val':
idxs = idxs[num_examples: train_val_max]
else:
idxs = idxs[train_val_max:]
self.targets = self.all_targets[idxs]
self.domain_labels = self.all_domain_labels[idxs]
self.data = self.all_data[idxs]
else:
# get the train data
train_dbd = data_by_domain[font_idxs[:num_fonts]]
all_data = train_dbd.reshape(train_dbd.shape[0]*train_dbd.shape[1], train_dbd.shape[2], train_dbd.shape[3])
idxs = np.arange(len(all_data))
np.random.shuffle(idxs)
idxs = idxs[:num_examples]
train_data = all_data[idxs]
if num_fonts_pi is not None:
data_by_domain = data_by_domain[font_idxs[num_fonts:num_fonts+num_fonts_pi]]
else:
data_by_domain = data_by_domain[font_idxs[num_fonts:]]
self.data = data_by_domain.reshape(data_by_domain.shape[0]*data_by_domain.shape[1], data_by_domain.shape[2], data_by_domain.shape[3])
self.data = np.concatenate([train_data, self.data], axis=0)
def get_nearest_neighbor(self, all_imgs, x):
idx = np.argmin(np.sum(np.square(all_imgs - x), axis=(1,2)))
return self[idx]
def download(self):
if not self.download_path.exists():
download_file_from_google_drive(self.url_id, str(self.download_path))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.denoise:
img = self.data[index]
img = Image.fromarray(img)
if self.transform is not None:
tgt_img = self.transform(img)
if self.denoise_transform is not None:
src_img = self.denoise_transform(img)
return src_img, tgt_img
else:
img, target = self.data[index], self.targets[index]
domain_label = self.domain_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, domain_label
def get_item_from_all(self, index):
img, target = self.all_data[index], self.all_targets[index]
domain_label = self.all_domain_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, domain_label
def __len__(self):
return len(self.data)
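# Usage sketch (hypothetical root path and transform):
# from torchvision import transforms
# ds = Fonts('./data', split='train', transform=transforms.ToTensor())
# img, target, domain_label = ds[0]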
| 35.939891
| 149
| 0.614414
| 846
| 6,577
| 4.556738
| 0.217494
| 0.032685
| 0.06537
| 0.044099
| 0.366018
| 0.250843
| 0.22153
| 0.214267
| 0.214267
| 0.214267
| 0
| 0.012468
| 0.292687
| 6,577
| 182
| 150
| 36.137363
| 0.816208
| 0.112513
| 0
| 0.178862
| 0
| 0
| 0.033491
| 0.009293
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.065041
| 0.00813
| 0.219512
| 0.01626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc535e8c70a7ae7d8c05a67decf44c291034483f
| 2,406
|
py
|
Python
|
adminmgr/media/code/A3/task3/T1_ocefXVJ.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/A3/task3/T1_ocefXVJ.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/A3/task3/T1_ocefXVJ.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def tmp(x):
y = (x.split(';')[7]).split(',')
return (y)
def forf(x):
for i in x:
yield (i,1)
def topprint(time,rdd):
res1=rdd.take(5)
count=0
for i in res1:
if(count==4):
print("%s" % i)
else:
print("%s" % i,end=',')
count = count +1
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,int(sys.argv[1]))
ssc.checkpoint("/checkpoint_BIGDATA")
'''
#Selecting a window :
#outpu3:
inputStream=ssc.socketTextStream("localhost",9009)
dataStream = inputStream.window(int(sys.argv[1]),int(sys.argv[2]))
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
count=septweet.reduceByKey(lambda x,y:x+y)
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
#res.pprint(3)
'''
'''
#Selecting a datastream and then reducing by window:
#outpu2
dataStream=ssc.socketTextStream("localhost",9009)
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
#septweet.pprint()
count=septweet.reduceByKeyAndWindow(lambda x,y:x+y,int(sys.argv[1]),int(sys.argv[2]))
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[0],ascending=True))
sortcount = sortcount.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
#tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
'''
#Try in outpu1
inputStream=ssc.socketTextStream("localhost",9009)
dataStream = inputStream.window(int(sys.argv[2]),int(sys.argv[1]))
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
count=septweet.reduceByKey(lambda x,y:x+y)
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[0],ascending=True))
sortcount = sortcount.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
#tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
#TO maintain state
# totalcount=tweet.updateStateByKey(aggregate_tweets_count)
# totalcount.pprint()
#To Perform operation on each RDD
# totalcount.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
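# How to run (sketch): the port 9009 and the argv indices are taken from the
# code above; the concrete interval values are examples. argv[1] is the batch
# (slide) interval, argv[2] the window length, both in seconds:
#
#   $ nc -lk 9009                        # terminal 1: paste ';'-separated records
#   $ spark-submit T1_ocefXVJ.py 1 5     # terminal 2: 1s batches, 5s window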
| 24.804124
| 86
| 0.741895
| 362
| 2,406
| 4.91989
| 0.287293
| 0.031443
| 0.035935
| 0.058956
| 0.554183
| 0.548568
| 0.548568
| 0.548568
| 0.504211
| 0.504211
| 0
| 0.024357
| 0.095594
| 2,406
| 96
| 87
| 25.0625
| 0.794118
| 0.078969
| 0
| 0
| 0
| 0
| 0.036145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.15
| 0
| 0.25
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc562cc6c9b35189e9adc0f9ba37a99ec2138c03
| 3,672
|
py
|
Python
|
google_compute_engine/config_manager.py
|
redoxdrh/GCP-Flask
|
34af307df541edca4eee58b1d8be64888550a674
|
[
"Apache-2.0"
] | 2
|
2017-05-04T08:05:29.000Z
|
2019-02-08T21:36:11.000Z
|
google_compute_engine/config_manager.py
|
redoxdrh/GCP-Flask
|
34af307df541edca4eee58b1d8be64888550a674
|
[
"Apache-2.0"
] | null | null | null |
google_compute_engine/config_manager.py
|
redoxdrh/GCP-Flask
|
34af307df541edca4eee58b1d8be64888550a674
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for retrieving and modifying configuration settings."""
import os
import textwrap
from google_compute_engine import file_utils
from google_compute_engine.compat import parser
CONFIG = '/etc/default/instance_configs.cfg'
class ConfigManager(object):
"""Process the configuration defaults."""
def __init__(self, config_file=None, config_header=None):
"""Constructor.
Args:
config_file: string, the location of the config file.
config_header: string, the message to write at the top of the config.
"""
self.config_file = config_file or CONFIG
self.config_header = config_header
self.config = parser.SafeConfigParser()
self.config.read(self.config_file)
def _AddHeader(self, fp):
"""Create a file header in the config.
Args:
fp: file object, open for writing, to receive the header.
"""
text = textwrap.wrap(
textwrap.dedent(self.config_header), break_on_hyphens=False)
fp.write('\n'.join(['# ' + line for line in text]))
fp.write('\n\n')
def GetOptionString(self, section, option):
"""Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
string, the value of the option or None if the option doesn't exist.
"""
if self.config.has_option(section, option):
return self.config.get(section, option)
else:
return None
def GetOptionBool(self, section, option):
"""Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
bool, True if the option is enabled or not set.
"""
return (not self.config.has_option(section, option) or
self.config.getboolean(section, option))
def SetOption(self, section, option, value, overwrite=True):
"""Set the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to set the value of.
value: string, the value to set the option.
overwrite: bool, True to overwrite an existing value in the config file.
"""
if not overwrite and self.config.has_option(section, option):
return
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, str(value))
def WriteConfig(self, config_file=None):
"""Write the config values to a given file.
Args:
config_file: string, the file location of the config file to write.
"""
config_file = config_file or self.config_file
config_name = os.path.splitext(os.path.basename(config_file))[0]
config_lock = '/var/lock/google_%s.lock' % config_name
with file_utils.LockFile(config_lock):
with open(config_file, 'w') as config_fp:
if self.config_header:
self._AddHeader(config_fp)
self.config.write(config_fp)
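# Usage sketch (illustrative; the file path and section/option names are
# example values, not part of the module above):
#
#   cm = ConfigManager(config_file='/tmp/instance_configs.cfg',
#                      config_header='Generated file - do not edit.')
#   if cm.GetOptionBool('Daemons', 'accounts_daemon'):
#       cm.SetOption('Daemons', 'accounts_daemon', 'false')
#   cm.WriteConfig(config_file='/tmp/instance_configs.cfg')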
| 33.381818
| 78
| 0.699891
| 532
| 3,672
| 4.744361
| 0.302632
| 0.083201
| 0.046355
| 0.029715
| 0.257924
| 0.203249
| 0.190571
| 0.16046
| 0.16046
| 0.16046
| 0
| 0.003106
| 0.210784
| 3,672
| 109
| 79
| 33.688073
| 0.86784
| 0.491285
| 0
| 0
| 0
| 0
| 0.038915
| 0.033608
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc56ca67cc1e81684bbce0d45386183e51cffb90
| 10,340
|
py
|
Python
|
examples/pytorch/swin/checkpoint_quantization.py
|
hieuhoang/FasterTransformer
|
440695ccac874574b1d2e1121788e8fa674b4381
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/swin/checkpoint_quantization.py
|
hieuhoang/FasterTransformer
|
440695ccac874574b1d2e1121788e8fa674b4381
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/swin/checkpoint_quantization.py
|
hieuhoang/FasterTransformer
|
440695ccac874574b1d2e1121788e8fa674b4381
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import re
import numpy as np
import torch
ACTIVATION_AMAX_NUM = 72
INT8O_KERNEL_NUM = 5
INT8O_GEMM_NUM = 7
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 8
def extract_amaxlist(init_dict, depths, ths_path='../lib/libpyt_swintransformer.so', verbose=True):
# print("Quantizing checkpoint ...")
torch.classes.load_library(ths_path)
weight_quantize = torch.ops.fastertransformer.swin_weight_quantize
layer_num = len(depths)
amaxTotalNum = ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM + 1 + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
kernel_name_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2"]
amax_name_list = ["attn.qkv._input_quantizer",
"attn.qkv._aftergemm_quantizer",
"attn.proj._input_quantizer",
"attn.proj._aftergemm_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer",
"attn.matmul_a_input_quantizer",
"attn.softmax_input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._input_quantizer",
"mlp.fc2._aftergemm_quantizer",
"add1_residual_input_quantizer",
"add2_residual_input_quantizer"
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_weight_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_input_list = ["attn.qkv._input_quantizer",
"attn.proj._input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc2._input_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_a_input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_output_list = ["attn.qkv._aftergemm_quantizer",
"attn.proj._aftergemm_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._aftergemm_quantizer",
"attn.softmax_input_quantizer",
"attn.proj._input_quantizer"]
downsample_input = "downsample.reduction._input_quantizer"
downsample_weight = "downsample.reduction._weight_quantizer"
downsample_out = "downsample.reduction._aftergemm_quantizer"
factor = 1000000.0
for i in range(layer_num):
for depth in range(depths[i]):
amaxList = np.zeros([amaxTotalNum]).astype(np.float32)
amax_id = 0
for amax_name in amax_name_list:
quant_max = init_dict["layers.{}.blocks.{}.{}._amax".format(i, depth, amax_name)].item()
amax = abs(quant_max)  # alternative: round(abs(quant_max)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
# if verbose:
# print(i, amax_name)
# print('quant_max:', quant_max)
# print('amax:', amax)
if i != layer_num - 1:
amax = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
amax = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
else:
amax_id += 8
if verbose:
print("done process layer_{} block_{} activation amax".format(i, depth))
#kernel amax starts from ACTIVATION_AMAX_NUM
assert amax_id == 68
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)].transpose(-1, -2).contiguous()
quant_max2 = init_dict["layers.{}.blocks.{}.{}._weight_quantizer._amax".format(i, depth, kernel_name)]
amax2 = abs(quant_max2)
# if (amax2.dim() == 0):
# quant_max_processed = torch.full((kernel.size(1),), amax2.item(), dtype=amax2.dtype, device=amax2.device)
# else:
# quant_max_processed = amax2.view(-1)
kernel_processed = weight_quantize(kernel, amax2.cuda())
init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)] = kernel_processed
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = amax2.item()
amaxList[amax_id] = amax2
amax_id += 1
# if verbose:
# print(i, kernel_name)
# print('kernel:', kernel)
# print('quant_max2:', quant_max2)
# print('quant_max_processed_:', quant_max_processed)
if i != layer_num - 1:
amaxList[amax_id] = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)].item()
amax_id += 1
assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM
#for int8O gemm deQuant
for j in range(INT8O_GEMM_NUM - 1):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
# print('layernum:', i, 'j:', j, ' gemm_int8IO_scale:',amaxList[amax_id])
# print(int8O_gemm_input_amax_list[j], int8O_gemm_weight_amax_list[j], int8O_gemm_output_amax_list[j])
amax_id += 1
if i != layer_num - 1:
patchMerge_i = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
patchMerge_w = init_dict["layers.{}.{}._amax".format(i, downsample_weight)].item()
patchMerge_o = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
amaxList[amax_id] = (patchMerge_i * patchMerge_w) / (127 * patchMerge_o)
amax_id += 1
assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
# amaxList[amax_id] = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
# amax_id += 1
# #### softmax amax
# amaxList[amax_id] = amaxList[28]
# amax_id += 1
# #### bmm2 amax
# amaxList[amax_id] = amaxList[8]
# amax_id += 1
qkvMax = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
amaxList[amax_id] = amaxList[16] * amaxList[20] / (127.0 * 127.0)
amax_id += 1
amaxList[amax_id] = 127.0 / amaxList[28]
amax_id += 1
amaxList[amax_id] = amaxList[24] * amaxList[28] / (127.0 * amaxList[8])
amax_id += 1
init_dict["layers.{}.blocks.{}.amaxList".format(i, depth)] = torch.tensor(amaxList, dtype=torch.float32)
if verbose:
print("done process layer_{} block_{} kernel weight".format(i, depth))
if i != layer_num - 1:
kernel = init_dict["layers.{}.downsample.reduction.weight".format(i)]
quant_max2 = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)]
amax2 = abs(quant_max2)
kernel = kernel.transpose(-1, -2).contiguous()
kernel_processed = weight_quantize(kernel, amax2.cuda())
init_dict["layers.{}.downsample.reduction.weight".format(i)] = kernel_processed
# print("Quantizing checkpoint done.")
return init_dict
if __name__ == '__main__':
weights = torch.load('pytorch_model.bin')
extract_amaxlist(weights, [2, 2, 6, 2])
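# Worked example of the per-amax layout built above (the 2.54 is illustrative):
# for an activation amax of 2.54 the inner loop appends four entries,
#   amax             -> 2.54        (fp32 dynamic range)
#   amax/127.0       -> 0.02        (dequant scale: int8 -> fp32)
#   amax/127.0/127.0 -> 0.000157    (scale for int32 accumulators)
#   127.0/amax       -> 50.0        (quant scale: fp32 -> int8)
# With 15 names in amax_name_list plus the two downsample entries (or the
# `amax_id += 8` padding on the last layer), that gives 15*4 + 8 = 68 slots,
# which is exactly what `assert amax_id == 68` checks.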
| 47.214612
| 137
| 0.562186
| 1,194
| 10,340
| 4.546064
| 0.173367
| 0.05748
| 0.059322
| 0.035925
| 0.56227
| 0.453758
| 0.407701
| 0.348379
| 0.319639
| 0.280951
| 0
| 0.035136
| 0.331141
| 10,340
| 219
| 138
| 47.214612
| 0.749711
| 0.156867
| 0
| 0.421053
| 0
| 0
| 0.179709
| 0.148892
| 0
| 0
| 0
| 0
| 0.019737
| 1
| 0.006579
| false
| 0
| 0.032895
| 0
| 0.046053
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc57b1f771495cf5ea069e99b2859a0f3795d393
| 6,608
|
py
|
Python
|
mars/deploy/kubernetes/core.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
mars/deploy/kubernetes/core.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
mars/deploy/kubernetes/core.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import time
from ...actors import new_client, FunctionActor
logger = logging.getLogger(__name__)
class K8SPodsIPWatcher(object):
"""
Pods watcher class, compatible with SchedulerDiscoverer
"""
dynamic = True
def __init__(self, k8s_config=None, k8s_namespace=None, label_selector=None):
from kubernetes import config, client
from gevent.threadpool import ThreadPool
if k8s_config is not None:
self._k8s_config = k8s_config
elif os.environ.get('KUBE_API_ADDRESS'):
self._k8s_config = client.Configuration()
self._k8s_config.host = os.environ['KUBE_API_ADDRESS']
else:
self._k8s_config = config.load_incluster_config()
self._k8s_namespace = k8s_namespace or os.environ.get('MARS_K8S_POD_NAMESPACE') or 'default'
self._label_selector = label_selector
self._client = client.CoreV1Api(client.ApiClient(self._k8s_config))
self._pool = ThreadPool(1)
self._pod_to_ep = None
def __reduce__(self):
return type(self), (self._k8s_config, self._k8s_namespace, self._label_selector)
def _extract_pod_name_ep(self, pod_data):
svc_port = pod_data['spec']['containers'][0]['ports'][0]['container_port']
return pod_data['metadata']['name'], '%s:%s' % (pod_data['status']['pod_ip'], svc_port)
@staticmethod
def _extract_pod_ready(obj_data):
# if conditions not supported, always return True
if 'status' not in obj_data or 'conditions' not in obj_data['status']:
return True
return any(cond['type'] == 'Ready' and cond['status'] == 'True'
for cond in obj_data['status']['conditions'])
def _get_pod_to_ep(self):
query = self._pool.spawn(self._client.list_namespaced_pod,
namespace=self._k8s_namespace,
label_selector=self._label_selector).result().to_dict()
result = dict()
for el in query['items']:
name, pod_ep = self._extract_pod_name_ep(el)
if pod_ep is not None and not self._extract_pod_ready(el):
pod_ep = None
result[name] = pod_ep
return result
def get(self, update=False):
if self._pod_to_ep is None or update:
self._pod_to_ep = self._get_pod_to_ep()
return sorted(a for a in self._pod_to_ep.values() if a is not None)
def is_all_ready(self):
self.get(True)
return all(a is not None for a in self._pod_to_ep.values())
def watch(self):
from urllib3.exceptions import ReadTimeoutError
from kubernetes import watch
cur_pods = set(self.get(True))
w = watch.Watch()
while True:
# when some schedulers are not ready, we refresh faster
linger = 10 if self.is_all_ready() else 1
streamer = w.stream(self._client.list_namespaced_pod,
namespace=self._k8s_namespace,
label_selector=self._label_selector,
timeout_seconds=linger)
while True:
try:
event = self._pool.spawn(next, streamer, StopIteration).result()
if event is StopIteration:
raise StopIteration
except (ReadTimeoutError, StopIteration):
new_pods = set(self.get(True))
if new_pods != cur_pods:
cur_pods = new_pods
yield self.get(False)
break
except: # noqa: E722
logger.exception('Unexpected error when watching on kubernetes')
break
obj_dict = event['object'].to_dict()
pod_name, endpoint = self._extract_pod_name_ep(obj_dict)
self._pod_to_ep[pod_name] = endpoint \
if endpoint and self._extract_pod_ready(obj_dict) else None
yield self.get(False)
class ReadinessActor(FunctionActor):
"""
Dummy actor indicating service start
"""
@classmethod
def default_uid(cls):
return 'k:0:%s' % cls.__name__
class K8SServiceMixin:
@staticmethod
def write_pid_file():
with open('/tmp/mars-service.pid', 'w') as pid_file:
pid_file.write(str(os.getpid()))
def wait_all_schedulers_ready(self):
"""
Wait till all containers are ready, both in kubernetes and in ClusterInfoActor
"""
from ...scheduler.utils import SchedulerClusterInfoActor
# check if all schedulers are ready using Kubernetes API
sleep_fun = (getattr(self, 'pool', None) or time).sleep
while not self.scheduler_discoverer.is_all_ready():
sleep_fun(1)
kube_schedulers = self.scheduler_discoverer.get()
logger.debug('Schedulers all ready in kubernetes, waiting ClusterInfoActor to be ready')
# check if all schedulers are registered in ClusterInfoActor
actor_client = new_client()
while True:
cluster_info = actor_client.actor_ref(
SchedulerClusterInfoActor.default_uid(), address=random.choice(kube_schedulers))
cluster_info_schedulers = cluster_info.get_schedulers()
if set(cluster_info_schedulers) == set(kube_schedulers):
from ...cluster_info import INITIAL_SCHEDULER_FILE
with open(INITIAL_SCHEDULER_FILE, 'w') as scheduler_file:
scheduler_file.write(','.join(cluster_info_schedulers))
logger.debug('Scheduler detection finished. Result: %r', kube_schedulers)
break
sleep_fun(1) # pragma: no cover
def create_scheduler_discoverer(self):
self.scheduler_discoverer = K8SPodsIPWatcher(label_selector='name=marsscheduler')
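# Usage sketch (illustrative; assumes a reachable kubernetes config and the
# same label selector the mixin uses above):
#
#   watcher = K8SPodsIPWatcher(label_selector='name=marsscheduler')
#   print(watcher.get(update=True))      # current scheduler endpoints
#   for endpoints in watcher.watch():    # blocks; yields whenever pods change
#       print('schedulers changed:', endpoints)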
| 39.333333
| 100
| 0.628783
| 802
| 6,608
| 4.933915
| 0.299252
| 0.019459
| 0.014152
| 0.016679
| 0.083902
| 0.053071
| 0.053071
| 0.053071
| 0.041446
| 0.041446
| 0
| 0.009742
| 0.285412
| 6,608
| 167
| 101
| 39.568862
| 0.828251
| 0.152542
| 0
| 0.125
| 0
| 0
| 0.072191
| 0.00778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.098214
| 0.017857
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc58243dff3b67ec29b9366a2531008a83301c24
| 767
|
py
|
Python
|
tests/tests_model/tests_bert_model.py
|
elangovana/gene_normalisation
|
9152298e951cd968ee516815c7fa11f1ceabca51
|
[
"MIT"
] | 1
|
2020-10-21T06:01:28.000Z
|
2020-10-21T06:01:28.000Z
|
tests/tests_model/tests_bert_model.py
|
elangovana/gene_normalisation
|
9152298e951cd968ee516815c7fa11f1ceabca51
|
[
"MIT"
] | null | null | null |
tests/tests_model/tests_bert_model.py
|
elangovana/gene_normalisation
|
9152298e951cd968ee516815c7fa11f1ceabca51
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import torch
import transformers
from model.bert_model import BertModel
class TestBertModel(TestCase):
def test_forward(self):
# Bert Config
vocab_size = 10
sequence_len = 20
batch = 32
num_classes = 3
expected_shape = (batch, sequence_len, num_classes)
input_batch = torch.randint(low=0, high=vocab_size-1, size=(batch, sequence_len))
config= transformers.BertConfig(vocab_size=vocab_size,hidden_size=10, num_hidden_layers=1, num_attention_heads=1,num_labels=num_classes)
sut = BertModel(None, None, bert_config=config)
# Act
actual = sut.forward(input_batch)[0]
# Assert
self.assertEqual(expected_shape, actual.shape)
| 26.448276
| 144
| 0.688396
| 98
| 767
| 5.153061
| 0.479592
| 0.071287
| 0.063366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023729
| 0.230769
| 767
| 28
| 145
| 27.392857
| 0.832203
| 0.028683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc583d8b5318b12422c378e8c294b322b7118447
| 1,593
|
py
|
Python
|
tests/renderer_test.py
|
tmcclintock/PyDonJuan
|
ab6d567b568c3e0dd976b10c2628ad99ca81b953
|
[
"CC0-1.0"
] | 2
|
2020-12-14T20:50:57.000Z
|
2021-05-26T04:32:24.000Z
|
tests/renderer_test.py
|
tmcclintock/PyDonJuan
|
ab6d567b568c3e0dd976b10c2628ad99ca81b953
|
[
"CC0-1.0"
] | 29
|
2020-12-18T15:56:14.000Z
|
2021-01-12T01:17:48.000Z
|
tests/renderer_test.py
|
tmcclintock/donjuan
|
ab6d567b568c3e0dd976b10c2628ad99ca81b953
|
[
"CC0-1.0"
] | null | null | null |
import json
import os
import tempfile
from unittest import TestCase
import pytest
from donjuan import Dungeon, DungeonRandomizer, Renderer
class RendererTest(TestCase):
def setUp(self):
super().setUp()
self.TEMP_DIR = tempfile.mkdtemp()
def test_smoke(self):
r = Renderer()
assert r is not None
def test_scale(self):
r = Renderer(scale=3)
assert r.scale == 3
@pytest.mark.slow
def test_render_dummy_dungeon(self):
inpath = os.path.abspath(os.path.dirname(__file__))
inpath = os.path.join(inpath, "fixtures/dummy_dungeon.json")
with open(inpath, "r") as f:
darr = json.load(f)["dungeon"]
n_rows = len(darr)
n_cols = len(darr)
dungeon = Dungeon(n_rows=n_rows, n_cols=n_cols)
for i in range(n_rows):
for j in range(n_cols):
dungeon.grid.cells[i][j].filled = bool(darr[i][j])
# Render and check for the file
fp = os.path.join(self.TEMP_DIR, "rendered_dungeon.png")
r = Renderer()
r.render(dungeon, file_path=fp)
assert os.path.exists(fp)
@pytest.mark.slow
def test_render_dungeon_with_rooms(self):
randomizer = DungeonRandomizer()
dungeon = Dungeon(10, 10, randomizers=[randomizer])
dungeon.randomize()
dungeon.emplace_rooms()
renderer = Renderer()
# Render and check for the file
fp = os.path.join(self.TEMP_DIR, "rendered_dungeon.png")
renderer.render(dungeon, file_path=fp)
assert os.path.exists(fp)
| 28.963636
| 68
| 0.622724
| 212
| 1,593
| 4.537736
| 0.34434
| 0.043659
| 0.034304
| 0.035343
| 0.280665
| 0.280665
| 0.224532
| 0.224532
| 0.224532
| 0.224532
| 0
| 0.005146
| 0.268048
| 1,593
| 54
| 69
| 29.5
| 0.819897
| 0.037037
| 0
| 0.190476
| 0
| 0
| 0.048988
| 0.017636
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.119048
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc5a20c1be48c7dd2648cc88a86c05d54e4b6c1c
| 612
|
py
|
Python
|
src/foremast/validate.py
|
dnava013/foremast
|
9849821b5bb3cd67b438c5adeaa0e42f86e9eaf8
|
[
"Apache-2.0"
] | 157
|
2016-09-12T16:24:14.000Z
|
2018-06-02T15:40:38.000Z
|
src/foremast/validate.py
|
dnava013/foremast
|
9849821b5bb3cd67b438c5adeaa0e42f86e9eaf8
|
[
"Apache-2.0"
] | 206
|
2016-09-12T16:41:31.000Z
|
2018-06-04T21:50:29.000Z
|
src/foremast/validate.py
|
dnava013/foremast
|
9849821b5bb3cd67b438c5adeaa0e42f86e9eaf8
|
[
"Apache-2.0"
] | 34
|
2016-09-12T16:37:57.000Z
|
2018-06-04T18:37:52.000Z
|
"""Spinnaker validate functions."""
import logging
from .consts import API_URL
from .utils.credentials import get_env_credential
LOG = logging.getLogger(__name__)
def validate_gate():
"""Check Gate connection."""
try:
credentials = get_env_credential()
LOG.debug('Found credentials: %s', credentials)
LOG.info('Gate working.')
except TypeError:
LOG.fatal('Gate connection not valid: API_URL = %s', API_URL)
def validate_all(args):
"""Run all validate steps."""
LOG.debug('Args: %s', args)
LOG.info('Running all validate steps.')
validate_gate()
| 23.538462
| 69
| 0.673203
| 76
| 612
| 5.236842
| 0.486842
| 0.045226
| 0.080402
| 0.095477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199346
| 612
| 25
| 70
| 24.48
| 0.812245
| 0.124183
| 0
| 0
| 0
| 0
| 0.207692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc5b680a2cd25d3fd6125ee9f9722bc8e692640b
| 7,320
|
py
|
Python
|
nova/tests/functional/test_metadata.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1
|
2020-08-14T02:20:59.000Z
|
2020-08-14T02:20:59.000Z
|
nova/tests/functional/test_metadata.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 2
|
2021-03-31T20:04:16.000Z
|
2021-12-13T20:45:03.000Z
|
nova/tests/functional/test_metadata.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T02:31:45.000Z
|
2020-07-24T02:31:45.000Z
|
# Copyright 2016 Rackspace Australia
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import jsonschema
import os
import requests
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
class fake_result(object):
def __init__(self, result):
self.status_code = 200
self.text = jsonutils.dumps(result)
real_request = requests.request
def fake_request(obj, url, method, **kwargs):
if url.startswith('http://127.0.0.1:123'):
return fake_result({'a': 1, 'b': 'foo'})
if url.startswith('http://127.0.0.1:124'):
return fake_result({'c': 3})
if url.startswith('http://127.0.0.1:125'):
return fake_result(jsonutils.loads(kwargs.get('data', '{}')))
return real_request(method, url, **kwargs)
class MetadataTest(test.TestCase, integrated_helpers.InstanceHelperMixin):
def setUp(self):
super(MetadataTest, self).setUp()
fake_image.stub_out_image_service(self)
self.addCleanup(fake_image.FakeImageService_reset)
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
self.start_service('conductor')
self.start_service('scheduler')
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1')).api
self.start_service('compute')
# create a server for the tests
server = self._build_server(name='test')
server = self.api.post_server({'server': server})
self.server = self._wait_for_state_change(server, 'ACTIVE')
self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
self.md_url = self.api_fixture.md_url
# make sure that the metadata service returns information about the
# server we created above
def fake_get_fixed_ip_by_address(self, ctxt, address):
return {'instance_uuid': server['id']}
self.useFixture(
fixtures.MonkeyPatch(
'nova.network.neutron.API.get_fixed_ip_by_address',
fake_get_fixed_ip_by_address))
def test_lookup_metadata_root_url(self):
res = requests.request('GET', self.md_url, timeout=5)
self.assertEqual(200, res.status_code)
def test_lookup_metadata_openstack_url(self):
url = '%sopenstack' % self.md_url
res = requests.request('GET', url, timeout=5,
headers={'X-Forwarded-For': '127.0.0.2'})
self.assertEqual(200, res.status_code)
def test_lookup_metadata_data_url(self):
url = '%sopenstack/latest/meta_data.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertIn('hostname', j)
self.assertEqual('test.novalocal', j['hostname'])
def test_lookup_external_service(self):
self.flags(
vendordata_providers=['StaticJSON', 'DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:123',
'hamster@http://127.0.0.1:123'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertEqual({}, j['static'])
self.assertEqual(1, j['testing']['a'])
self.assertEqual('foo', j['testing']['b'])
self.assertEqual(1, j['hamster']['a'])
self.assertEqual('foo', j['hamster']['b'])
def test_lookup_external_service_no_overwrite(self):
self.flags(
vendordata_providers=['DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:123',
'testing@http://127.0.0.1:124'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertNotIn('static', j)
self.assertEqual(1, j['testing']['a'])
self.assertEqual('foo', j['testing']['b'])
self.assertNotIn('c', j['testing'])
def test_lookup_external_service_passes_data(self):
# Much of the data we pass to the REST service is missing because of
# the way we've created the fake instance, but we should at least try
# and ensure we're passing _some_ data through to the external REST
# service.
self.flags(
vendordata_providers=['DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:125'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertIn('instance-id', j['testing'])
self.assertTrue(uuidutils.is_uuid_like(j['testing']['instance-id']))
self.assertIn('hostname', j['testing'])
self.assertEqual(self.server['tenant_id'], j['testing']['project-id'])
self.assertIn('metadata', j['testing'])
self.assertIn('image-id', j['testing'])
self.assertIn('user-data', j['testing'])
def test_network_data_matches_schema(self):
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/latest/network_data.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
# load the jsonschema for network_data
schema_file = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../doc/api_schemas/network_data.json"))
with open(schema_file, 'rb') as f:
schema = jsonutils.load(f)
jsonschema.validate(res.json(), schema)
| 37.927461
| 78
| 0.639617
| 908
| 7,320
| 5.008811
| 0.272026
| 0.05277
| 0.009894
| 0.015831
| 0.404134
| 0.369173
| 0.332894
| 0.332894
| 0.316403
| 0.313984
| 0
| 0.027733
| 0.236475
| 7,320
| 192
| 79
| 38.125
| 0.786008
| 0.132787
| 0
| 0.350746
| 0
| 0
| 0.164269
| 0.06751
| 0
| 0
| 0
| 0
| 0.186567
| 1
| 0.08209
| false
| 0.007463
| 0.08209
| 0.007463
| 0.216418
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc5dd6bb126db54b8402ce56f75664e9271f9ace
| 8,889
|
py
|
Python
|
openue/sequence_labeling/subject_labeling_data_manager.py
|
zxlzr/OpenUE
|
a49f8950dc2b93a489bb8ce0d40abb26c2c0f347
|
[
"MIT"
] | 8
|
2020-01-08T13:05:35.000Z
|
2021-12-20T09:43:57.000Z
|
openue/sequence_labeling/subject_labeling_data_manager.py
|
zxlzr/OpenUE
|
a49f8950dc2b93a489bb8ce0d40abb26c2c0f347
|
[
"MIT"
] | 9
|
2020-09-25T22:36:51.000Z
|
2022-02-10T01:50:44.000Z
|
openue/sequence_labeling/subject_labeling_data_manager.py
|
zxlzr/OpenUE
|
a49f8950dc2b93a489bb8ce0d40abb26c2c0f347
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../bert")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
import tokenization
from config import config
class Model_data_preparation(object):
def __init__(self, DATA_INPUT_DIR="raw_data", DATA_OUTPUT_DIR="SKE_2019_tokened_labeling",
vocab_file_path="vocab.txt", do_lower_case=True,General_Mode = False):
self.bert_tokenizer = tokenization.FullTokenizer(vocab_file=self.get_vocab_file_path(vocab_file_path),
do_lower_case=do_lower_case)  # initialize the bert tokenizer
self.DATA_INPUT_DIR = self.get_data_input_dir(DATA_INPUT_DIR)
self.DATA_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), DATA_OUTPUT_DIR)
self.General_Mode = General_Mode
def get_data_input_dir(self, DATA_INPUT_DIR):
DATA_INPUT_DIR = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")), DATA_INPUT_DIR)
return DATA_INPUT_DIR
def get_vocab_file_path(self, vocab_file_path):
print(vocab_file_path)
return vocab_file_path
def subject_object_labeling(self, spo_list, text):
def _spo_list_to_spo_predicate_dict(spo_list):
spo_predicate_dict = dict()
for spo_item in spo_list:
predicate = spo_item["predicate"]
subject = spo_item["subject"]
object = spo_item["object"]
spo_predicate_dict.setdefault(predicate, []).append((subject, object))
return spo_predicate_dict
def _gen_event_dic(spo_list):
res = []
res_d = {}
predicate = ""
for spo_item in spo_list:
predicate = spo_item["event"]
if 'time' in spo_item:
time = spo_item["time"]
res.append(('time',time))
if 'location' in spo_item:
location = spo_item["location"]
res.append(('location',location))
if 'participant' in spo_item:
participant = spo_item["participant"]
res.append(('participant',participant))
if 'denoter' in spo_item:
denoter = spo_item["denoter"]
res.append(('denoter',denoter))
if 'object' in spo_item:
object = spo_item["object"]
res.append(('object',object))
res_d[predicate] = res
return res_d
def _index_q_list_in_k_list(q_list, k_list):
"""Known q_list in k_list, find index(first time) of q_list in k_list"""
q_list_length = len(q_list)
k_list_length = len(k_list)
for idx in range(k_list_length - q_list_length + 1):
t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]
# print(idx, t)
if all(t):
# print(idx)
idx_start = idx
return idx_start
def _labeling_type(subject_object, so_type):
tokener_error_flag = False
so_tokened = self.bert_tokenizer.tokenize(subject_object)
so_tokened_length = len(so_tokened)
idx_start = _index_q_list_in_k_list(q_list=so_tokened, k_list=text_tokened)
if idx_start is None:
tokener_error_flag = True
'''
Entity: "1981年"  original sentence: "●1981年2月27日,中国人口学会成立"
so_tokened ['1981', '年'] text_tokened ['●', '##19', '##81', '年', '2', '月', '27', '日', ',', '中', '国', '人', '口', '学', '会', '成', '立']
so_tokened cannot be found in text_tokened! The cause: bert_tokenizer.tokenize adds "##" WordPiece prefixes.
'''
self.bert_tokener_error_log_f.write(subject_object + " @@ " + text + "\n")
self.bert_tokener_error_log_f.write(str(so_tokened) + " @@ " + str(text_tokened) + "\n")
else:  # label the entity's first token B-, all following tokens I-
labeling_list[idx_start] = "B-" + so_type
if so_tokened_length == 2:
labeling_list[idx_start + 1] = "I-" + so_type
elif so_tokened_length >= 3:
labeling_list[idx_start + 1: idx_start + so_tokened_length] = ["I-" + so_type] * (so_tokened_length - 1)
return tokener_error_flag
text_tokened = self.bert_tokenizer.tokenize(text)
text_tokened_not_UNK = self.bert_tokenizer.tokenize_not_UNK(text)
if not self.General_Mode:
spo_predicate_dict = _spo_list_to_spo_predicate_dict(spo_list)
else:
spo_predicate_dict = _gen_event_dic(spo_list)
for predicate, spo_list_form in spo_predicate_dict.items():
tokener_error_flag = False
labeling_list = ["O"] * len(text_tokened)
if not self.General_Mode:
for (spo_subject, spo_object) in spo_list_form:
flag_A = _labeling_type(spo_subject, "SUB")
flag_B = _labeling_type(spo_object, "OBJ")  # restored: flag_B is read below
if flag_A or flag_B:
tokener_error_flag = True
else:
for item in spo_list_form:
if item[1] is None:
continue
flag_A = _labeling_type(item[1],item[0])
if flag_A:
tokener_error_flag = True
# tag tokens that bert_tokenizer.tokenize split apart with the special label [##WordPiece]
for idx, token in enumerate(text_tokened):
"""标注被 bert_tokenizer.tokenize 拆分的词语"""
if token.startswith("##"):
labeling_list[idx] = "[##WordPiece]"
if not tokener_error_flag:
self.token_label_and_one_prdicate_out_f.write(" ".join(labeling_list)+"\t"+predicate+"\n")
self.text_f.write(text + "\n")
self.token_in_f.write(" ".join(text_tokened)+"\t"+predicate+"\n")
self.token_in_not_UNK_f.write(" ".join(text_tokened_not_UNK) + "\n")
def separate_raw_data_and_token_labeling(self):
if not os.path.exists(self.DATA_OUTPUT_DIR):
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "train"))
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "valid"))
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "test"))
for file_set_type in ["train", "valid"]:
print(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type)))
self.token_label_and_one_prdicate_out_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_label_and_one_prdicate_out.txt"), "w", encoding='utf-8')
self.bert_tokener_error_log_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "bert_tokener_error_log.txt"), "w", encoding='utf-8')
self.text_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "text.txt"), "w", encoding='utf-8')
self.token_in_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_in.txt"), "w", encoding='utf-8')
self.token_in_not_UNK_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_in_not_UNK.txt"), "w", encoding='utf-8')
if file_set_type == "train":
path_to_raw_data_file = "train.json"
elif file_set_type == "valid":
path_to_raw_data_file = "valid.json"
else:
pass
with open(os.path.join(self.DATA_INPUT_DIR, path_to_raw_data_file), 'r', encoding='utf-8') as f:
count_numbers = 0
while True:
line = f.readline()
if line:
count_numbers += 1
r = json.loads(line)
text = r["text"]
spo_list = r["spo_list"]
self.subject_object_labeling(spo_list=spo_list, text=text)
else:
break
print("all numbers", count_numbers)
self.text_f.close()
self.token_in_f.close()
self.token_in_not_UNK_f.close()
self.token_label_and_one_prdicate_out_f.close()
self.bert_tokener_error_log_f.close()
if __name__=="__main__":
DATA_INPUT_DIR = config.data_dir
DATA_OUTPUT_DIR = "sequence_labeling_data"
Vocab_Path = config.bert_vocab_dir
General_Mode = False
model_data = Model_data_preparation(General_Mode = General_Mode,DATA_INPUT_DIR=DATA_INPUT_DIR, DATA_OUTPUT_DIR=DATA_OUTPUT_DIR,vocab_file_path=Vocab_Path)
model_data.separate_raw_data_and_token_labeling()
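# Worked example of the labeling scheme implemented in _labeling_type (tokens
# are illustrative): for text_tokened = ['周', '星', '驰', '主', '演'] and a
# subject "周星驰" that tokenizes to three tokens, the output labeling_list is
#   ['B-SUB', 'I-SUB', 'I-SUB', 'O', 'O']
# i.e. the first subject token gets "B-" + type, the rest get "I-" + type,
# and any token that WordPiece split (those starting with "##") is later
# overwritten with "[##WordPiece]".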
| 49.938202
| 186
| 0.582068
| 1,152
| 8,889
| 4.106771
| 0.157986
| 0.036779
| 0.044388
| 0.027901
| 0.361234
| 0.274572
| 0.216867
| 0.201015
| 0.139294
| 0.101036
| 0
| 0.00699
| 0.307909
| 8,889
| 177
| 187
| 50.220339
| 0.761704
| 0.024412
| 0
| 0.106667
| 0
| 0
| 0.059723
| 0.013125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.006667
| 0.033333
| 0
| 0.14
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc5ea97eac050b419965fd5ba95918dc58fe5bee
| 3,222
|
py
|
Python
|
clarifai/rest/grpc/custom_converters/custom_message_to_dict.py
|
Taik/clarifai-python
|
c3b66b84cb348d3cb1edff958f561a4734b78650
|
[
"Apache-2.0"
] | 322
|
2015-08-25T03:16:11.000Z
|
2021-11-08T09:36:50.000Z
|
clarifai/rest/grpc/custom_converters/custom_message_to_dict.py
|
Taik/clarifai-python
|
c3b66b84cb348d3cb1edff958f561a4734b78650
|
[
"Apache-2.0"
] | 76
|
2015-10-25T13:03:47.000Z
|
2022-02-19T09:36:10.000Z
|
clarifai/rest/grpc/custom_converters/custom_message_to_dict.py
|
Taik/clarifai-python
|
c3b66b84cb348d3cb1edff958f561a4734b78650
|
[
"Apache-2.0"
] | 136
|
2015-09-04T13:48:27.000Z
|
2021-06-12T16:48:36.000Z
|
import typing # noqa
from google.protobuf import descriptor
from google.protobuf.json_format import _IsMapEntry, _Printer
from google.protobuf.message import Message # noqa
from clarifai.rest.grpc.proto.clarifai.api.utils import extensions_pb2
def protobuf_to_dict(object_protobuf, use_integers_for_enums=True, ignore_show_empty=False):
# type: (Message, typing.Optional[bool], typing.Optional[bool]) -> dict
# printer = _CustomPrinter(
printer = _CustomPrinter(
including_default_value_fields=False,
preserving_proto_field_name=True,
use_integers_for_enums=use_integers_for_enums,
ignore_show_empty=ignore_show_empty)
# pylint: disable=protected-access
return printer._MessageToJsonObject(object_protobuf)
class _CustomPrinter(_Printer):
def __init__(self, including_default_value_fields, preserving_proto_field_name,
use_integers_for_enums, ignore_show_empty):
super(_CustomPrinter, self).__init__(including_default_value_fields,
preserving_proto_field_name, use_integers_for_enums)
self._ignore_show_empty = ignore_show_empty
def _RegularMessageToJsonObject(self, message, js):
"""
Because of the fields with the custom extension `cl_show_if_empty`, we need to adjust the
original's method's return JSON object and keep these fields.
"""
js = super(_CustomPrinter, self)._RegularMessageToJsonObject(message, js)
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
if (self._ignore_show_empty and
not field.GetOptions().Extensions[extensions_pb2.cl_default_float]):
continue
if not field.GetOptions().Extensions[extensions_pb2.cl_show_if_empty]:
continue
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if name in js:
# Skip the field which has been serialized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = self._FieldToJsonObject(field, field.default_value)
return js
def _StructMessageToJsonObject(self, message):
"""
Converts Struct message according to Proto3 JSON Specification.
However, by default, empty objects {} get converted to null. We overwrite this behavior so {}
get converted to {}.
"""
fields = message.fields
ret = {}
for key in fields:
# When there's a Struct with an empty Struct field, this condition will hold True.
# Far as I know this is the only case this condition will be true. If not, this condition
# needs to be amended.
if fields[key].WhichOneof('kind') is None:
json_object = {}
else:
json_object = self._ValueMessageToJsonObject(fields[key])
ret[key] = json_object
return ret
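# Usage sketch (illustrative; `response` stands for any parsed clarifai API
# protobuf Message):
#
#   response_dict = protobuf_to_dict(response, use_integers_for_enums=False)
#   # proto field names are preserved (preserving_proto_field_name=True above)
#   print(sorted(response_dict.keys()))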
| 36.202247
| 97
| 0.71198
| 395
| 3,222
| 5.546835
| 0.344304
| 0.031949
| 0.047923
| 0.043359
| 0.189868
| 0.189868
| 0.125513
| 0.063898
| 0.063898
| 0.063898
| 0
| 0.001581
| 0.214773
| 3,222
| 88
| 98
| 36.613636
| 0.864427
| 0.240223
| 0
| 0.12963
| 0
| 0
| 0.001669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.092593
| 0
| 0.240741
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc60aeeb26d899f8ba324554b05c50b567a13167
| 6,525
|
py
|
Python
|
CircleciScripts/run_integrationtests.py
|
aimalygin/aws-sdk-ios
|
6cfaa3c56296300499f4885e9039c2dd24624cfa
|
[
"Apache-2.0"
] | 17
|
2018-02-19T16:29:51.000Z
|
2020-04-03T13:52:52.000Z
|
CircleciScripts/run_integrationtests.py
|
aimalygin/aws-sdk-ios
|
6cfaa3c56296300499f4885e9039c2dd24624cfa
|
[
"Apache-2.0"
] | 2
|
2019-11-07T15:23:33.000Z
|
2020-03-12T18:46:47.000Z
|
CircleciScripts/run_integrationtests.py
|
aimalygin/aws-sdk-ios
|
6cfaa3c56296300499f4885e9039c2dd24624cfa
|
[
"Apache-2.0"
] | 10
|
2018-03-06T14:27:12.000Z
|
2020-10-20T22:01:30.000Z
|
import demjson
import sys
from subprocess import Popen, PIPE
import subprocess
import xml.etree.ElementTree as ET
import os
from datetime import datetime
from functions import runcommand
#from sets import Set
def getfailedcases(withBundle = True):
xmlfile='build/reports/junit.xml'
tree = ET.parse(xmlfile)
root = tree.getroot()
testbundle = root.get('name')
testbundle = testbundle[0:len(testbundle) - 7]
failedtests = set()
#TODO we can filter with a condition
for testsuite in root.findall(".//testsuite"):
for testcase in testsuite.findall('.//testcase[failure]'):
suitename = testsuite.get('name')
casename = testcase.get('name')
if withBundle:
failedtests.add(testbundle + '/' + suitename + '/' + casename)
else:
failedtests.add(suitename + '/' + casename)
return failedtests
#run test
def runtest(otherargments, projectPath, schemeName, projectName, destination, derivedDataPath, timeout = 0):
runcommand("rm raw.log")
runcommand("rm xcpretty.log")
testcommand = "xcodebuild test-without-building -project {0} -scheme {1} -sdk iphonesimulator -destination '{2}' -derivedDataPath {3}/{4}".format(projectPath,schemeName, destination, derivedDataPath, projectName)
testcommand +=" " + otherargments;
rawoutput = open('raw.log','w')
exit_code = runcommand(testcommand,timeout, pipeout = rawoutput)
rawoutput.close()
print("Formatting test result .......")
xcprettycommand = "cat raw.log | xcpretty -r junit | tee xcpretty.log"
runcommand(xcprettycommand)
return exit_code
########################## main function ###############################
# a typical invocation looks like the usage message below
if (len(sys.argv) < 6 or sys.argv[1] == '-h' or sys.argv[1] == '--help') :
print("Usage: \r\n {0} <integrationTestsConfiguration json file path> <test result location> <group name> <destination> <derivedDataPath>".format(sys.argv[0])) ;
exit(1)
jsonfilename=sys.argv[1]
test_result_folder=sys.argv[2]
group_name = sys.argv[3]
destination = sys.argv[4]
derivedDataPath = sys.argv[5]
with open(jsonfilename, 'r') as jsonfile:
jsonstring = jsonfile.read()
testConfigure = demjson.decode(jsonstring)
runningConfigure = testConfigure['runningConfigure']
projectName = runningConfigure['projectName']
projectPath = runningConfigure['projectPath']
schemeName = runningConfigure['schemeName']
sdkName = runningConfigure['sdkName']
print("group name:", group_name)
testgroup = testConfigure[group_name]
testlist = testgroup['test_list']
if 'projectName' in testgroup.keys() :
projectName = testgroup['projectName']
if 'projectPath' in testgroup.keys():
projectPath = testgroup['projectPath']
if 'schemeName' in testgroup.keys():
schemeName = testgroup['schemeName']
print("projectName, projectPath, schemeName, destination", projectName, projectPath, schemeName, destination)
# testcommandhead = f"xcodebuild test-without-building -project {projectName} -scheme {schemeName} -sdk {sdkName} -destination 'platform={paltformName},name={deviceName},OS={osVersion}'"
# testcommandtail = " | tee raw.log | xcpretty -r junit | tee xcpretty.log"
runcommand('echo "export testresult=0" >> $BASH_ENV')
testresult = 0
for testname in testlist:
print("-------------------------------", testname , "-------------------------------");
test = testlist[testname]
testarguments = ' -only-testing:' + testname
#create skipping tests parameters
skipingtests = ""
if 'excludetests' in test:
for skipingtest in test['excludetests']:
skipingtests += ' -skip-testing:' + testname+ "/" + skipingtest
print("excludetests:", skipingtests)
exit_code = runtest(testarguments + skipingtests, projectPath, schemeName, projectName, destination, derivedDataPath)
print(testname, "exit code:", exit_code)
# if test fails, check if the failed tests can be retried
if exit_code == 65:
retriabletimes = 3 ;
if 'retriabletimes' in test:
retriabletimes = test['retriabletimes']
if retriabletimes > 1:
#get all failed test cases
faileds = getfailedcases()
if len(faileds) == 0 :
print("test command return an error code, but the failed test cases is 0")
print("exit code:", exit_code)
break;
print("failed tests:",faileds)
retrytimes = 1
print('retriabletimes:', retriabletimes)
while retrytimes <= retriabletimes and exit_code > 0:
print("retry ", testname, "for ", retrytimes, " times")
testarguments = ""
for failed in faileds:
testarguments += ' -only-testing:' + failed
retrytimes += 1
exit_code = runtest(testarguments,projectPath, schemeName, projectName, destination, derivedDataPath);
print("retry exit code:", exit_code)
if(exit_code != 0 ):
faileds = getfailedcases()
if exit_code != 0 :
print("exit code:", exit_code)
runcommand('mkdir -p {0}/{1}'.format(test_result_folder,testname))
runcommand('echo "{2}" >> {0}/{1}/exitcode.log'.format(test_result_folder,testname,exit_code))
runcommand('mv raw.log {0}/{1}/raw.log'.format(test_result_folder,testname))
runcommand('mv xcpretty.log {0}/{1}/xcpretty.log'.format(test_result_folder,testname))
runcommand('cp build/reports/junit.xml {0}/{1}/junit.xml'.format(test_result_folder,testname))
ignorefailure = False ;
if exit_code == 65 :
failedtests = getfailedcases(False)
print("failedtests:", failedtests)
if 'ignoreFailures' in test and failedtests :
ignoreFailures = set(test['ignoreFailures'])
if failedtests.issubset(ignoreFailures):
print("There are failed testcases that can be ignored")
ignorefailure = True;
else :
print("Failed testcases that cannot be ignored: ", failedtests - ignoreFailures )
if not ignorefailure:
print("There are faillures in the test")
testresult = 1
else:
print("Test succeed")
print("testresult:", testresult)
runcommand('echo "export testresult={0}" >> $BASH_ENV'.format(testresult))
| 42.927632
| 222
| 0.632337
| 665
| 6,525
| 6.156391
| 0.285714
| 0.035173
| 0.023449
| 0.026869
| 0.171226
| 0.117978
| 0.058622
| 0.021495
| 0.021495
| 0
| 0
| 0.009419
| 0.235249
| 6,525
| 151
| 223
| 43.211921
| 0.811022
| 0.07387
| 0
| 0.073171
| 0
| 0.01626
| 0.225569
| 0.026606
| 0
| 0
| 0
| 0.006623
| 0
| 1
| 0.01626
| false
| 0
| 0.065041
| 0
| 0.097561
| 0.162602
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc633f72ddfead99679ba43f47af451833e0fa30
| 3,563
|
py
|
Python
|
download.py
|
JamesWang007/Open3D-PointNet
|
402847ceef8d364672ca7d81e0afebcb445cceb5
|
[
"MIT"
] | 120
|
2019-04-06T16:04:01.000Z
|
2021-07-22T17:07:51.000Z
|
test/Open3D-PointNet-master/download.py
|
AhsanulIslam/Thesis_Computer_Vision
|
c308cce15146a33a3e474790b0f9535ee9e41eb7
|
[
"MIT"
] | null | null | null |
test/Open3D-PointNet-master/download.py
|
AhsanulIslam/Thesis_Computer_Vision
|
c308cce15146a33a3e474790b0f9535ee9e41eb7
|
[
"MIT"
] | 25
|
2019-04-08T09:39:47.000Z
|
2021-05-12T15:39:56.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Download big files from Google Drive."""
import shutil
import sys
import requests
import os
import time
import urllib.request
import zipfile
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
if percent % 5 == 0:
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def sizeof_fmt(num, suffix='B'):
# https://stackoverflow.com/a/1094933/5308925
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1000.0:
return "%3.2f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.2f%s%s" % (num, 'Yi', suffix)
def print_status(destination, progress):
message = "Downloading %s... %s" % (destination, sizeof_fmt(progress))
empty_space = shutil.get_terminal_size((80, 20)).columns - len(message)
sys.stdout.write('\r' + message + empty_space * ' ')
sys.stdout.flush()
def download_file_from_google_drive(id, destination):
# https://stackoverflow.com/a/39225039/5308925
def save_response_content(response, destination):
chunk_size = 32768
written_size = 0
with open(destination, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
written_size += len(chunk)  # count actual bytes; the final chunk may be shorter than chunk_size
print_status(destination, written_size)
print('Done.')
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
url = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(url, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(url, params=params, stream=True)
save_response_content(response, destination)
def download_contents():
# download model
model_path = './cls_model.pth'
if os.path.isfile(model_path):
print('Model file already downloaded in', model_path)
else:
download_file_from_google_drive('1WWf5B5fmik5_P1dwxltJ-atRkYeCcCC5', './cls_model.pth')
# download dataset
dataset_path = './shapenetcore_partanno_segmentation_benchmark_v0.zip'
if os.path.isfile(dataset_path):
print('Dataset file already downloaded in', dataset_path)
else:
dataset_url = 'https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_segmentation_benchmark_v0.zip'
urllib.request.urlretrieve(dataset_url, os.path.basename(dataset_url), reporthook)
# unzip dataset; announce before extracting, and derive the archive name
# from dataset_path so this also works when the download above was skipped
# (dataset_url is only bound in the else branch)
print('Now unzipping... Wait for 2 minutes ish...!')
zip_ref = zipfile.ZipFile(os.path.basename(dataset_path), 'r')
zip_ref.extractall('.')
zip_ref.close()
return 0
if __name__ == '__main__':
download_contents()
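For context, reporthook() above follows the callback protocol of urllib.request.urlretrieve (block count, block size, total size), so it can be reused for any plain HTTP download; a minimal sketch reusing it, with a placeholder URL that is not from the original script:

import urllib.request
# urlretrieve invokes the hook after each block received
urllib.request.urlretrieve('https://example.com/archive.zip', 'archive.zip', reporthook)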
| 31.8125
| 115
| 0.646646
| 455
| 3,563
| 4.894505
| 0.421978
| 0.016165
| 0.020207
| 0.015267
| 0.145487
| 0.041311
| 0
| 0
| 0
| 0
| 0
| 0.030875
| 0.227337
| 3,563
| 111
| 116
| 32.099099
| 0.77806
| 0.122369
| 0
| 0.054054
| 0
| 0
| 0.160129
| 0.027653
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094595
| false
| 0.013514
| 0.094595
| 0
| 0.27027
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc63b363d6718bb79d14c412bc96475ee3170b28
| 763
|
py
|
Python
|
ls12/demo5.py
|
cklwblove/python-100-days-source-code
|
5d66c7708047f0d7bac0ce05d21834bbbfa6ccf1
|
[
"MIT"
] | null | null | null |
ls12/demo5.py
|
cklwblove/python-100-days-source-code
|
5d66c7708047f0d7bac0ce05d21834bbbfa6ccf1
|
[
"MIT"
] | null | null | null |
ls12/demo5.py
|
cklwblove/python-100-days-source-code
|
5d66c7708047f0d7bac0ce05d21834bbbfa6ccf1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Move time-consuming tasks into a worker thread for a better user experience.
"""
import time
import tkinter
import tkinter.messagebox

def download():
    # simulate a download task that takes 10 seconds
    time.sleep(10)
    tkinter.messagebox.showinfo('Info', 'Download complete')

def show_about():
    tkinter.messagebox.showinfo('About', 'Author: 罗浩')

def main():
    top = tkinter.Tk()
    top.title('Single thread')
    top.geometry('200x150')
    top.wm_attributes('-topmost', True)
    panel = tkinter.Frame(top)
    button1 = tkinter.Button(panel, text='Download', command=download)
    button1.pack(side='left')
    button2 = tkinter.Button(panel, text='About', command=show_about)
    button2.pack(side='right')
    panel.pack(side='bottom')
    tkinter.mainloop()

if __name__ == '__main__':
    main()
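The file's docstring promises a better user experience by moving slow work into a thread, but download() above blocks the Tk event loop for the full 10 seconds; a minimal threaded variant (a sketch, not part of the original file) could look like this:

import threading

def download_async():
    def task():
        time.sleep(10)  # the slow work now runs off the UI thread
        tkinter.messagebox.showinfo('Info', 'Download complete')
    threading.Thread(target=task, daemon=True).start()
    # note: strictly, Tk widgets should only be touched from the main thread;
    # production code would hand completion back to the UI via widget.after()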
| 20.078947
| 67
| 0.621232
| 86
| 763
| 5.383721
| 0.55814
| 0.110151
| 0.107991
| 0.095032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025168
| 0.218873
| 763
| 37
| 68
| 20.621622
| 0.751678
| 0.081258
| 0
| 0
| 0
| 0
| 0.08855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc652014fdf4755fbb2d576c8ff7469edba046ae
| 3,250
|
py
|
Python
|
hangupsbot/sinks/gitlab/simplepush.py
|
mygreentour/hangoutsbot
|
9ea2da10f546e6f1dd06c8240187049501c5452a
|
[
"Unlicense"
] | null | null | null |
hangupsbot/sinks/gitlab/simplepush.py
|
mygreentour/hangoutsbot
|
9ea2da10f546e6f1dd06c8240187049501c5452a
|
[
"Unlicense"
] | null | null | null |
hangupsbot/sinks/gitlab/simplepush.py
|
mygreentour/hangoutsbot
|
9ea2da10f546e6f1dd06c8240187049501c5452a
|
[
"Unlicense"
] | null | null | null |
"""
GitLab webhook receiver - see http://doc.gitlab.com/ee/web_hooks/web_hooks.html
"""
import asyncio
import json
import logging
from sinks.base_bot_request_handler import AsyncRequestHandler
logger = logging.getLogger(__name__)
try:
import dateutil.parser
except ImportError:
logger.error("missing module python_dateutil: pip3 install python_dateutil")
raise
class webhookReceiver(AsyncRequestHandler):
"""Receive REST API posts from GitLab"""
_bot = None
@asyncio.coroutine
def process_request(self, path, dummy_query_string, content):
"""Process a received POST to a given converstation"""
path = path.split("/")
conv_or_user_id = path[1]
if conv_or_user_id is None:
logger.error("conversation or user id must be provided as part of path")
return
try:
    payload = json.loads(content)
except json.JSONDecodeError as err:
    logger.exception("invalid payload @%d:%d: %s", err.lineno, err.colno, err)
    return  # payload is unusable; without this return the code below would raise NameError
logger.error("GitLab message: %s", json.dumps(payload))
refs = payload.get("ref", '').split("/")
user = payload.get("user_name")
if not user:
user = payload["user"]["name"]
message = ["GitLab update for [{}]({}) by __{}__".format(
payload["project"]["name"], payload["project"]["web_url"], user)]
if payload["object_kind"] == "push":
message.append("Pushed {} commit(s) on {} branch:".format(
payload["total_commits_count"], "/".join(refs[2:])))
for commit in payload["commits"]:
message.append("{} -- {} at [{:%c}]({})".format(
commit["message"], commit["author"]["name"],
dateutil.parser.parse(commit["timestamp"]), commit["url"]))
elif payload["object_kind"] == "tag_push":
message.append("Pushed tag {}]".format("/".join(refs[2:])))
elif payload["object_kind"] == "issue":
issue = payload["object_attributes"]
message.append("Update {} issue {} at {:%c}\n[{}]({})".format(
issue["state"], issue["id"],
dateutil.parser.parse(issue["updated_at"]),
issue["title"], issue["url"]))
elif payload["object_kind"] == "note":
note = payload["object_attributes"]
message.append("{} note on {}: [{}]({})".format(
note["notable_type"], note["id"], note["note"], note["url"]))
elif payload["object_kind"] == "merge_request":
request = payload["object_attributes"]
message.append("Merge request {}: from [{}:{}]({}) to [{}:{}]({})".format(
request["id"],
request["source"]["name"], request["source_branch"], request["source"]["web_url"],
request["target"]["name"], request["target_branch"], request["target"]["web_url"]))
else:
message.append("{}: unknown gitlab webhook object kind".format(payload["object_kind"]))
logger.warning("%s: unknown gitlab webhook object kind", payload["object_kind"])
if message:
yield from self.send_data(conv_or_user_id, "\n".join(message))
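As reconstructed from the key accesses above, a minimal push payload this handler accepts looks like the following (all values are illustrative):

sample_push = {
    "object_kind": "push",
    "ref": "refs/heads/master",
    "user_name": "jdoe",
    "project": {"name": "demo", "web_url": "https://gitlab.example.com/demo"},
    "total_commits_count": 1,
    "commits": [{
        "message": "fix typo",
        "author": {"name": "jdoe"},
        "timestamp": "2016-01-01T12:00:00+00:00",
        "url": "https://gitlab.example.com/demo/commit/abc123",
    }],
}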
| 37.790698
| 99
| 0.577538
| 356
| 3,250
| 5.129213
| 0.382022
| 0.071194
| 0.06517
| 0.046002
| 0.131435
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001647
| 0.252923
| 3,250
| 85
| 100
| 38.235294
| 0.750412
| 0.050154
| 0
| 0.033333
| 0
| 0
| 0.273379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.1
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc65af7557f0841ee2695968775683b6f5578bc6
| 19,786
|
py
|
Python
|
tei_entity_enricher/interface/postprocessing/gnd_connector.py
|
NEISSproject/TEIEntityEnricher
|
09a4a932b30886e50965959935dc803b36063e36
|
[
"Apache-2.0"
] | null | null | null |
tei_entity_enricher/interface/postprocessing/gnd_connector.py
|
NEISSproject/TEIEntityEnricher
|
09a4a932b30886e50965959935dc803b36063e36
|
[
"Apache-2.0"
] | null | null | null |
tei_entity_enricher/interface/postprocessing/gnd_connector.py
|
NEISSproject/TEIEntityEnricher
|
09a4a932b30886e50965959935dc803b36063e36
|
[
"Apache-2.0"
] | 1
|
2021-04-27T13:55:29.000Z
|
2021-04-27T13:55:29.000Z
|
import os
from typing import Union, List
from tei_entity_enricher.interface.postprocessing.io import FileReader, FileWriter
from tei_entity_enricher.util.helper import local_save_path, makedir_if_necessary
from tei_entity_enricher.util.exceptions import FileNotFound
class GndConnector:
def __init__(
self,
gnd_id: Union[str, List[str], None] = None,
apiindex: int = 0,
check_connectivity: bool = True,
show_printmessages: bool = True,
) -> None:
"""establishes connection to api, from which norm data for entities of Deutsche Nationalbibliothek´s database is retrieved,
loaded data can be passed to an instance of Cache class for further processing or FileWriter class to save it
gnd_id:
gnd id number(s)
apiindex:
index of selected api in list defined in self.apilist
check_connectivity:
execute connectivity check in __init__() or not (see connectivitycheck_loop())
show_printmessages:
show class internal printmessages on runtime or not
apilist_filepath:
path to apilist config file
apilist:
list of dicts as configuration data set, delivers a mapping to be able to normalize data from different apis, defines api's url and aliases for filtering purposes (see get_gnd_data())
connection_established:
data from an api has already been received or not
remaining_apis_to_check:
list of apiindex values, which have not been checked yet in connectivitycheck_loop()"""
print("initializing GndConnector..") if show_printmessages else None
self.show_printmessages: bool = show_printmessages
self.gnd_id: Union[str, List[str], None] = gnd_id
self.apiindex: int = apiindex
self.apilist_filepath: str = os.path.join(local_save_path, "config", "postprocessing", "gnd_apilist.json")
try:
self.apilist: Union[dict, None] = FileReader(
filepath=self.apilist_filepath, origin="local", internal_call=True, show_printmessages=False
).loadfile_json()
except FileNotFound:
print(
"GndConnector: could not find gnd_apilist.json in config dir. creating file with default settings..."
) if self.show_printmessages else None
self.apilist: List[dict] = [
{
"name": "culturegraph",
"baseUrl": "https://hub.culturegraph.org/entityfacts/{}",
"baseAliases": {
"type": [
"@type",
"str",
"categorial",
{
"person": "person",
"organisation": "organisation",
"place": "place",
},
],
"name": ["preferredName", "str", "nominal"],
"furtherNames": ["variantName", ["str"], "nominal"],
"sameAs": ["sameAs", [{"@id": "str"}], "nominal"],
"pseudonyms": [
"pseudonym",
[{"preferredName": "str"}],
"nominal",
],
},
"personAliases": {},
"placeAliases": {},
"organizationAliases": {},
},
{
"name": "lobid",
"baseUrl": "http://lobid.org/gnd/{}",
"baseAliases": {
"type": [
"type",
["str"],
"categorial",
{
"person": "Person",
"organisation": "CorporateBody",
"place": "PlaceOrGeographicName",
},
],
"name": ["preferredName", "str", "nominal"],
"furtherNames": ["variantName", ["str"], "nominal"],
"sameAs": ["sameAs", [{"id": "str"}], "nominal"],
"pseudonyms": [
"variantNameEntityForThePerson",
[{"forename": ["str"], "surname": ["str"]}],
"nominal",
],
},
"personAliases": {},
"placeAliases": {},
"organizationAliases": {},
},
]
self.apiindex: int = 0
try:
makedir_if_necessary(os.path.dirname(self.apilist_filepath))
FileWriter(data=self.apilist, filepath=self.apilist_filepath).writefile_json()
except:
print(
f"GndConnector __init__(): could not create default gnd_apilist.json in config folder."
) if self.show_printmessages == True else None
self.check_connectivity: bool = check_connectivity
self.connection_established: bool = False
self.remaining_apis_to_check: list = [i for i, _ in enumerate(self.apilist)]
if self.check_connectivity == True:
self.connectivitycheck_loop()
else:
print(
"GndConnector: initialization has been done without connectivity check."
) if self.show_printmessages else None
def connectivitycheck_single(self, index_to_test: int, gnd_id_to_test: str = "118540238") -> bool:
"""auxiliary method of connectivitycheck_loop(),
checks a single api's (from self.apilist) response status code and whether the response data type is json,
preset gnd_id_to_test value refers to Goethe"""
try:
result: dict = FileReader(
filepath=self.apilist[index_to_test]["baseUrl"].format(gnd_id_to_test),
origin="web",
internal_call=True,
show_printmessages=self.show_printmessages,
).loadfile_json()
except:
return False
if type(result) == dict:
return True
return False
def connectivitycheck_loop(self) -> int:
"""recursive connectivity check, checking every single api in self.apilist (ascending)
and setting self.apiindex to the value of those api, which is first to pass the check successfully.
returns 0 or -1 for unittest purposes"""
if self.check_connectivity == False:
    self.check_connectivity = True  # was '==', a no-op comparison; record that the check has now been run
if len(self.remaining_apis_to_check) > 0:
if self.connectivitycheck_single(self.remaining_apis_to_check[0]) == True:
print(
f"GndConnector: connectivity check passed, connection to {self.apilist[self.remaining_apis_to_check[0]]['name']} api established."
) if self.show_printmessages else None
self.apiindex = self.remaining_apis_to_check[0]
self.remaining_apis_to_check = [i for i, _ in enumerate(self.apilist)]
self.connection_established = True
return 0
else:
print(
f"GndConnector connectivity check: {self.apilist[self.remaining_apis_to_check[0]]['name']} api is currently not responding as expected. checking for alternatives..."
) if self.show_printmessages else None
self.remaining_apis_to_check.remove(self.remaining_apis_to_check[0])
self.connectivitycheck_loop()
else:
print(
"GndConnector connectivity check error: none of the listed apis is responding as expected."
) if self.show_printmessages else None
return -1
def print_complete_url(self, index: int = 0) -> int:
"""print baseUrl string of the currently selected api defined in self.apilist,
formatted with a gnd id number of self.gnd_id (list or str) selected by index value.
returns 0 or -1 for unittest purposes"""
if self.apiindex not in [i for i, _ in enumerate(self.apilist)]:
print(
"GndConnector print_complete_url() error: apiindex is not defined correctly. using default api..."
) if self.show_printmessages else None
self.apiindex = 0
if self.gnd_id is not None:
if type(self.gnd_id) == str:
print(
f"GndConnector complete URL: {self.apilist[self.apiindex]['baseUrl'].format(self.gnd_id)}"
) if self.show_printmessages else None
elif type(self.gnd_id) == list:
print(
f"GndConnector complete URL of gnd id number {index + 1} in passed gnd id list: {self.apilist[self.apiindex]['baseUrl'].format(self.gnd_id[index])}"
) if self.show_printmessages else None
return 0
else:
print(
"GndConnector print_complete_url() internal error: no gnd id number has been passed to connector object yet."
) if self.show_printmessages else None
return -1
def return_complete_url(self, index: int = 0) -> Union[str, None]:
"""return baseUrl string of the currently selected api defined in self.apilist,
formatted with a gnd id number of self.gnd_id (list or str) selected by index value"""
if self.apiindex not in [i for i, _ in enumerate(self.apilist)]:
print(
"GndConnector return_complete_url() error: apiindex is not defined correctly. using default api..."
) if self.show_printmessages else None
self.apiindex = 0
if self.gnd_id is not None:
if type(self.gnd_id) == str:
return self.apilist[self.apiindex]["baseUrl"].format(self.gnd_id)
elif type(self.gnd_id) == list:
return self.apilist[self.apiindex]["baseUrl"].format(self.gnd_id[index])
else:
print(
"GndConnector return_complete_url() internal error: no gnd id number has been passed to connector object yet."
) if self.show_printmessages else None
return None
def get_gnd_data(self, data_selection: Union[str, List[str], None] = None) -> Union[dict, None]:
"""method to receive data from api with the possibility to filter results,
a dict is created, having gnd id numbers as keys and filtered or unfiltered response json data as values
data_selection:
if delivered, a normalized output is generated by renaming keys and re-sorting data from different keys from the raw data into new keys (purpose: json data delivered by different apis comes in different key-value-structures; normalization of this data is achieved with the help of key-value mapping information stored in self.apilist)
can be "base" (all baseAliases data is provided: "type", "name", "furtherNames", "sameAs", "pseudonyms")
can be a list of one or more baseAliases (i.e. ["type", "name"])
(not yet implemented: can be "person", "place", "organization" or a custom string referring to a user-defined set of keys, for which the mapping is provided in self.apilist)
"""
if self.check_connectivity == False:
print(
f"GndConnector note: connections to apis have not been checked yet. to do so manually execute connectivitycheck_loop() method of the current connector object. continuing attempt to receive gnd data from {self.apilist[self.apiindex]['name']} api..."
) if self.show_printmessages else None
elif self.connection_established == False:
print(
"GndConnector connectivity error: after connectivity check no connection could has been established to any of the available apis. gnd data queries can not be executed at the moment."
) if self.show_printmessages else None
return None
result = {}
if type(self.gnd_id) == str:
_temp_data = {}
try:
filereader = FileReader(
filepath=self.return_complete_url(), origin="web", internal_call=True, show_printmessages=False
)
_temp_data = filereader.loadfile_json()
except:
print(
"GndConnector connectivity error in get_gnd_data() method: could not load resource from api as expected."
) if self.show_printmessages else None
return None
self.connection_established = True
if _temp_data != None and _temp_data != False:
result[self.gnd_id] = _temp_data
print(
f"GndConnector get_gnd_data() status: data for gnd id {self.gnd_id} received."
) if self.show_printmessages else None
else:
print(
f"GndConnector get_gnd_data() status: for gnd id {self.gnd_id} no data could be delivered by api"
) if self.show_printmessages else None
return None
elif type(self.gnd_id) == list:
for index, gnd in enumerate(self.gnd_id):
_temp_data = {}
try:
filereader = FileReader(
filepath=self.return_complete_url(index),
origin="web",
internal_call=True,
show_printmessages=True,
)
_temp_data = filereader.loadfile_json()
except:
print(
f"GndConnector get_gnd_data() status: for gnd id {index + 1} ({gnd}) of {len(self.gnd_id)} no data could be delivered by api"
) if self.show_printmessages else None
result[gnd] = _temp_data
print(
f"GndConnector get_gnd_data() status: gnd id {index + 1} ({gnd}) of {len(self.gnd_id)} processed"
) if self.show_printmessages else None
self.connection_established = True
# filtering: build new dict with selected values, which should be returned (base mode = all base aliases from apilist definition. list mode = select specific aliases from base set)
# defining sub method for filtering
def filter_received_data(gnd_id: str, mode: Union[str, List[str]]) -> dict:
"""sub method, which extracts the key-value pairs from the raw data received from api for one gnd id number and renames the keys and/or values.
alias definitions in self.apilist are used for this filtering process:
the keys of 'baseAliases' dict define the new key names, their value list denotes (in order of the list)
1. the original key name,
2. the original value type (python-wise: i.e. 'str' or '[str]'),
3. the original value type (logic-wise: 'categorial' or 'nominal'),
4. a categorization dict, if the original value type logic-wise is 'categorial':
it delivers mapping information to assign a category (defined keys of this mapping dict) based on specific values (defined in the values of this mapping dict) found in raw data,
example 1: using culturegraph api the value of the base category 'type' is assigned to 'person', if the raw data json object has a key '@type' with the value 'person' of type str,
example 2: using lobid api the value of the base category 'type' is assigned to 'person', if the raw data json object has a key 'type' with a list as a value, which has itself a value 'Person' of type str in it,
mode parameter accepts str 'base' (all base aliases will be extracted) or a list of str (specific aliases will be extracted)"""
# todo: handle additional alias definition sets in gnd_apilist.json by user
# category_sets = {'base': [list(self.apilist[self.apiindex]["baseAliases"].keys()), 'baseAliases'],
# 'custom': [list(self.apilist[self.apiindex]["custom"].keys()), 'custom']
# }
# selected_categories_list = category_sets.get(mode)[0] if type(mode) == str else mode
# selected_categories_alias = category_sets.get(mode)[1] if type(mode) == str else 'baseAliases'
# => allow parsing a list of categories to get_gnd_data() only if they are defined in baseAlias set?
base_categories = list(self.apilist[self.apiindex]["baseAliases"].keys())
selected_categories = base_categories if mode == "base" else mode
selected_categories_data = {}
for category in selected_categories:
_temp_data = []
try:
_temp_data = result[gnd_id][self.apilist[self.apiindex]["baseAliases"][category][0]]
except KeyError:
_temp_data = []
print(
f"GndConnector get_gnd_data() filtering note: could not find {category} information for {gnd_id} in raw data. continuing processing..."
) if self.show_printmessages else None
# handling of categorical data types
if (
len(_temp_data) > 0
and self.apilist[self.apiindex]["baseAliases"][category][2] == "categorial"
and type(self.apilist[self.apiindex]["baseAliases"][category][3]) == dict
):
_temp_category_data_form = self.apilist[self.apiindex]["baseAliases"][category][1]
_temp_categorial_values = self.apilist[self.apiindex]["baseAliases"][category][3]
# change found categorial string to self-defined string (i.e. 'Person' to 'person')
if type(_temp_category_data_form) == str:
for _type in _temp_categorial_values:
if _temp_data == _temp_categorial_values[_type]:
_temp_data = _type
# replace found categorial list with self-defined string (i.e. ['Person', 'PoliticalLeader'] to 'person')
elif type(_temp_category_data_form) == list:
for _type in _temp_categorial_values:
if _temp_categorial_values[_type] in _temp_data:
_temp_data = _type
selected_categories_data[category] = _temp_data
return selected_categories_data
# executing sub method for filtering
if data_selection is not None:
if type(self.gnd_id) == str:
_new_dict = {list(result.keys())[0]: filter_received_data(self.gnd_id, data_selection)}
elif type(self.gnd_id) == list:
_new_dict = {}
for key in result:
_new_dict[key] = filter_received_data(key, data_selection)
result = _new_dict
return result
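A usage sketch based on the constructor and get_gnd_data() docstrings above; the gnd id is the Goethe test id that connectivitycheck_single() already uses, and the exact shape of the printed result is an assumption:

connector = GndConnector(gnd_id="118540238", check_connectivity=True)
data = connector.get_gnd_data(data_selection=["type", "name"])
# expected on success: {"118540238": {"type": "person", "name": "..."}}; None on failure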
| 58.712166
| 347
| 0.564237
| 2,159
| 19,786
| 5.027327
| 0.159333
| 0.02119
| 0.02073
| 0.046066
| 0.445826
| 0.364842
| 0.30339
| 0.235397
| 0.200755
| 0.170905
| 0
| 0.00368
| 0.354493
| 19,786
| 336
| 348
| 58.886905
| 0.84607
| 0.250278
| 0
| 0.483019
| 0
| 0.033962
| 0.223564
| 0.027697
| 0
| 0
| 0
| 0.002976
| 0
| 1
| 0.026415
| false
| 0.015094
| 0.018868
| 0
| 0.10566
| 0.188679
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc66bbff24da2cc4aab8ede584053c2dba3e5cf5
| 440
|
py
|
Python
|
inference/_archive/render_section.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 4
|
2018-12-17T18:45:57.000Z
|
2021-04-29T16:30:42.000Z
|
inference/_archive/render_section.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 19
|
2019-01-02T19:09:12.000Z
|
2020-12-14T18:50:47.000Z
|
inference/_archive/render_section.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 2
|
2020-03-18T01:24:03.000Z
|
2022-01-06T06:19:58.000Z
|
from args import get_argparser, parse_args, get_aligner, get_bbox
def render(aligner, bbox, z):
aligner.total_bbox = bbox
aligner.zs = z
aligner.render_section_all_mips(z, bbox)
if __name__ == '__main__':
parser = get_argparser()
args = parse_args(parser)
a = get_aligner(args)
bbox = get_bbox(args)
for z in range(args.bbox_start[2], args.bbox_stop[2]):
print('Rendering z={0}'.format(z))
render(a, bbox, z)
| 24.444444
| 66
| 0.697727
| 70
| 440
| 4.071429
| 0.442857
| 0.084211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008242
| 0.172727
| 440
| 17
| 67
| 25.882353
| 0.774725
| 0
| 0
| 0
| 0
| 0
| 0.052392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc6934a711c5b2c64314e9faedf3a6f0838f298a
| 52,806
|
py
|
Python
|
venv/lib/python3.9/site-packages/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.spanner.admin.instance.v1 InstanceAdmin API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.spanner_admin_instance_v1.gapic import enums
from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client_config
from google.cloud.spanner_admin_instance_v1.gapic.transports import (
instance_admin_grpc_transport,
)
from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2
from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2_grpc
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import options_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version
class InstanceAdminClient(object):
"""
Cloud Spanner Instance Admin API
The Cloud Spanner Instance Admin API can be used to create, delete,
modify and list instances. Instances are dedicated Cloud Spanner serving
and storage resources to be used by Cloud Spanner databases.
Each instance has a "configuration", which dictates where the
serving resources for the Cloud Spanner instance are located (e.g.,
US-central, Europe). Configurations are created by Google based on
resource availability.
Cloud Spanner billing is based on the instances that exist and their
sizes. After an instance exists, there are no additional
per-database or per-operation charges for use of the instance
(though there may be additional network bandwidth charges).
Instances offer isolation: problems with databases in one instance
will not affect other instances. However, within an instance
databases can affect each other. For example, if one database in an
instance receives a lot of requests and consumes most of the
instance resources, fewer resources are available for other
databases in that instance, and their performance may suffer.
"""
SERVICE_ADDRESS = "spanner.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.spanner.admin.instance.v1.InstanceAdmin"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
InstanceAdminClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def instance_path(cls, project, instance):
"""Return a fully-qualified instance string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}",
project=project,
instance=instance,
)
@classmethod
def instance_config_path(cls, project, instance_config):
"""Return a fully-qualified instance_config string."""
return google.api_core.path_template.expand(
"projects/{project}/instanceConfigs/{instance_config}",
project=project,
instance_config=instance_config,
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.InstanceAdminGrpcTransport,
Callable[[~.Credentials, type], ~.InstanceAdminGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = instance_admin_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=instance_admin_grpc_transport.InstanceAdminGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_instance(
self,
parent,
instance_id,
instance,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates an instance and begins preparing it to begin serving. The
returned ``long-running operation`` can be used to track the progress of
preparing the new instance. The instance name is assigned by the caller.
If the named instance already exists, ``CreateInstance`` returns
``ALREADY_EXISTS``.
Immediately upon completion of this request:
- The instance is readable via the API, with all requested attributes
but no allocated resources. Its state is ``CREATING``.
Until completion of the returned operation:
- Cancelling the operation renders the instance immediately unreadable
via the API.
- The instance can be deleted.
- All other attempts to modify the instance are rejected.
Upon completion of the returned operation:
- Billing for all successfully-allocated resources begins (some types
may have lower than the requested levels).
- Databases can be created in the instance.
- The instance's allocated resource levels are readable via the API.
- The instance's state becomes ``READY``.
The returned ``long-running operation`` will have a name of the format
``<instance_name>/operations/<operation_id>`` and can be used to track
creation of the instance. The ``metadata`` field type is
``CreateInstanceMetadata``. The ``response`` field type is ``Instance``,
if successful.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `instance_id`:
>>> instance_id = ''
>>>
>>> # TODO: Initialize `instance`:
>>> instance = {}
>>>
>>> response = client.create_instance(parent, instance_id, instance)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the project in which to create the instance.
Values are of the form ``projects/<project>``.
instance_id (str): Required. The ID of the instance to create. Valid identifiers are of
the form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 2 and 64
characters in length.
instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to create. The name may be omitted, but if
specified must be ``<parent>/instances/<instance_id>``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.operation.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_instance" not in self._inner_api_calls:
self._inner_api_calls[
"create_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_instance,
default_retry=self._method_configs["CreateInstance"].retry,
default_timeout=self._method_configs["CreateInstance"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.CreateInstanceRequest(
parent=parent, instance_id=instance_id, instance=instance
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["create_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
spanner_instance_admin_pb2.Instance,
metadata_type=spanner_instance_admin_pb2.CreateInstanceMetadata,
)
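    # Sketch (not part of the generated client): the returned
    # google.api_core.operation.Operation is a future, so besides the
    # add_done_callback() flow shown in the docstring you can block on it:
    #     operation = client.create_instance(parent, instance_id, instance)
    #     instance = operation.result(timeout=300)  # waits; re-raises on error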
def update_instance(
self,
instance,
field_mask,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates an instance, and begins allocating or releasing resources as
requested. The returned ``long-running operation`` can be used to track
the progress of updating the instance. If the named instance does not
exist, returns ``NOT_FOUND``.
Immediately upon completion of this request:
- For resource types for which a decrease in the instance's allocation
has been requested, billing is based on the newly-requested level.
Until completion of the returned operation:
- Cancelling the operation sets its metadata's ``cancel_time``, and
begins restoring resources to their pre-request values. The operation
is guaranteed to succeed at undoing all resource changes, after which
point it terminates with a ``CANCELLED`` status.
- All other attempts to modify the instance are rejected.
- Reading the instance via the API continues to give the pre-request
resource levels.
Upon completion of the returned operation:
- Billing begins for all successfully-allocated resources (some types
may have lower than the requested levels).
- All newly-reserved resources are available for serving the instance's
tables.
- The instance's new resource levels are readable via the API.
The returned ``long-running operation`` will have a name of the format
``<instance_name>/operations/<operation_id>`` and can be used to track
the instance modification. The ``metadata`` field type is
``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``,
if successful.
Authorization requires ``spanner.instances.update`` permission on
resource ``name``.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `instance`:
>>> instance = {}
>>>
>>> # TODO: Initialize `field_mask`:
>>> field_mask = {}
>>>
>>> response = client.update_instance(instance, field_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the
instance name. Otherwise, only fields mentioned in ``field_mask`` need
be included.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in ``Instance`` should be
updated. The field mask must always be specified; this prevents any
future fields in ``Instance`` from being erased accidentally by clients
that do not know about them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.operation.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_instance" not in self._inner_api_calls:
self._inner_api_calls[
"update_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_instance,
default_retry=self._method_configs["UpdateInstance"].retry,
default_timeout=self._method_configs["UpdateInstance"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.UpdateInstanceRequest(
instance=instance, field_mask=field_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("instance.name", instance.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["update_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
spanner_instance_admin_pb2.Instance,
metadata_type=spanner_instance_admin_pb2.UpdateInstanceMetadata,
)
def list_instance_configs(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the supported instance configurations for a given project.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_instance_configs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_instance_configs(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The name of the project for which a list of supported
instance configurations is requested. Values are of the form
``projects/<project>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_instance_configs" not in self._inner_api_calls:
self._inner_api_calls[
"list_instance_configs"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_instance_configs,
default_retry=self._method_configs["ListInstanceConfigs"].retry,
default_timeout=self._method_configs["ListInstanceConfigs"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.ListInstanceConfigsRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_instance_configs"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="instance_configs",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_instance_config(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets information about a particular instance configuration.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> name = client.instance_config_path('[PROJECT]', '[INSTANCE_CONFIG]')
>>>
>>> response = client.get_instance_config(name)
Args:
name (str): Required. The name of the requested instance configuration. Values
are of the form ``projects/<project>/instanceConfigs/<config>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_instance_config" not in self._inner_api_calls:
self._inner_api_calls[
"get_instance_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_instance_config,
default_retry=self._method_configs["GetInstanceConfig"].retry,
default_timeout=self._method_configs["GetInstanceConfig"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.GetInstanceConfigRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_instance_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_instances(
self,
parent,
page_size=None,
filter_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all instances in the given project.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_instances(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_instances(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The name of the project for which a list of instances is
requested. Values are of the form ``projects/<project>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): An expression for filtering the results of the request. Filter rules
are case insensitive. The fields eligible for filtering are:
- ``name``
- ``display_name``
- ``labels.key`` where key is the name of a label
Some examples of using filters are:
- ``name:*`` --> The instance has a name.
- ``name:Howl`` --> The instance's name contains the string "howl".
- ``name:HOWL`` --> Equivalent to above.
- ``NAME:howl`` --> Equivalent to above.
- ``labels.env:*`` --> The instance has the label "env".
- ``labels.env:dev`` --> The instance has the label "env" and the value
of the label contains the string "dev".
- ``name:howl labels.env:dev`` --> The instance's name contains "howl"
and it has the label "env" with its value containing "dev".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_instances" not in self._inner_api_calls:
self._inner_api_calls[
"list_instances"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_instances,
default_retry=self._method_configs["ListInstances"].retry,
default_timeout=self._method_configs["ListInstances"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.ListInstancesRequest(
parent=parent, page_size=page_size, filter=filter_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_instances"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="instances",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_instance(
self,
name,
field_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets information about a particular instance.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> response = client.get_instance(name)
Args:
name (str): Required. The name of the requested instance. Values are of the form
``projects/<project>/instances/<instance>``.
field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): If field_mask is present, specifies the subset of ``Instance``
fields that should be returned. If absent, all ``Instance`` fields are
returned.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_instance" not in self._inner_api_calls:
self._inner_api_calls[
"get_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_instance,
default_retry=self._method_configs["GetInstance"].retry,
default_timeout=self._method_configs["GetInstance"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.GetInstanceRequest(
name=name, field_mask=field_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_instance(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes an instance.
Immediately upon completion of the request:
- Billing ceases for all of the instance's reserved resources.
Soon afterward:
- The instance and *all of its databases* immediately and irrevocably
disappear from the API. All data in the databases is permanently
deleted.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> client.delete_instance(name)
Args:
name (str): Required. The name of the instance to be deleted. Values are of the
form ``projects/<project>/instances/<instance>``
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_instance" not in self._inner_api_calls:
self._inner_api_calls[
"delete_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_instance,
default_retry=self._method_configs["DeleteInstance"].retry,
default_timeout=self._method_configs["DeleteInstance"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.DeleteInstanceRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_iam_policy(
self,
resource,
policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the access control policy on an instance resource. Replaces any
existing policy.
Authorization requires ``spanner.instances.setIamPolicy`` on
``resource``.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `resource`:
>>> resource = ''
>>>
>>> # TODO: Initialize `policy`:
>>> policy = {}
>>>
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (str): REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this field.
policy (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.Policy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"set_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_iam_policy,
default_retry=self._method_configs["SetIamPolicy"].retry,
default_timeout=self._method_configs["SetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["set_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_iam_policy(
self,
resource,
options_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the access control policy for an instance resource. Returns an
empty policy if an instance exists but does not have a policy set.
Authorization requires ``spanner.instances.getIamPolicy`` on
``resource``.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `resource`:
>>> resource = ''
>>>
>>> response = client.get_iam_policy(resource)
Args:
resource (str): REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this field.
options_ (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to
``GetIamPolicy``. This field is only used by Cloud IAM.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"get_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_iam_policy,
default_retry=self._method_configs["GetIamPolicy"].retry,
default_timeout=self._method_configs["GetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource, options=options_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def test_iam_permissions(
self,
resource,
permissions,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns permissions that the caller has on the specified instance
resource.
Attempting this RPC on a non-existent Cloud Spanner instance resource
will result in a NOT_FOUND error if the user has
``spanner.instances.list`` permission on the containing Google Cloud
Project. Otherwise returns an empty set of permissions.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `resource`:
>>> resource = ''
>>>
>>> # TODO: Initialize `permissions`:
>>> permissions = []
>>>
>>> response = client.test_iam_permissions(resource, permissions)
Args:
resource (str): REQUIRED: The resource for which the policy detail is being requested.
See the operation documentation for the appropriate value for this field.
permissions (list[str]): The set of permissions to check for the ``resource``. Permissions
with wildcards (such as '*' or 'storage.*') are not allowed. For more
information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types.TestIamPermissionsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "test_iam_permissions" not in self._inner_api_calls:
self._inner_api_calls[
"test_iam_permissions"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.test_iam_permissions,
default_retry=self._method_configs["TestIamPermissions"].retry,
default_timeout=self._method_configs["TestIamPermissions"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["test_iam_permissions"](
request, retry=retry, timeout=timeout, metadata=metadata
)
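# The docstrings above describe the retry/timeout semantics in the abstract;
# the commented sketch below shows how the two knobs compose in practice. It
# is illustrative only: it assumes default credentials and a reachable
# project, and the deadline/backoff numbers are made up.
#
# from google.api_core import retry as retries
# from google.cloud import spanner_admin_instance_v1
#
# client = spanner_admin_instance_v1.InstanceAdminClient()
# name = client.instance_path('[PROJECT]', '[INSTANCE]')
#
# # With a Retry in play, ``timeout`` bounds each individual attempt, while
# # the Retry's deadline bounds the total time across all attempts.
# custom_retry = retries.Retry(initial=0.25, maximum=8.0, deadline=120.0)
# instance = client.get_instance(name, retry=custom_retry, timeout=30.0)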
[file metrics: avg line length 43.14; max line length 165; 52,806 bytes; 1,223 lines]
bc69b9f3ab057490f4ec7854149028c2c310ae9c | 31,196 | py | Python | src/ScaleHD/__backend.py | helloabunai/ScaleHD | b48c1a1ed742bdbda0a4cd42555d1e12d2e3024d | ["MIT"] | stars: 3 (2017-07-03T19:45:13.000Z to 2020-05-12T16:56:19.000Z) | issues: 1 (2019-06-21T14:49:50.000Z to 2019-06-24T08:24:37.000Z) | forks: 2 (2017-06-05T21:56:36.000Z to 2021-03-22T20:34:13.000Z)
#!/usr/bin/python
__version__ = '1.0'
__author__ = 'alastair.maxwell@glasgow.ac.uk'
##
## Imports
import string
import os
import errno
import shutil
import sys
import glob
import datetime
import subprocess
import logging as log
import numpy as np
import csv
from io import StringIO
import PyPDF2
from sklearn import preprocessing
from collections import defaultdict
from xml.etree import cElementTree
from lxml import etree
from reportlab.pdfgen import canvas
class Colour:
def __init__(self):
pass
purple = '\033[95m'
cyan = '\033[96m'
darkcyan = '\033[36m'
blue = '\033[94m'
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
bold = '\033[1m'
underline = '\033[4m'
end = '\033[0m'
class ConfigReader(object):
"""
The configuration file reader.
Opens a configuration file and, if valid, converts the parameters within the file to a dictionary object,
ready to be viewed by accessing the config_dict variable.
"""
def __init__(self, scriptdir, config_filename=None):
##
## Instance variables
self.scriptdir = scriptdir
self.config_filename = config_filename
self.dtd_filename = scriptdir + "/config/config.dtd"
##
## Check for configuration file (just in case)
if self.config_filename is None:
log.error("No configuration file specified!")
else:
self.config_file = etree.parse(self.config_filename)
##
## Check config vs dtd, parse info to dictionary, validate vs ruleset
self.validate_against_dtd()
self.set_dictionary()
self.validate_config()
def validate_against_dtd(self):
"""
Validate input config against DTD ruleset
i.e. confirms conformation of XML structure
"""
##
## Open > etree.DTD object
dtd_file = open(self.dtd_filename, 'r')
dtd_object = etree.DTD(dtd_file)
##
## If validation fails, close the object (memory) and raise an error
if not dtd_object.validate(self.config_file):
dtd_file.close()
log.error("DTD validation failure {0}: {1}".format(self.config_filename, dtd_object.error_log.filter_from_errors()[0]))
sys.exit(2)
dtd_file.close()
def set_dictionary(self):
"""
Takes the now validated XML and extracts information from the tree into
a python dictionary {key: value}. This dictionary will be used for variables
within the pipeline. Recursion adapted from http://stackoverflow.com/a/9286702
"""
def recursive_generation(t):
d = {t.tag: {} if t.attrib else None}
children = list(t)
##
## If list was populated, create dictionary, append keys
if children:
dd = defaultdict(list)
for dc in map(recursive_generation, children):
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
##
## Values for key
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
##
## Takes the formatted xml doc, puts through generator, returns dictionary
string_repr = etree.tostring(self.config_file, pretty_print=True)
element_tree = cElementTree.XML(string_repr)
self.config_dict = recursive_generation(element_tree)
self.config_dict = self.config_dict[list(self.config_dict.keys())[0]]
def validate_config(self):
"""
Method which validates the configuration file's contents.
If all pass, guarantees that the settings dictionary is full of valid settings!
"""
trigger = False
##
## Main configuration instance settings
data_directory = self.config_dict['@data_dir']
if not os.path.exists(data_directory):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified data directory could not be found.'))
trigger = True
for fqfile in glob.glob(os.path.join(data_directory, '*')):
if not (fqfile.endswith('.fq') or fqfile.endswith('.fastq') or fqfile.endswith('.fq.gz') or fqfile.endswith('.fastq.gz')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Non FastQ/GZ data detected in specified input directory.'))
trigger = True
forward_reference = self.config_dict['@forward_reference']
if not os.path.isfile(forward_reference):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified forward reference file could not be found.'))
trigger = True
if not (forward_reference.endswith('.fa') or forward_reference.endswith('.fasta')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified forward reference file is not a fa/fas file.'))
trigger = True
reverse_reference = self.config_dict['@reverse_reference']
if not os.path.isfile(reverse_reference):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified reverse reference file could not be found.'))
trigger = True
if not (reverse_reference.endswith('.fa') or reverse_reference.endswith('.fasta')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified reverse reference file is not a fa/fas file.'))
trigger = True
if forward_reference.split('/')[-1] == reverse_reference.split('/')[-1]:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: FW and RV references have identical filenames. Will create indexing issue.'))
trigger = True
##
## Instance flag settings
demultiplexing_flag = self.config_dict['instance_flags']['@demultiplex']
if not (demultiplexing_flag == 'True' or demultiplexing_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Demultiplexing flag is not set to True/False.'))
trigger = True
sequence_qc_flag = self.config_dict['instance_flags']['@quality_control']
if not (sequence_qc_flag == 'True' or sequence_qc_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Sequence Quality control flag is not set to True/False.'))
trigger = True
alignment_flag = self.config_dict['instance_flags']['@sequence_alignment']
if not (alignment_flag == 'True' or alignment_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Sequence Alignment flag is not set to True/False.'))
trigger = True
atypical_flag = self.config_dict['instance_flags']['@atypical_realignment']
if not (atypical_flag == 'True' or atypical_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Atypical Realignment flag is not True/False.'))
trigger = True
genotype_flag = self.config_dict['instance_flags']['@genotype_prediction']
if not (genotype_flag == 'True' or genotype_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Genotype Prediction control flag is not True/False.'))
trigger = True
snpcall_flag = self.config_dict['instance_flags']['@snp_calling']
if not (snpcall_flag == 'True' or snpcall_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Calling flag is not True/False.'))
trigger = True
##
## Demultiplexing flag settings
trim_adapter_base = ['A', 'G', 'C', 'T']
if demultiplexing_flag == 'True':
forward_adapter = self.config_dict['demultiplex_flags']['@forward_adapter']
for charbase in forward_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in forward_adapter demultiplexing flag.'))
trigger = True
forward_position = self.config_dict['demultiplex_flags']['@forward_position']
if forward_position not in ['5P', '3P', 'AP']:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Given demultiplexing forward adapter position invalid! [5P, 3P, AP]'))
trigger = True
reverse_adapter = self.config_dict['demultiplex_flags']['@reverse_adapter']
for charbase in reverse_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in reverse_adapter demultiplexing flag.'))
trigger = True
reverse_position = self.config_dict['demultiplex_flags']['@reverse_position']
if reverse_position not in ['5P', '3P', 'AP']:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Given demultiplexing reverse adapter position invalid! [5P, 3P, AP]'))
trigger = True
error_rate = self.config_dict['demultiplex_flags']['@error_rate']
if not error_rate.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error_rate is not a valid integer.'))
trigger = True
minimum_overlap = self.config_dict['demultiplex_flags']['@min_overlap']
if not minimum_overlap.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_overlap is not a valid integer.'))
trigger = True
minimum_length = self.config_dict['demultiplex_flags']['@min_length']
if not minimum_length == '':
if not minimum_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_length is not a valid integer.'))
trigger = True
maximum_length = self.config_dict['demultiplex_flags']['@max_length']
if not maximum_length == '':
if not maximum_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified max_length is not a valid integer.'))
trigger = True
##
## Trimming flag settings
if sequence_qc_flag == 'True':
trimming_type = self.config_dict['trim_flags']['@trim_type']
if not (trimming_type == 'Quality' or trimming_type == 'Adapter' or trimming_type == 'Both'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Trimming type is not Quality/Adapter/Both.'))
trigger = True
quality_threshold = self.config_dict['trim_flags']['@quality_threshold']
if not quality_threshold.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified quality threshold integer is invalid.'))
trigger = True
elif not int(quality_threshold) in range(0,39):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified quality threshold integer out of range (0-38).'))
trigger = True
trim_adapters = ['-a','-g','-a$','-g^','-b']
adapter_flag = self.config_dict['trim_flags']['@adapter_flag']
if not (adapter_flag in trim_adapters):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified trimming adapter not valid selection.'))
trigger = True
forward_adapter = self.config_dict['trim_flags']['@forward_adapter']
for charbase in forward_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in FW adapter sequence.'))
trigger = True
reverse_adapter = self.config_dict['trim_flags']['@reverse_adapter']
for charbase in reverse_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in RV adapter sequence.'))
trigger = True
error_tolerance = self.config_dict['trim_flags']['@error_tolerance']
if not isinstance(float(error_tolerance), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error tolerance is not a valid float.'))
trigger = True
if not 0.0 <= float(error_tolerance) <= 1.0:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error tolerance is not 0.0 < x < 1.0.'))
trigger = True
##
## Alignment flag settings
if alignment_flag == 'True':
min_seed_length = self.config_dict['alignment_flags']['@min_seed_length']
if not min_seed_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_seed_length integer is invalid.'))
trigger=True
band_width = self.config_dict['alignment_flags']['@band_width']
if not band_width.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified band_width integer is invalid.'))
trigger=True
seed_length_extension = self.config_dict['alignment_flags']['@seed_length_extension']
if not isinstance(float(seed_length_extension), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seed_length_extension float is invalid.'))
trigger=True
skip_seed_with_occurrence = self.config_dict['alignment_flags']['@skip_seed_with_occurrence']
if not skip_seed_with_occurrence.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified skip_seed_with_occurrence integer is invalid.'))
trigger=True
chain_drop = self.config_dict['alignment_flags']['@chain_drop']
if not isinstance(float(chain_drop), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified chain_drop float is invalid.'))
trigger=True
seeded_chain_drop = self.config_dict['alignment_flags']['@seeded_chain_drop']
if not seeded_chain_drop.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seeded_chain_drop integer is invalid.'))
trigger=True
seq_match_score = self.config_dict['alignment_flags']['@seq_match_score']
if not seq_match_score.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seq_match_score integer is invalid.'))
trigger=True
mismatch_penalty = self.config_dict['alignment_flags']['@mismatch_penalty']
if not mismatch_penalty.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified mismatch_penalty integer is invalid.'))
trigger=True
indel_penalty_raw = self.config_dict['alignment_flags']['@indel_penalty']
indel_penalty = indel_penalty_raw.split(',')
for individual_indelpen in indel_penalty:
if not individual_indelpen.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified indel_penalty integer(s) is(are) invalid.'))
trigger=True
gap_extend_penalty_raw = self.config_dict['alignment_flags']['@gap_extend_penalty']
gap_extend_penalty = gap_extend_penalty_raw.split(',')
for individual_gaextend in gap_extend_penalty:
if not individual_gaextend.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified gap_extend_penalty integer(s) is(are) invalid.'))
trigger=True
prime_clipping_penalty_raw = self.config_dict['alignment_flags']['@prime_clipping_penalty']
prime_clipping_penalty = prime_clipping_penalty_raw.split(',')
for individual_prclip in prime_clipping_penalty:
if not individual_prclip.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified prime_clipping_penalty integer(s) is(are) invalid.'))
trigger=True
unpaired_pairing_penalty = self.config_dict['alignment_flags']['@unpaired_pairing_penalty']
if not unpaired_pairing_penalty.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified unpaired_pairing_penalty integer is invalid.'))
trigger=True
##
## Genotype prediction flag settings
if genotype_flag == 'True':
snp_observation_pcnt = self.config_dict['prediction_flags']['@snp_observation_threshold']
if not snp_observation_pcnt.isdigit() or int(snp_observation_pcnt) not in range(1, 11):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Observation value invalid! Please use 1-10.'))
trigger = True
quality_cutoff = self.config_dict['prediction_flags']['@quality_cutoff']
if not quality_cutoff.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Quality Cutoff value is not an integer.'))
trigger = True
if trigger:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Failure, exiting.'))
sys.exit(2)
else:
log.info('{}{}{}{}'.format(Colour.green, 'shd__ ', Colour.end, 'XML Config: Parsing parameters successful!'))
class DataClump(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
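## Minimal usage sketch (illustrative values only): DataClump stores its kwargs
## as dictionary entries and exposes the same entries as attributes.
## >>> clump = DataClump(DATA=[1, 2, 3], TARGET=[0, 1, 0])
## >>> clump.DATA       # attribute access...
## [1, 2, 3]
## >>> clump['TARGET']  # ...and key access hit the same storage
## [0, 1, 0]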
class DataLoader:
def __init__(self, database, descriptor):
self.database = database
self.descriptor = descriptor
def load_model(self):
## Loads description file for respective data set
modeldescr_name = self.descriptor
with open(modeldescr_name) as f:
descr_text = f.read()
## Loads data set from csv, into objects in preparation for bunch()
data_file_name = self.database
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
temp = next(data_file)
feature_names = np.array(temp)
labels = []
for i, d in enumerate(data_file):
data[i] = d[:-1]
label = d[-1]
labels.append(label)
le = preprocessing.LabelEncoder()
le.fit(labels)
hash_int_labels = le.transform(labels)
return DataClump(DATA=data,
TARGET=hash_int_labels,
FTRNAME=feature_names[:-1],
DESCR=descr_text,
ENCDR=le)
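## Sketch of the CSV layout load_model expects (hypothetical values):
## row 1: n_samples,n_features                e.g. "3,2"
## row 2: feature names, label name last      e.g. "cag,ccg,status"
## rows 3+: feature values, label last        e.g. "17,10,typical"
## Each data row is split into d[:-1] (features) and d[-1] (label); labels are
## integer-encoded with sklearn's LabelEncoder before being returned.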
def parse_boolean(boolean_value):
"""
Given a string (boolean_value), returns a boolean value representing the string contents.
For example, a string with 'true', 't', 'y' or 'yes' will yield True.
"""
boolean_value = boolean_value.lower() in ('yes', 'y', 'true', 't', '1')
return boolean_value
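## Illustrative behaviour: parse_boolean('Yes') -> True, parse_boolean('T') -> True,
## parse_boolean('0') -> False; any string outside ('yes', 'y', 'true', 't', '1')
## maps to False rather than raising.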
def empty_string_check(string, raise_exception=True):
"""
Simple check to see if the string provided by parameter string is empty. False indicates the string is NOT empty.
Parameter raise_exception determines if a ValueError exception should be raised if the string is empty.
If raise_exception is False and the string is empty, True is returned.
"""
if string != '':
return False
if raise_exception:
raise ValueError("Empty string detected!")
return True
def sanitise_inputs(parsed_arguments):
"""
Utilises filesystem_exists_check and check_input_files
if either return false, path is invalid or unsupported files present
so, quit
"""
trigger = False
##
## Jobname prefix validity check
if parsed_arguments.jobname:
for character in parsed_arguments.jobname:
if character == ' ' or character == '/':
log.error('{}{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Specified Job Name has invalid characters: "', character, '"'))
trigger = True
##
## Config mode check
if parsed_arguments.config:
if not filesystem_exists_check(parsed_arguments.config[0]):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Specified config file could not be found.'))
trigger = True
for xmlfile in parsed_arguments.config:
if not check_input_files('.xml',xmlfile):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Specified config file is not an XML file.'))
trigger = True
return trigger
def extract_data(input_data_directory):
target_files = glob.glob(os.path.join(input_data_directory, '*'))
for extract_target in target_files:
if extract_target.lower().endswith(('.fq.gz', '.fastq.gz')):
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Detected compressed input data. Extracting!'))
break
for extract_target in target_files:
unzipd = subprocess.Popen(['gzip', '-q', '-f', '-d', extract_target], stderr=subprocess.PIPE)
unzipd.wait()
return True
def sequence_pairings(data_path, instance_rundir):
##
## Get input files from data path
## Sort so that ordering isn't screwy on linux
input_files = glob.glob(os.path.join(data_path, '*'))
sorted_input = sorted(input_files)
sequence_pairs = []
file_count = len(sorted_input)
if not file_count % 2 == 0:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'I/O: Non-even number of input files specified. Cannot continue without pairing!'))
sys.exit(2)
##
## Optimise so code isn't recycled
for i in range(0, len(sorted_input), 2):
file_pair = {}
forward_data = sorted_input[i]
reverse_data = sorted_input[i+1]
##
## Check forward ends with R1
forward_data_name = sorted_input[i].split('/')[-1].split('.')[0]
if not forward_data_name.endswith('_R1'):
log.error('{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'I/O: Forward input file does not end in _R1. ', forward_data))
sys.exit(2)
##
## Check reverse ends with R2
reverse_data_name = sorted_input[i+1].split('/')[-1].split('.')[0]
if not reverse_data_name.endswith('_R2'):
log.error('{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'I/O: Reverse input file does not end in _R2. ', reverse_data))
sys.exit(2)
##
## Make Stage outputs for use in everywhere else in pipeline
sample_root = '_'.join(forward_data_name.split('_')[:-1])
instance_path = os.path.join(instance_rundir)
seq_qc_path = os.path.join(instance_rundir, sample_root, 'SeqQC')
align_path = os.path.join(instance_rundir, sample_root, 'Align')
predict_path = os.path.join(instance_rundir, sample_root, 'Predict')
file_pair[sample_root] = [forward_data, reverse_data, instance_path, seq_qc_path, align_path, predict_path]
sequence_pairs.append(file_pair)
return sequence_pairs
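## Illustrative sketch of the input layout sequence_pairings expects
## (hypothetical filenames):
## data_path/
##     sampleA_R1.fastq    sampleA_R2.fastq
##     sampleB_R1.fastq    sampleB_R2.fastq
## After sorting, files are consumed in adjacent pairs, yielding one entry per
## sample such as {'sampleA': [fw, rv, instance_path, seq_qc_path, align_path,
## predict_path]}; an odd file count or a missing _R1/_R2 suffix aborts the run.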
def filesystem_exists_check(path, raise_exception=True):
"""
Checks to see if the path, specified by parameter path, exists. Can be either a directory or file.
If the path exists, True is returned. If the path does not exist, and raise_exception is set to True,
an IOError is raised - else False is returned.
"""
if os.path.lexists(path):
return True
if raise_exception:
log.error('{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Specified input path could not be found.'))
return False
def check_input_files(input_format, input_file):
if input_file.endswith(input_format):
return True
return False
def initialise_libraries(instance_params):
trigger = False
##
## Subfunction for recycling code
## Calls UNIX type for checking binaries present
## Changed from `which`, as `type` apparently works across different shells/config files
def type_func(binary):
binary_result = []
binary_string = 'type {}'.format(binary)
binary_subprocess = subprocess.Popen([binary_string], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
binary_result = binary_subprocess.communicate()
binary_subprocess.wait()
if 'not found'.encode() in binary_result[0] or binary_result[1]:
log.critical('{}{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Missing binary: ', binary, '!'))
raise NameError
##
## To determine which binaries to check for
## AttributeError in the situation where instance_params origin differs
## try for -c style, except AttributeError for -b style
try:
quality_control = instance_params.config_dict['instance_flags']['@quality_control']
alignment = instance_params.config_dict['instance_flags']['@sequence_alignment']
genotyping = instance_params.config_dict['instance_flags']['@genotype_prediction']
snp_calling = instance_params.config_dict['instance_flags']['@snp_calling']
except AttributeError:
quality_control = instance_params['quality_control']
alignment = instance_params['sequence_alignment']
genotyping = instance_params['genotype_prediction']
snp_calling = instance_params['snp_calling']
if quality_control == 'True':
try:type_func('java')
except NameError: trigger=True
try:type_func('fastqc')
except NameError: trigger=True
try:type_func('cutadapt')
except NameError: trigger=True
if alignment == 'True':
try:type_func('seqtk')
except NameError: trigger=True
try:type_func('bwa')
except NameError: trigger=True
try:type_func('samtools')
except NameError: trigger=True
try:type_func('generatr')
except NameError: trigger=True
if genotyping == 'True':
try:type_func('samtools')
except NameError: trigger=True
try:type_func('generatr')
except NameError: trigger=True
if snp_calling == 'True':
try: type_func('picard')
except NameError: trigger=True
try: type_func('freebayes')
except NameError: trigger=True
return trigger
def sanitise_outputs(jobname, output_argument):
run_dir = ''
output_root = output_argument[0]
if jobname:
target_output = os.path.join(output_root, jobname)
if not os.path.exists(target_output):
log.info('{}{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating Output with prefix: ', jobname))
run_dir = os.path.join(output_root, jobname)
mkdir_p(run_dir)
else:
purge_choice = ''
while True:
purge_choice = input('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Job folder already exists. Delete existing folder? Y/N: '))
if not (purge_choice.lower() == 'y') and not (purge_choice.lower() == 'n'):
log.info('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Invalid input. Please input Y or N.'))
continue
else:
break
if purge_choice.lower() == 'y':
log.info('{}{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Clearing pre-existing Jobname Prefix: ', jobname))
run_dir = os.path.join(output_root, jobname)
if os.path.exists(run_dir):
shutil.rmtree(run_dir, ignore_errors=True)
mkdir_p(run_dir)
else:
raise Exception('User chose not to delete pre-existing Job folder. Cannot write output.')
else:
## Ensures root output is a real directory
## Generates folder name based on date (for run ident)
date = datetime.date.today().strftime('%d-%m-%Y')
walltime = datetime.datetime.now().strftime('%H%M%S')
today = date + '-' + walltime
## If the user specified root doesn't exist, make it
## Then make the run directory for datetime
if not os.path.exists(output_root):
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating output root... '))
mkdir_p(output_root)
run_dir = os.path.join(output_root, 'ScaleHDRun_'+today)
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating instance run directory.. '))
mkdir_p(run_dir)
## Inform user it's all gonna be okaaaayyyy
log.info('{}{}{}{}'.format(Colour.green, 'shd__ ', Colour.end, 'Output directories OK!'))
return run_dir
def replace_fqfile(mutate_list, target_fqfile, altered_path):
if target_fqfile in mutate_list:
loc = mutate_list.index(target_fqfile)
mutate_list[loc] = altered_path
return mutate_list
def scrape_summary_data(stage, input_report_file):
##
## If the argument input_report_file is from trimming..
if stage == 'trim':
with open(input_report_file, 'r') as trpf:
trim_lines = trpf.readlines()
##
## Determine buffer size to slice from above array
scraping_buffer = 8
if '-q' in trim_lines[1]:
scraping_buffer += 1
##
## Get Anchor
summary_start = 0
for i in range(0, len(trim_lines)):
if '== Summary ==' in trim_lines[i]:
summary_start = i
##
## Slice and close
summary_data = trim_lines[summary_start:summary_start + scraping_buffer]
trpf.close()
return summary_data[2:]
##
## If the argument input_report_file is from alignment..
if stage == 'align':
with open(input_report_file, 'r') as alnrpf:
align_lines = alnrpf.readlines()
alnrpf.close()
##
## No ranges required, only skip first line
return align_lines[1:]
##
## No need to tidy up report for genotyping
## since we already have the data from our own objects
if stage == 'gtype':
pass
def generate_atypical_xml(label, allele_object, index_path, direction):
"""
:param allele_object:
:param index_path:
:return:
"""
##TODO docstring
atypical_path = os.path.join(index_path, '{}{}_{}.xml'.format(direction, label, allele_object.get_reflabel()))
fp_flank = 'GCGACCCTGGAAAAGCTGATGAAGGCCTTCGAGTCCCTCAAGTCCTTC'
cagstart = ''; cagend = ''
intv = allele_object.get_intervening()
ccgstart = ''; ccgend = ''
ccglen = allele_object.get_ccg()
cctlen = allele_object.get_cct()
tp_flank = 'CAGCTTCCTCAGCCGCCGCCGCAGGCACAGCCGCTGCT'
if direction == 'fw':
cagstart = '1'; cagend = '200'
ccgstart = '1'; ccgend = '20'
if direction == 'rv':
cagstart = '100'; cagend = '100'
ccgstart = '1'; ccgend = '20'
##
## Create XML
data_root = etree.Element('data')
loci_root = etree.Element('loci', label=allele_object.get_reflabel()); data_root.append(loci_root)
##
## Loci Nodes
fp_input = etree.Element('input', type='fiveprime', flank=fp_flank)
cag_region = etree.Element('input', type='repeat_region', order='1', unit='CAG', start=cagstart, end=cagend)
intervening = etree.Element('input', type='intervening', sequence=intv, prior='1')
ccg_region = etree.Element('input', type='repeat_region', order='2', unit='CCG', start=ccgstart, end=ccgend)
cct_region = etree.Element('input', type='repeat_region', order='3', unit='CCT', start=str(cctlen), end=str(cctlen))
tp_input = etree.Element('input', type='threeprime', flank=tp_flank)
for node in [fp_input, cag_region, intervening, ccg_region, cct_region, tp_input]:
loci_root.append(node)
s = etree.tostring(data_root, pretty_print=True)
with open(atypical_path, 'w') as xmlfi:
xmlfi.write(s.decode())
xmlfi.close()
return atypical_path
def generate_reference(input_xml, index_path, ref_indexes, direction):
##TODO docstring
label = input_xml.split('/')[-1].split('.')[0]
target_output = os.path.join(index_path, label + '.fa')
temp_output = os.path.join(index_path, label + '_concat.fa')
gen_process = subprocess.Popen(['generatr', '-i', input_xml, '-o', target_output], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gen_process.wait()
##
## Join typical and atypical reference into one file
if direction == 'fw':
toutfi = open(temp_output, 'w')
cat_process = subprocess.Popen(['cat', target_output, ref_indexes[0]], stdout=toutfi, stderr=subprocess.PIPE)
cat_process.wait()
toutfi.close()
target_output = temp_output
return target_output
def seek_target(input_list, target):
for i in range(0, len(input_list)):
if target in input_list[i]:
return i
def sanitise_trimming_output(input_object, input_list):
if type(input_object) is int:
cleanse_target = input_list[input_object].split(':')[1].lstrip().rstrip()
return cleanse_target
else:
return '*'
def sanitise_alignment_output(input_object, input_list, stage):
if type(input_object) is int:
if stage == 3:
cleanse_target = input_list[input_object].lstrip().rstrip().split(' ')[0:1]
return ''.join(cleanse_target)
else:
cleanse_target = input_list[input_object].lstrip().rstrip().split(' ')[0:2]
return ' '.join(cleanse_target)
else:
return '*'
def mkdir_p(path):
try: os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path): pass
else: raise
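## mkdir_p mirrors the shell's `mkdir -p`: intermediate directories are created
## as needed and an already-existing leaf directory is not treated as an error.
## Sketch (hypothetical path):
## >>> mkdir_p('/tmp/scalehd_demo/run1')   # creates both levels
## >>> mkdir_p('/tmp/scalehd_demo/run1')   # second call is a no-op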
[file metrics: avg line length 38.28; max line length 155; 31,196 bytes; 814 lines]
bc6de8ef28a6c9ca4fc7727dee2d21bb765f85a1 | 1,585 | py | Python | scripts/json_parse.py | andrewsimonds14/Capstone | 5ae56b9be40846e9993a8f23aaa8e1ef92cd9ea3 | ["MIT"] | stars: null | issues: null | forks: null
import json
import os
import nibabel as nib
import csv
from operator import itemgetter
# PATH TO PREPROCESSED DATA
raw_data_path = '/home/lab/nnUNet_data/nnUNet_raw_data_base/nnUNet_raw_data/Task500_BrainMets'
pixdim_ind = [1,2,3] # Indexes at which the voxel size [x,y,z] is stored
# PATH TO JSON FILE
with open('/home/lab/nnUNet_data/RESULTS_FOLDER/nnUNet/3d_fullres/Task500_BrainMets/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/validation_raw/summary.json') as file:
data = json.load(file)
with open('json_parsed.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Case Number', 'Dice Score', 'Voxel Size-X', 'Voxel Size-Y', 'Voxel Size-Z'])
for img in data['results']['all']:
# Get dice score on image
dice = img['1']['Dice']
# Get nifti data on image
img_filename = (os.path.basename(img['reference']).split('.'))[0]
img_ni = nib.load(raw_data_path + '/imagesTr/' + img_filename + '_0000.nii.gz')
label_ni = nib.load(raw_data_path + '/labelsTr/' + img_filename + '.nii.gz')
voxel_size = itemgetter(*pixdim_ind)(img_ni.header["pixdim"])
# Get tumor dimensions
# tumor_size =
# Get case number corresponding to image
case_number = img_filename.split('_')[1]
# Write to csv file (reuse the writer created above)
csv_writer.writerow([case_number, dice, voxel_size[0], voxel_size[1], voxel_size[2]])
[file metrics: avg line length 36.86; max line length 162; 1,585 bytes; 42 lines]
bc6e2a6ace5b77db9c88569ae6f6456c11dc1f48 | 21,122 | py | Python | DeepBrainSeg/tumor/Tester.py | JordanMicahBennett/DeepBrainSeg | 659dd439d20d4c024fe337874eadb90deffc40a4 | ["MIT"] | stars: 1 (2021-01-01T18:06:50.000Z) | issues: null | forks: 1 (2021-01-01T18:06:52.000Z)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: Avinash Kori
# contact: koriavinash1@gmail.com
import torch
import SimpleITK as sitk
import numpy as np
import nibabel as nib
from torch.autograd import Variable
from skimage.transform import resize
from torchvision import transforms
from time import gmtime, strftime
from tqdm import tqdm
import pdb
import os
from ..helpers.helper import *
from os.path import expanduser
home = expanduser("~")
#========================================================================================
# prediction functions.....................
bin_path = os.path.join('/opt/ANTs/bin/')
class tumorSeg():
"""
class performs segmentation for a given sequence of patient data.
to main platform for segmentation mask estimation
one for the patient data in brats format
other with any random format
step followed for in estimation of segmentation mask
1. ABLnet for reducing false positives outside the brain
Air Brain Lesson model (2D model, 103 layered)
2. BNet3Dnet 3D network for inner class classification
Dual Path way network
3. MNet2D 57 layered convolutional network for inner class
classification
4. Tir3Dnet 57 layered 3D convolutional network for inner class
classification
more on training details and network information:
(https://link.springer.com/chapter/10.1007/978-3-030-11726-9_43<Paste>)
=========================
quick: True (just evaluates on Dual path network (BNet3D)
else copmutes an ensumble over all four networks
"""
def __init__(self,
quick = False,
ants_path = bin_path):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = "cpu"
map_location = device
#========================================================================================
ckpt_tir2D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_2D_FC57_best_loss.pth.tar')
ckpt_tir3D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar')
ckpt_BNET3D = os.path.join(home, '.DeepBrainSeg/BestModels/BrainNet_3D_best_acc.pth.tar')
ckpt_ABL = os.path.join(home, '.DeepBrainSeg/BestModels/ABL_CE_best_model_loss_based.pth.tar')
#========================================================================================
# air brain lesion segmentation..............
from .models.modelABL import FCDenseNet103
self.ABLnclasses = 3
self.ABLnet = FCDenseNet103(n_classes = self.ABLnclasses) ## initialize the graph
saved_parms=torch.load(ckpt_ABL, map_location=map_location)
self.ABLnet.load_state_dict(saved_parms['state_dict']) ## fill the model with trained params
print ("=================================== ABLNET2D Loaded =================================")
self.ABLnet.eval()
self.ABLnet = self.ABLnet.to(device)
#========================================================================================
# Tir2D net.......................
from .models.modelTir2D import FCDenseNet57
self.Mnclasses = 4
self.MNET2D = FCDenseNet57(self.Mnclasses)
ckpt = torch.load(ckpt_tir2D, map_location=map_location)
self.MNET2D.load_state_dict(ckpt['state_dict'])
print ("=================================== MNET2D Loaded ===================================")
self.MNET2D.eval()
self.MNET2D = self.MNET2D.to(device)
#========================================================================================
if not quick:
# BrainNet3D model......................
from .models.model3DBNET import BrainNet_3D_Inception
self.B3Dnclasses = 5
self.BNET3Dnet = BrainNet_3D_Inception()
ckpt = torch.load(ckpt_BNET3D, map_location=map_location)
self.BNET3Dnet.load_state_dict(ckpt['state_dict'])
print ("=================================== KAMNET3D Loaded =================================")
self.BNET3Dnet.eval()
self.BNET3Dnet = self.BNET3Dnet.to(device)
#========================================================================================
# Tir3D model...................
from .models.modelTir3D import FCDenseNet57
self.T3Dnclasses = 5
self.Tir3Dnet = FCDenseNet57(self.T3Dnclasses)
ckpt = torch.load(ckpt_tir3D, map_location=map_location)
self.Tir3Dnet.load_state_dict(ckpt['state_dict'])
print ("================================== TIRNET2D Loaded =================================")
self.Tir3Dnet.eval()
self.Tir3Dnet = self.Tir3Dnet.to(device)
#========================================================================================
self.device = device
self.quick = quick
self.ants_path = ants_path
def get_ants_mask(self, t1_path):
"""
We make use of ants framework for generalized skull stripping
t1_path: t1 volume path (str)
saves the mask in the same location as t1 data directory
returns: maskvolume (numpy uint8 type)
"""
mask_path = os.path.join(os.path.dirname(t1_path), 'mask.nii.gz')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' Normalize '+ t1_path)
os.system(self.ants_path +'ThresholdImage 3 '+ mask_path +' '+ mask_path +' 0.01 1')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' MD '+ mask_path +' 1')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' ME '+ mask_path +' 1')
os.system(self.ants_path +'CopyImageHeaderInformation '+ t1_path+' '+ mask_path +' '+ mask_path +' 1 1 1')
mask = np.uint8(nib.load(mask_path).get_data())
return mask
def get_localization(self, t1_v, t1c_v, t2_v, flair_v, brain_mask):
"""
ABLnetwork output, finds the brain, Whole tumor region
t1_v = t1 volume (numpy array)
t1c_v = t1c volume (numpy array)
t2_v = t2 volume (numpy array)
flair_v = flair volume (numpy array)
brain_mask = brain, whole tumor mask (numpy array, output of ANTs pipeline)
"""
t1_v = normalize(t1_v, brain_mask)
t1c_v = normalize(t1c_v, brain_mask)
t2_v = normalize(t2_v, brain_mask)
flair_v = normalize(flair_v, brain_mask)
generated_output_logits = np.empty((self.ABLnclasses, flair_v.shape[0],flair_v.shape[1],flair_v.shape[2]))
for slices in tqdm(range(flair_v.shape[2])):
flair_slice = np.transpose(flair_v[:,:,slices])
t2_slice = np.transpose(t2_v[:,:,slices])
t1ce_slice = np.transpose(t1c_v[:,:,slices])
t1_slice = np.transpose(t1_v[:,:,slices])
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],4))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array[:,:,3] = t1_slice
transformed_array = torch.from_numpy(convert_image(array)).float()
transformed_array = transformed_array.unsqueeze(0) ## necessary if batch size == 1
transformed_array = transformed_array.to(self.device)
logits = self.ABLnet(transformed_array).detach().cpu().numpy()# 3 x 240 x 240
generated_output_logits[:,:,:, slices] = logits.transpose(0, 1, 3, 2)
final_pred = apply_argmax_to_logits(generated_output_logits)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes_air_brain_tumour(np.uint8(final_pred))
return np.uint8(final_pred)
def inner_class_classification_with_logits_NCube(self, t1,
t1ce, t2, flair,
brain_mask, mask, N = 64):
"""
output of 3D tiramisu model (tir3Dnet)
mask = numpy array output of ABLnet
N = patch size during inference
"""
t1 = normalize(t1, brain_mask)
t1ce = normalize(t1ce, brain_mask)
t2 = normalize(t2, brain_mask)
flair = normalize(flair, brain_mask)
shape = t1.shape # to exclude batch_size
final_prediction = np.zeros((self.T3Dnclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = N)
x_min, x_max, y_min, y_max, z_min, z_max = x_min, min(shape[0] - N, x_max), y_min, min(shape[1] - N, y_max), z_min, min(shape[2] - N, z_max)
with torch.no_grad():
for x in tqdm(range(x_min, x_max, N//2)):
for y in range(y_min, y_max, N//2):
for z in range(z_min, z_max, N//2):
high = np.zeros((1, 4, N, N, N))
high[0, 0, :, :, :] = flair[x:x+N, y:y+N, z:z+N]
high[0, 1, :, :, :] = t2[x:x+N, y:y+N, z:z+N]
high[0, 2, :, :, :] = t1[x:x+N, y:y+N, z:z+N]
high[0, 3, :, :, :] = t1ce[x:x+N, y:y+N, z:z+N]
high = Variable(torch.from_numpy(high)).to(self.device).float()
pred = torch.nn.functional.softmax(self.Tir3Dnet(high).detach().cpu())
pred = pred.data.numpy()
final_prediction[:, x:x+N, y:y+N, z:z+N] = pred[0]
final_prediction = convert5class_logitsto_4class(final_prediction)
return final_prediction
def inner_class_classification_with_logits_DualPath(self, t1,
t1ce, t2, flair,
brain_mask, mask=None,
prediction_size = 9):
"""
output of BNet3D
prediction_size = mid inference patch size
"""
t1 = normalize(t1, brain_mask)
t1ce = normalize(t1ce, brain_mask)
t2 = normalize(t2, brain_mask)
flair = normalize(flair, brain_mask)
shape = t1.shape # to exclude batch_size
final_prediction = np.zeros((self.B3Dnclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = prediction_size)
# obtained by aspect ratio calculation
high_res_size = prediction_size + 16
resize_to = int(prediction_size ** 0.5) + 16
low_res_size = int(51*resize_to/19)
hl_pad = (high_res_size - prediction_size)//2
hr_pad = hl_pad + prediction_size
ll_pad = (low_res_size - prediction_size)//2
lr_pad = ll_pad + prediction_size
for x in tqdm(range(x_min, x_max - prediction_size, prediction_size)):
for y in (range(y_min, y_max - prediction_size, prediction_size)):
for z in (range(z_min, z_max - prediction_size, prediction_size)):
high = np.zeros((1, 4, high_res_size, high_res_size, high_res_size))
low = np.zeros((1, 4, low_res_size, low_res_size, low_res_size))
low1 = np.zeros((1, 4, resize_to, resize_to, resize_to))
high[0, 0], high[0, 1], high[0, 2], high[0, 3] = high[0, 0] + flair[0,0,0], high[0, 1] + t2[0,0,0], high[0, 2] + t1[0,0,0], high[0, 3] + t1ce[0,0,0]
low[0, 0], low[0, 1], low[0, 2], low[0, 3] = low[0, 0] + flair[0,0,0], low[0, 1] + t2[0,0,0], low[0, 2] + t1[0,0,0], low[0, 3] + t1ce[0,0,0]
low1[0, 0], low1[0, 1], low1[0, 2], low1[0, 3] = low1[0, 0] + flair[0,0,0], low1[0, 1] + t2[0,0,0], low1[0, 2] + t1[0,0,0], low1[0, 3] + t1ce[0,0,0]
# =========================================================================
vxf, vxt = max(0, x-hl_pad), min(shape[0], x+hr_pad)
vyf, vyt = max(0, y-hl_pad), min(shape[1], y+hr_pad)
vzf, vzt = max(0, z-hl_pad), min(shape[2], z+hr_pad)
txf, txt = max(0, hl_pad-x), max(0, hl_pad-x) + vxt - vxf
tyf, tyt = max(0, hl_pad-y), max(0, hl_pad-y) + vyt - vyf
tzf, tzt = max(0, hl_pad-z), max(0, hl_pad-z) + vzt - vzf
high[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
# =========================================================================
vxf, vxt = max(0, x-ll_pad), min(shape[0], x+lr_pad)
vyf, vyt = max(0, y-ll_pad), min(shape[1], y+lr_pad)
vzf, vzt = max(0, z-ll_pad), min(shape[2], z+lr_pad)
txf, txt = max(0, ll_pad-x), max(0, ll_pad-x) + vxt - vxf
tyf, tyt = max(0, ll_pad-y), max(0, ll_pad-y) + vyt - vyf
tzf, tzt = max(0, ll_pad-z), max(0, ll_pad-z) + vzt - vzf
low[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
# =========================================================================
low1[0] = [resize(low[0, i, :, :, :], (resize_to, resize_to, resize_to)) for i in range(4)]
high = Variable(torch.from_numpy(high)).to(self.device).float()
low1 = Variable(torch.from_numpy(low1)).to(self.device).float()
pred = torch.nn.functional.softmax(self.BNET3Dnet(high, low1, pred_size=prediction_size).detach().cpu())
pred = pred.numpy()
final_prediction[:, x:x+prediction_size, y:y+prediction_size, z:z+prediction_size] = pred[0]
final_prediction = convert5class_logitsto_4class(final_prediction)
return final_prediction
def inner_class_classification_with_logits_2D(self,
t1ce_volume,
t2_volume,
flair_volume):
"""
output of 2D tiramisu model (MNet)
"""
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transformList = []
transformList.append(transforms.ToTensor())
transformList.append(normalize)
transformSequence=transforms.Compose(transformList)
generated_output = np.empty((self.Mnclasses,flair_volume.shape[0],flair_volume.shape[1],flair_volume.shape[2]))
for slices in tqdm(range(flair_volume.shape[2])):
flair_slice = scale_every_slice_between_0_to_255(np.transpose(flair_volume[:,:,slices]))
t2_slice = scale_every_slice_between_0_to_255(np.transpose(t2_volume[:,:,slices]))
t1ce_slice = scale_every_slice_between_0_to_255(np.transpose(t1ce_volume[:,:,slices]))
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],3))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array = np.uint8(array)
transformed_array = transformSequence(array)
transformed_array = transformed_array.unsqueeze(0)
transformed_array = transformed_array.to(self.device)
outs = torch.nn.functional.softmax(self.MNET2D(transformed_array).detach().cpu()).numpy()
## assumed fix: write each slice's class probabilities back into the volume
## (previously the per-slice outputs were never stored; this assumes the
## network returns a (1, n_classes, H, W) array, matching the ABLnet loop)
generated_output[:, :, :, slices] = outs[0].transpose(0, 2, 1)
outs = np.swapaxes(generated_output, 1, 2)
return outs
def get_segmentation(self,
t1_path,
t2_path,
t1ce_path,
flair_path,
save_path = None):
"""
Generates segmentation for data not in BraTS format.
If save_path is provided, the prediction is saved as
DeepBrainSeg_Prediction.nii.gz in that directory.
returns: segmentation mask
"""
t1 = nib.load(t1_path).get_data()
t2 = nib.load(t2_path).get_data()
t1ce = nib.load(t1ce_path).get_data()
flair = nib.load(flair_path).get_data()
affine = nib.load(flair_path).affine
brain_mask = self.get_ants_mask(t2_path)
mask = self.get_localization(t1, t1ce, t2, flair, brain_mask)
# mask = np.swapaxes(mask,1, 0)
if not self.quick:
final_predictionTir3D_logits = self.inner_class_classification_with_logits_NCube(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionBNET3D_logits = self.inner_class_classification_with_logits_DualPath(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair).transpose(0, 2, 1, 3)
final_prediction_array = np.array([final_predictionTir3D_logits, final_predictionBNET3D_logits, final_predictionMnet_logits])
else:
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionMnet_logits])
final_prediction_logits = combine_logits_AM(final_prediction_array)
final_pred = postprocessing_pydensecrf(final_prediction_logits)
final_pred = combine_mask_prediction(mask, final_pred)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes(final_pred)
if save_path:
os.makedirs(save_path, exist_ok=True)
save_volume(final_pred, affine, os.path.join(save_path, 'DeepBrainSeg_Prediction'))
return final_pred
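# A minimal usage sketch for this entry point (hypothetical file names; the
# constructor argument mirrors the __main__ block at the bottom of this file):
# seg = deepSeg(True)
# mask = seg.get_segmentation('t1.nii.gz', 't2.nii.gz', 't1ce.nii.gz',
# 'flair.nii.gz', save_path='predictions/')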
def get_segmentation_brats(self,
path,
save = True):
"""
Generates a segmentation for data in the BraTS layout.
If save is True, the prediction is saved in the patient's
data directory under the given path.
returns : segmentation mask
"""
name = path.split("/")[-1] + "_"
flair = nib.load(os.path.join(path, name + 'flair.nii.gz')).get_data()
t1 = nib.load(os.path.join(path, name + 't1.nii.gz')).get_data()
t1ce = nib.load(os.path.join(path, name + 't1ce.nii.gz')).get_data()
t2 = nib.load(os.path.join(path, name + 't2.nii.gz')).get_data()
affine= nib.load(os.path.join(path, name + 'flair.nii.gz')).affine
print ("[INFO: DeepBrainSeg] (" + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + ") Working on: ", path)
brain_mask = self.get_ants_mask(os.path.join(path, name + 't2.nii.gz'))
# brain_mask = get_brain_mask(t1)
mask = self.get_localization(t1, t1ce, t2, flair, brain_mask)
mask = np.swapaxes(mask,1, 0)
if not self.quick:
final_predictionTir3D_logits = self.inner_class_classification_with_logits_NCube(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionBNET3D_logits = self.inner_class_classification_with_logits_DualPath(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionTir3D_logits, final_predictionBNET3D_logits, final_predictionMnet_logits])
else:
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionMnet_logits])
final_prediction_logits = combine_logits_AM(final_prediction_array)
final_pred = postprocessing_pydensecrf(final_prediction_logits)
final_pred = combine_mask_prediction(mask, final_pred)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes(final_pred)
if save:
save_volume(final_pred, affine, os.path.join(path, 'DeepBrainSeg_Prediction'))
return final_pred
# ========================================================================================
if __name__ == '__main__':
ext = deepSeg(True)
ext.get_segmentation_brats('../../sample_volume/Brats18_CBICA_AVG_1/')
| 46.937778
| 168
| 0.548717
| 2,598
| 21,122
| 4.250577
| 0.142802
| 0.005977
| 0.012678
| 0.027891
| 0.540705
| 0.45676
| 0.386489
| 0.357149
| 0.315856
| 0.307435
| 0
| 0.035783
| 0.28288
| 21,122
| 449
| 169
| 47.042316
| 0.693273
| 0.163431
| 0
| 0.220532
| 0
| 0
| 0.056598
| 0.035352
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030418
| false
| 0
| 0.064639
| 0
| 0.125475
| 0.019011
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc71cbe7f13c2f51663de7c1b18572924543ac36
| 26,868
|
py
|
Python
|
bkt/library/powerpoint/elements.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 12
|
2019-05-31T02:57:26.000Z
|
2022-03-26T09:40:50.000Z
|
bkt/library/powerpoint/elements.py
|
mrflory/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 27
|
2021-11-27T16:33:19.000Z
|
2022-03-27T17:47:26.000Z
|
bkt/library/powerpoint/elements.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 3
|
2019-06-12T10:59:20.000Z
|
2020-04-21T15:13:50.000Z
|
# -*- coding: utf-8 -*-
'''
Created on 02.11.2017
@author: fstallmann
'''
from __future__ import absolute_import
from collections import deque
import bkt
from bkt import dotnet
Drawing = dotnet.import_drawing()
from . import helpers as pplib
class TextframeSpinnerBox(bkt.ribbon.RoundingSpinnerBox):
### Instance initialization
attr = 'MarginTop'
def __init__(self, **kwargs):
'''
attr examples: MarginTop, MarginBottom, MarginLeft, MarginRight
'''
#self.attr is automatically set through RibbonControl attribute handling
self.fallback_value = 0
my_kwargs = dict(
size_string = '###',
round_cm = True,
convert = 'pt_to_cm',
get_enabled = bkt.apps.ppt_selection_contains_textframe,
)
my_kwargs.update(kwargs)
super(TextframeSpinnerBox, self).__init__(**my_kwargs)
### Spinner Box callbacks ###
def get_text(self, shapes, selection):
value = self.get_attr_from_shapes(shapes, selection)
if value is None: #e.g. no textframe detected
return None
elif int(value) == -2147483648: #replace large negative number (values differ between selected items) with fallback value
return self.fallback_value
else:
return value
def on_change(self, shapes, selection, value):
self.set_attr_for_shapes(shapes, selection, value)
### Getter Methods ###
def get_attr_from_shapes(self, shapes, selection):
'''
Get attr for shapes
'''
for textframe in pplib.iterate_shape_textframes(shapes):
try:
return self.get_attr_from_textframe(textframe)
except:
# produces error for certain chart types, e.g. Treemap
continue
return None
def get_attr_from_textframe(self, textframe):
return getattr(textframe, self.attr)
### Setter methods ###
def set_attr_for_shapes(self, shapes, selection, value):
'''
Set attr for shapes
'''
value = max(0,value)
for textframe in pplib.iterate_shape_textframes(shapes):
self.set_attr_for_textframe(textframe, value)
def set_attr_for_textframe(self, textframe, value):
setattr(textframe, self.attr, value)
class ParagraphFormatSpinnerBox(bkt.ribbon.RoundingSpinnerBox):
### Instance initialization
attr = 'SpaceBefore'
def __init__(self, **kwargs):
'''
attr examples: SpaceBefore, SpaceAfter, LeftIndent, FirstLineIndent, LineSpacing
'''
#self.attr is automatically set through RibbonControl attribute handling
self.fallback_value = 0
my_kwargs = dict(
size_string = '-###',
get_enabled = bkt.apps.ppt_selection_contains_textframe,
)
if self.attr in ["SpaceBefore", "SpaceAfter", "SpaceWithin"]:
my_kwargs["round_pt"] = True
else:
my_kwargs["round_cm"] = True
my_kwargs["convert"] = "pt_to_cm"
if self.attr in ["LeftIndent", "FirstLineIndent"]:
my_kwargs["big_step"] = 0.25
my_kwargs["small_step"] = 0.125
my_kwargs["rounding_factor"] = 0.125
my_kwargs.update(kwargs)
super(ParagraphFormatSpinnerBox, self).__init__(**my_kwargs)
### Spinner Box callbacks ###
def get_text(self, shapes, selection):
value = self.get_attr_from_shapes(shapes, selection)
if value is None: #e.g. no textframe detected
return None
elif int(value) == -2147483648: #replace large negative number (values differ between selected items) with fallback value
return self.fallback_value
else:
return value
def on_change(self, shapes, selection, value):
self.set_attr_for_shapes(shapes, selection, value)
### Getter Methods ###
def get_attr_from_shapes(self, shapes, selection):
if selection.Type == 3:
# text selected
try:
# produces error if no text is selected
return self._get_attr(selection.TextRange2.Paragraphs(1,1).ParagraphFormat)
except:
try:
# produces error if there is no textrange, e.g. selection within a chart
return self._get_attr(selection.TextRange2.ParagraphFormat)
except:
return None
else:
# shapes selected
for textframe in pplib.iterate_shape_textframes(shapes):
try:
value = self.get_attr_from_textrange(textframe.TextRange)
except:
# produces error for certain chart types, e.g. Treemap
continue
try:
if int(value) == -2147483648: #different values for each paragraph, so get value from first paragraph
value = self._get_attr(textframe.TextRange.Paragraphs(1,1).ParagraphFormat)
except:
pass
return value
return None
def get_attr_from_textrange(self, textrange):
return self._get_attr(textrange.ParagraphFormat)
def _get_attr(self, par_format):
if self.attr in ["SpaceBefore", "SpaceAfter", "SpaceWithin"]:
if (self.attr == "SpaceBefore" and par_format.LineRuleBefore == 0) or (self.attr == "SpaceAfter" and par_format.LineRuleAfter == 0) or (self.attr == "SpaceWithin" and par_format.LineRuleWithin == 0):
self.huge_step = 10
self.big_step = 3
self.small_step = 1
self.round_at = 0
else:
self.huge_step = 0.5
self.big_step = 0.2
self.small_step = 0.1
self.round_at = 1
return getattr(par_format, self.attr)
### Setter methods ###
def set_attr_for_shapes(self, shapes, selection, value):
if self.attr != "FirstLineIndent": #FirstLineIndent can be negative!
value = max(0,value)
if selection.Type == 3:
# text selected
self.set_attr_for_textrange(selection.TextRange2, value) #need to use TextRange2 as TextRange does not contain LeftIndent, etc.
else:
for textframe in pplib.iterate_shape_textframes(shapes):
self.set_attr_for_textrange(textframe.TextRange, value)
def set_attr_for_textrange(self, textrange, value): #using textrange instead of textframe!
if self.attr == "SpaceBefore" and textrange.ParagraphFormat.LineRuleBefore == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleBefore = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleBefore
if self.attr == "SpaceAfter" and textrange.ParagraphFormat.LineRuleAfter == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleAfter = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleAfter
if self.attr == "SpaceWithin" and textrange.ParagraphFormat.LineRuleWithin == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleWithin = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleWithin
setattr(textrange.ParagraphFormat, self.attr, value)
class PPTSymbolsSettings(object):
recent_symbols = deque(bkt.settings.get("bkt.symbols.recent_symbols", []), maxlen=3)
convert_into_shape = bkt.settings.get("bkt.symbols.convert_into_shape", True) #always convert newly inserted symbols into shapes
convert_into_bitmap = bkt.settings.get("bkt.symbols.convert_into_bitmap", False) #always convert newly inserted symbols into bitmap picture
unicode_font = bkt.settings.get("bkt.symbols.unicode_font", None) #insert unicode characters as symbol with special font (e.g. Arial Unicode)
@classmethod
def add_to_recent(cls, item):
try:
#try to remove if already exists and add to beginning
cls.recent_symbols.remove(item)
cls.recent_symbols.append(item)
except ValueError:
cls.recent_symbols.append(item)
bkt.settings["bkt.symbols.recent_symbols"] = cls.recent_symbols
@classmethod
def switch_unicode_font(cls, font=None):
cls.unicode_font = font #if font else SymbolsGallery.fallback_font
bkt.settings["bkt.symbols.unicode_font"] = cls.unicode_font
@classmethod
def convert_into_text(cls):
return not (cls.convert_into_shape or cls.convert_into_bitmap)
@classmethod
def switch_convert_into_text(cls, pressed):
cls.convert_into_shape = False
cls.convert_into_bitmap = False
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def switch_convert_into_shape(cls, pressed):
cls.convert_into_shape = pressed
cls.convert_into_bitmap = False
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def get_convert_into_shape(cls):
return (cls.convert_into_shape or bkt.get_key_state(bkt.KeyCodes.SHIFT)) and not bkt.get_key_state(bkt.KeyCodes.CTRL)
@classmethod
def switch_convert_into_bitmap(cls, pressed):
cls.convert_into_shape = False
cls.convert_into_bitmap = pressed
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def get_convert_into_bitmap(cls):
return (cls.convert_into_bitmap or bkt.get_key_state(bkt.KeyCodes.CTRL)) and not bkt.get_key_state(bkt.KeyCodes.SHIFT)
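# Net effect of the two getters above: holding SHIFT forces conversion into a
# shape, holding CTRL forces conversion into a bitmap, and holding both keeps
# either getter from firing, so the symbol lands as a plain unconverted textbox.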
class PPTSymbolsGallery(bkt.ribbon.SymbolsGallery):
@property
def fallback_font(self):
return PPTSymbolsSettings.unicode_font or bkt.ribbon.SymbolsGallery.fallback_font
def on_action_indexed(self, selected_item, index, context, selection, **kwargs):
''' insert the clicked symbol according to the current conversion settings '''
item = self.symbols[index]
self._add_to_recent(item)
shift_or_ctrl = bkt.get_key_state(bkt.KeyCodes.CTRL) or bkt.get_key_state(bkt.KeyCodes.SHIFT)
if selection.Type == 3 and not shift_or_ctrl: #text selected
selection.TextRange2.Text = "" #remove selected text first and then insert symbol
self.insert_symbol_into_text(selection.TextRange2, item)
elif PPTSymbolsSettings.convert_into_text() and selection.Type == 2 and not shift_or_ctrl: #shapes selected
self.insert_symbol_into_shapes(pplib.get_shapes_from_selection(selection), item)
else: #convert into shape or bitmap
if PPTSymbolsSettings.get_convert_into_bitmap():
self.create_symbol_bitmap(selection.SlideRange(1), item)
else:
self.create_symbol_shape(selection.SlideRange(1), item)
def _add_to_recent(self, item):
PPTSymbolsSettings.add_to_recent(item)
def insert_symbol_into_text(self, textrange, item):
if item[0] or PPTSymbolsSettings.unicode_font is not None: #font name is given, then insert as symbol
font = item[0] or self.fallback_font
try:
char_number = ord(item[1]) #ord does not work for higher level unicode, e.g. emojis, and throws TypeError
if char_number > 61695: #for higher numbers (f0ff works, f100 does not) InsertSymbol stops working; the default ppt symbol picker also only shows unicode chars up to f0ff.
raise TypeError("character number too large for InsertSymbol") #fall back to InsertAfter
placeholder_char = textrange.InsertAfter("X") #append placeholder symbol so that InsertSymbol behaves the same as InsertAfter
return placeholder_char.InsertSymbol(font, char_number, -1) #symbol: FontName, CharNumber (decimal), Unicode=True
except TypeError:
char_inserted = textrange.InsertAfter(item[1]) #append symbol text
#NameFarEast and NameComplexScript should be writable, but they are not once InsertSymbol has been used (the symbol's font sticks). The only way to reset these values and show the icon correctly is setting them to '+mn-..'
char_inserted.Font.NameFarEast = "+mn-ea"
char_inserted.Font.NameComplexScript = "+mn-cs"
char_inserted.Font.Name = font #font name
return char_inserted
else:
return textrange.InsertAfter(item[1]) #append symbol text
# if item[0]:
# char_inserted.Font.Name = item[0] #font name
def insert_symbol_into_shapes(self, shapes, item):
#pplib.iterate_shape_textframes(shapes, lambda textframe: self.insert_symbol_into_text(textframe.TextRange, item))
for textframe in pplib.iterate_shape_textframes(shapes):
self.insert_symbol_into_text(textframe.TextRange, item)
# for shape in shapes:
# if shape.HasTextFrame == -1:
# self.insert_symbol_into_text(shape.TextFrame2.TextRange, item)
def create_symbol_shape(self, slide, item):
shape = slide.shapes.addTextbox(
#office.MsoAutoShapeType.msoShapeRectangle.value__,
1,
100,100,200,200)
shape.TextFrame2.WordWrap = 0
shape.TextFrame2.AutoSize = 1 #ppAutoSizeShapeToFitText
shape.TextFrame2.MarginBottom = 0
shape.TextFrame2.MarginTop = 0
shape.TextFrame2.MarginLeft = 0
shape.TextFrame2.MarginRight = 0
self.insert_symbol_into_text(shape.TextFrame2.TextRange, item)
# if item[0]:
# shape.TextFrame.TextRange.Font.Name = item[0] #font name
# shape.TextFrame.TextRange.Text = item[1] #symbol text
if PPTSymbolsSettings.get_convert_into_shape(): #convert into shape
try:
orig_fontsize = shape.TextFrame2.TextRange.Font.Size
shape.TextFrame2.TextRange.Font.Size = 60
shape.TextFrame2.TextRange.ParagraphFormat.Bullet.Visible = 0
new_shape = pplib.convert_text_into_shape(shape)
new_shape.TextFrame2.TextRange.Font.Size = orig_fontsize
except:
shape.select()
else:
new_shape.select()
else:
shape.select()
def create_symbol_bitmap(self, slide, item):
import tempfile, os
font = item[0] or self.fallback_font
img = bkt.ribbon.SymbolsGallery.create_symbol_image(font, item[1], 400, None)
tmpfile = os.path.join(tempfile.gettempdir(), "bkt-symbol.png")
img.Save(tmpfile, Drawing.Imaging.ImageFormat.Png)
shape = slide.shapes.AddPicture(tmpfile, 0, -1, 200, 200) #FileName, LinkToFile, SaveWithDocument, Left, Top
shape.select()
os.remove(tmpfile)
class PPTSymbolsGalleryRecent(PPTSymbolsGallery):
@property
def symbols(self):
return PPTSymbolsSettings.recent_symbols
@symbols.setter
def symbols(self, value):
pass
def get_item_image(self, index):
try:
return super(PPTSymbolsGalleryRecent, self).get_item_image(index)
except:
return super(PPTSymbolsGalleryRecent, self).create_symbol_image("Arial", "?")
def button_get_label(self, index):
try:
return self.symbols[index][2]
except:
return "Zuletzt verwendet: Undefined"
def button_get_visible(self, index):
try:
return self.symbols[index] is not None
except:
return False
def get_index_as_button(self, index):
return bkt.ribbon.Button(
id="{}_button_{}".format(self.id, index),
get_label=bkt.Callback(lambda: self.button_get_label(index)),
on_action=bkt.Callback(lambda context, selection: self.on_action_indexed(None, index, context, selection)),
get_image=bkt.Callback(lambda: self.get_item_image(index)),
get_visible=bkt.Callback(lambda: self.button_get_visible(index)),
)
class LocpinGallery(bkt.ribbon.Gallery):
def __init__(self, locpin=None, item_supertip="Shape-Fixpunkt bzw. Fixierung bei Änderung {}", **kwargs):
self.locpin = locpin or pplib.GlobalLocPin
self.items = [
("fix_locpin_tl", "Oben-links", item_supertip.format("oben-links")),
("fix_locpin_tm", "Oben-mitte", item_supertip.format("oben-mitte")),
("fix_locpin_tr", "Oben-rechts", item_supertip.format("oben-rechts")),
("fix_locpin_ml", "Mitte-links", item_supertip.format("mitte-links")),
("fix_locpin_mm", "Mitte-mitte", item_supertip.format("mitte-mitte")),
("fix_locpin_mr", "Mitte-rechts", item_supertip.format("mitte-rechts")),
("fix_locpin_bl", "Unten-links", item_supertip.format("unten-links")),
("fix_locpin_bm", "Unten-mitte", item_supertip.format("unten-mitte")),
("fix_locpin_br", "Unten-rechts", item_supertip.format("unten-rechts")),
]
my_kwargs = dict(
# get_enabled=bkt.apps.ppt_shapes_or_text_selected,
columns="3",
item_height="24",
item_width="24",
show_item_label=False,
on_action_indexed = bkt.Callback(self.locpin_on_action_indexed),
get_selected_item_index = bkt.Callback(lambda: self.locpin.index),
get_item_count = bkt.Callback(lambda: len(self.items)),
get_item_label = bkt.Callback(lambda index: self.items[index][1]),
get_item_image = bkt.Callback(self.locpin_get_image, context=True),
get_item_screentip = bkt.Callback(lambda index: self.items[index][1]),
get_item_supertip = bkt.Callback(lambda index: self.items[index][2]),
# children = [
# Item(image=gal_item[0], screentip=gal_item[1], supertip=gal_item[2])
# for gal_item in self.items
# ]
)
if not "image" in kwargs and not "image_mso" in kwargs:
my_kwargs["get_image"] = bkt.Callback(self.locpin_get_image, context=True)
my_kwargs.update(kwargs)
super(LocpinGallery, self).__init__(**my_kwargs)
def locpin_on_action_indexed(self, selected_item, index):
self.locpin.index = index
def locpin_get_image(self, context, index=None):
if index is None:
return context.python_addin.load_image(self.items[self.locpin.index][0])
else:
return context.python_addin.load_image(self.items[index][0])
class PositionGallery(bkt.ribbon.Gallery):
# items: [label, position, reference]
# position: [left, top, width, height]
# values can be absolute or percentage
# reference: CONTENT / SLIDE / ABS
# values are converted according to reference
items = [
[u"Volle Fläche", [ 0, 0, 1, 1], 'CONTENT'],
[u"2/3 Links", [ 0, 0, 2./3, 1], 'CONTENT'],
[u"2/3 Rechts", [1./3, 0, 2./3, 1], 'CONTENT'],
[u"1/2 Links", [ 0, 0, .5, 1], 'CONTENT'],
[u"1/2 Mitte", [.25, 0, .5, 1], 'CONTENT'],
[u"1/2 Rechts", [ .5, 0, .5, 1], 'CONTENT'],
[u"1/3 Links", [ 0, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Mitte", [1./3, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Rechts", [2./3, 0, 1./3, 1], 'CONTENT'],
[u"1/6 Oben", [ 0, 0, 1, 1./6], 'CONTENT'],
[u"1/6 Unten", [ 0, 5./6, 1, 1./6], 'CONTENT']
]
def __init__(self, positions=None, label="Standardpositionen", columns=3, **kwargs):
self.items = positions or PositionGallery.items
super(PositionGallery, self).__init__(
label = label,
columns = columns,
image_mso='PositionAnchoringGallery',
supertip=u"Positioniere die ausgewählten Shapes auf eine Standardposition.",
children=[
bkt.ribbon.Button(
label="Benutzerdef. Bereich festlegen",
supertip="Der benutzerdefinierte Bereich wird anhand des gewählten Shapes festgelegt. Dieser Bereich ist anschließend über die Gallery wählbar und wird dauerhaft in der aktuellen Prästentation vorgehalten.",
on_action=bkt.Callback(self.set_userdefined_area),
get_enabled = bkt.get_enabled_auto
)
],
**kwargs
)
def on_action_indexed(self, selected_item, index, context, **kwargs):
''' reposition shapes according to the settings of the clicked element '''
item = self.items[index]
position = item[1]
reference = item[2]
#self.change_position(selection, shapes, item[1])
# reference size
if reference == 'CONTENT':
ref_left,ref_top,ref_width,ref_height = pplib.slide_content_size(context.slide)
else: # SLIDE / ABS
page_setup = context.presentation.PageSetup
ref_left,ref_top = 0, 0
ref_width,ref_height = page_setup.SlideWidth, page_setup.SlideHeight
# target size
left,top,width,height = self.rect_from_definition(position, ref_frame=[ref_left,ref_top,ref_width, ref_height])
frame = pplib.BoundingFrame.from_rect(left, top, width, height)
if 'on_position_change' in self._callbacks:
if context:
return context.invoke_callback(self._callbacks['on_position_change'], target_frame=frame, **kwargs)
def get_item_count(self, presentation):
self.init_userdefined_area_item(presentation)
return len(self.items)
# def get_enabled(self, shapes):
# return True
# def get_item_label(self, index):
# item = self.items[index]
# return "%s" % getattr(NumberedShapes, 'label_' + item['label'])[index%self.columns]
def get_item_image(self, index, presentation):
''' creates an item image with target area according to settings in the specified item '''
# retrieve item-settings
item = self.items[index]
return self.create_image(item[1], item[2], presentation)
def get_item_screentip(self, index):
# retrieve item-settings
item = self.items[index]
return 'Positionierung: ' + item[0]
def get_item_supertip(self, index):
return 'Verwende angezeigten Position/Größe.'
def create_image(self, position, reference, presentation):
# create bitmap, define pen/brush
height = 40
width = height*16./9
img = Drawing.Bitmap(width, height)
g = Drawing.Graphics.FromImage(img)
# reference size
if reference == 'CONTENT':
v_offset = height/5
v_ref = (height*4)/5
left,top,fill_width,fill_height = self.rect_from_definition(position, ref_frame=[0,v_offset,width, v_ref])
else: # SLIDE / ABS
ref_width,ref_height = presentation.PageSetup.SlideWidth, presentation.PageSetup.SlideHeight
left,top,fill_width,fill_height = self.rect_from_definition(position, ref_frame=[0,0,ref_width, ref_height])
left = left /ref_width * width
fill_width = fill_width /ref_width * width
top = top /ref_height * height
fill_height = fill_height/ref_height * height
color = Drawing.ColorTranslator.FromHtml('#ffdd0000')
brush = Drawing.SolidBrush(color)
g.FillRectangle(brush, Drawing.Rectangle(round(left),round(top), round(fill_width), round(fill_height)))
color = Drawing.ColorTranslator.FromHtml('#ff999999')
pen = Drawing.Pen(color,1)
g.DrawRectangle(pen, Drawing.Rectangle(0,0, width-1, height/5-1))
g.DrawRectangle(pen, Drawing.Rectangle(0,0, width-1, height-1))
return img
def rect_from_definition(self, pos_definition, ref_frame=[0,0,640,480]):
left = self.length_from_definition(pos_definition[0], ref_frame[2]) + ref_frame[0]
top = self.length_from_definition(pos_definition[1], ref_frame[3]) + ref_frame[1]
width = self.length_from_definition(pos_definition[2], ref_frame[2])
height = self.length_from_definition(pos_definition[3], ref_frame[3])
return left, top, width, height
def length_from_definition(self, length_definition, reference):
if type(length_definition) == list:
# allow [150, 50%]
l = 0
for ldef in length_definition:
l += self.length_from_definition(ldef, reference)
return l
elif type(length_definition) in [int, float, long]:
if length_definition < 0:
# negative values specify distance 'from right'
return reference - self.length_from_definition(-length_definition, reference)
elif length_definition <= 1:
# percentage values
return reference * length_definition
else:
# absolute values
return length_definition
else:
return 10
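# Informal examples of the definition grammar handled above: 0.5 resolves to
# half the reference length, 150 to 150 pt absolute, -100 to 100 pt measured
# from the right/bottom edge, and [150, 0.5] to the sum 150 pt + half the
# reference length.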
## userdefined area
def set_userdefined_area(self, presentation, shapes):
if len(shapes) == 1:
pplib.ContentArea.define_contentarea(presentation, shapes[0])
else:
frame = pplib.BoundingFrame.from_shapes(shapes)
pplib.ContentArea.define_contentarea(presentation, frame)
self.init_userdefined_area_item(presentation)
def init_userdefined_area_item(self, presentation):
#due to performance check first if tag exists at all
if pplib.ContentArea.isset_contentarea(presentation):
left, top, width, height = pplib.ContentArea.read_contentarea(presentation)
if len(self.items) == 12:
self.items.pop()
self.items.append([u"Benutzerdef. Bereich", [left, top, width, height], 'ABS'])
| 42.512658
| 242
| 0.628703
| 3,124
| 26,868
| 5.21735
| 0.151729
| 0.024971
| 0.01767
| 0.010307
| 0.390269
| 0.314068
| 0.259341
| 0.226885
| 0.188662
| 0.152095
| 0
| 0.016738
| 0.275086
| 26,868
| 631
| 243
| 42.580032
| 0.820096
| 0.156469
| 0
| 0.291765
| 0
| 0.002353
| 0.078865
| 0.016415
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0.004706
| 0.016471
| 0.021176
| 0.272941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc74af5f799cda766e2f5e64ed34c0e410d241a2
| 1,402
|
py
|
Python
|
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
# from .noise_gen import CGMNoiseGenerator
from .noise_gen import CGMNoise
import pandas as pd
import logging
logger = logging.getLogger(__name__)
class CGMSensor(object):
def __init__(self, params, seed=None):
self._params = params
self.name = params.Name
self.sample_time = params.sample_time
self.seed = seed
self._last_CGM = 0
@classmethod
def withName(cls, name, sensor_para_file, **kwargs):
sensor_params = pd.read_csv(sensor_para_file)
params = sensor_params.loc[sensor_params.Name == name].squeeze()
return cls(params, **kwargs)
def measure(self, patient):
if patient.t % self.sample_time == 0:
BG = patient.observation.Gsub
CGM = BG + next(self._noise_generator)
CGM = max(CGM, self._params["min"])
CGM = min(CGM, self._params["max"])
self._last_CGM = CGM
return CGM
# Zero-Order Hold
return self._last_CGM
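# Zero-order hold in practice: between sampling instants the sensor keeps
# returning the last measured value. A rough sketch (hypothetical sensor name,
# parameter file and patient object):
# sensor = CGMSensor.withName('Dexcom', 'sensor_params.csv', seed=1)
# cgm_a = sensor.measure(patient) # fresh, noisy sample at a sampling instant
# cgm_b = sensor.measure(patient) # off-grid call just repeats cgm_a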
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
self._noise_generator = CGMNoise(self._params, seed=seed)
def reset(self):
logger.debug('Resetting CGM sensor ...')
self._noise_generator = CGMNoise(self._params, seed=self.seed)
self._last_CGM = 0
if __name__ == '__main__':
pass
| 26.961538
| 72
| 0.624108
| 174
| 1,402
| 4.741379
| 0.344828
| 0.067879
| 0.053333
| 0.043636
| 0.135758
| 0.09697
| 0.09697
| 0
| 0
| 0
| 0
| 0.002947
| 0.273894
| 1,402
| 51
| 73
| 27.490196
| 0.807466
| 0.039943
| 0
| 0.052632
| 0
| 0
| 0.028295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0.026316
| 0.078947
| 0.026316
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc796051d35cf6cd654ce6528d4ed35ac535ec1b
| 1,523
|
py
|
Python
|
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=290 lang=python3
#
# [290] Word Pattern
#
# https://leetcode.com/problems/word-pattern/description/
#
# algorithms
# Easy (35.86%)
# Likes: 825
# Dislikes: 113
# Total Accepted: 164K
# Total Submissions: 455.9K
# Testcase Example: '"abba"\n"dog cat cat dog"'
#
# Given a pattern and a string str, find if str follows the same pattern.
#
# Here follow means a full match, such that there is a bijection between a
# letter in pattern and a non-empty word in str.
#
# Example 1:
#
#
# Input: pattern = "abba", str = "dog cat cat dog"
# Output: true
#
# Example 2:
#
#
# Input:pattern = "abba", str = "dog cat cat fish"
# Output: false
#
# Example 3:
#
#
# Input: pattern = "aaaa", str = "dog cat cat dog"
# Output: false
#
# Example 4:
#
#
# Input: pattern = "abba", str = "dog dog dog dog"
# Output: false
#
# Notes:
# You may assume pattern contains only lowercase letters, and str contains
# lowercase letters that may be separated by a single space.
#
#
# @lc code=start
from collections import defaultdict
class Solution:
def wordPattern(self, pattern: str, str1: str) -> bool:
if len(pattern)!=len(str1.split()):
return False
abmap = defaultdict(str)
bamap = defaultdict(str)
for a,b in zip(pattern, str1.split()):
if abmap[a]=='' and bamap[b]=='':
abmap[a]=b
bamap[b]=a
elif abmap[a]!=b or bamap[b]!=a:
return False
return True
# @lc code=end
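# Informal check mirroring Examples 1 and 4 above:
# Solution().wordPattern("abba", "dog cat cat dog") -> True (bijection holds)
# Solution().wordPattern("abba", "dog dog dog dog") -> False (a and b map to the same word)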
| 22.397059
| 74
| 0.61392
| 214
| 1,523
| 4.369159
| 0.490654
| 0.025668
| 0.038503
| 0.038503
| 0.115508
| 0.091979
| 0.059893
| 0
| 0
| 0
| 0
| 0.027385
| 0.25673
| 1,523
| 67
| 75
| 22.731343
| 0.798587
| 0.610637
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc79d0b1cabca396208cd2aeb132525a435758f4
| 705
|
py
|
Python
|
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
from torch import nn
class MyAwesomeModel(nn.Module):
def __init__(self):
super().__init__()
self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=5, kernel_size=3),
nn.ReLU(),
nn.Conv2d(in_channels=5, out_channels=3, kernel_size=3, stride=2)
)
self.fc = nn.Sequential(nn.Linear(432, 100),
nn.ReLU(),
nn.Linear(100,10),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.cnn(x).view(x.size(0), -1)
return self.fc(x)
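# Quick shape sanity check (a sketch; assumes 28x28 single-channel inputs such
# as MNIST, which is what makes the 432 above work out: 3 channels * 12 * 12
# after the two convolutions):
# out = MyAwesomeModel()(torch.randn(2, 1, 28, 28))
# out.shape -> torch.Size([2, 10])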
| 32.045455
| 97
| 0.438298
| 78
| 705
| 3.782051
| 0.474359
| 0.054237
| 0.094915
| 0.122034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.44539
| 705
| 21
| 98
| 33.571429
| 0.695652
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc7b31007719919e0de3183e896e2da210eb63a7
| 1,706
|
py
|
Python
|
manage.py
|
isijara/zulip
|
403f4dafcc71369f3b1143b9f7073cd5d76bf357
|
[
"Apache-2.0"
] | 1
|
2019-04-14T20:31:55.000Z
|
2019-04-14T20:31:55.000Z
|
manage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 7
|
2020-09-06T14:54:30.000Z
|
2022-02-10T18:51:14.000Z
|
manage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 9
|
2019-11-04T18:59:29.000Z
|
2022-03-22T17:46:37.000Z
|
#!/usr/bin/env python3
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
if __name__ == "__main__":
if 'posix' in os.name and os.geteuid() == 0:
print("manage.py should not be run as root. Use `su zulip` to drop root.")
sys.exit(1)
if (os.access('/etc/zulip/zulip.conf', os.R_OK) and not
os.access('/etc/zulip/zulip-secrets.conf', os.R_OK)):
# The best way to detect running manage.py as another user in
# production before importing anything that would require that
# access is to check for access to /etc/zulip/zulip.conf (in
# which case it's a production server, not a dev environment)
# and lack of access for /etc/zulip/zulip-secrets.conf (which
# should be only readable by root and zulip)
print("Error accessing Zulip secrets; manage.py in production must be run as the zulip user.")
sys.exit(1)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from scripts.lib.zulip_tools import log_management_command
log_management_command(" ".join(sys.argv), settings.MANAGEMENT_LOG_PATH)
os.environ.setdefault("PYTHONSTARTUP", os.path.join(BASE_DIR, "scripts/lib/pythonrc.py"))
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
try:
execute_from_command_line(sys.argv)
except CommandError as e:
print(e, file=sys.stderr)
sys.exit(1)
| 42.65
| 102
| 0.694607
| 258
| 1,706
| 4.457364
| 0.414729
| 0.030435
| 0.045217
| 0.027826
| 0.066957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004392
| 0.199297
| 1,706
| 39
| 103
| 43.74359
| 0.837482
| 0.213365
| 0
| 0.111111
| 0
| 0
| 0.235955
| 0.071161
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.259259
| 0
| 0.259259
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc7b521791f08dc13fece1c31003d055797c5819
| 2,385
|
py
|
Python
|
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
import json, requests
from bs4 import BeautifulSoup
def fetch():
r = requests.get('http://clrhome.org/table/')
if not r.ok:
print('Cannot fetch {}'.format(r.url))
return None
# remove newlines
text = r.text.replace('\n', '')
# Return the data as a BeautifulSoup object for easy querying
return BeautifulSoup(text, 'html.parser')
def table_title(table):
return 'main' if table['title'] == '' else table['title'].lower()
def parse_tables(page):
return {table_title(table): parse_table(table)
for table in page.find_all('table')}
def parse_table(table):
print('Table {}'.format(table_title(table)))
opcodes = []
for td in table.find_all('td', axis=True):
hi = int(td.parent.find('th').text, 16) # row
lo = td.parent.index(td) - 1 # column
code = hi << 4 | lo
specs = td['axis'].split('|')
# Conditional instructions have different durations depending on how they
# branch so the possible durations are stored in an array. Otherwise, the
# duration is just stored as a single value.
cycles = list(map(int, specs[2].split('/'))) if '/' in specs[2] else int(specs[2])
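# E.g. a conditional RET row is listed as '11/5' and yields cycles == [11, 5]
# (taken/not taken), while an unconditional NOP is listed as '4' and yields
# cycles == 4.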
opcodes.append({
'opcode': code,
'mnemonics': normalize(td.text).strip(),
'size': int(specs[1]),
'cycles': cycles,
'flags': specs[0],
'description': specs[3]
})
print(' {}: {}'.format(hex(code), td.text))
return opcodes
def normalize(mnemonics):
parts = mnemonics.split(' ')
name = parts[0]
operands = parts[1].split(',') if len(parts) > 1 else []
return '{} {}'.format(name,
','.join(normalize_operand(o, name) for o in operands))
def normalize_operand(operand, instr_name):
# Flag condition
if instr_name in ['jr', 'jp', 'ret', 'call'] and operand in ['c', 'nc', 'z', 'nz', 'po', 'pe', 'p', 'm']:
operand = 'f_' + {
'po': 'np',
'pe': 'p',
'p': 'ns',
'm': 's'
}.get(operand, operand)
# Alt registers
elif operand == 'af\'':
operand = 'af_'
return operand
if __name__ == '__main__':
"""
This script fetches the contents of a webpage that contains
nicely formatted data about the Z80 opcodes and outputs it
to JSON.
"""
page = fetch()
if page is not None:
opcodes = parse_tables(page)
with open('opcodes.json', 'w') as output:
json.dump(opcodes, output, indent=2)
| 24.84375
| 107
| 0.607547
| 326
| 2,385
| 4.374233
| 0.47546
| 0.035063
| 0.031557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009214
| 0.226415
| 2,385
| 95
| 108
| 25.105263
| 0.763686
| 0.126625
| 0
| 0
| 0
| 0
| 0.11042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.035714
| 0.035714
| 0.267857
| 0.053571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc7fdbab2e2a6960b77a8cd250963e5c2c2a372b
| 5,046
|
py
|
Python
|
tools/testutils.py
|
sktollman/p4c
|
380830f6c26135d1d65e1312e3ba2da628c18145
|
[
"Apache-2.0"
] | 1
|
2019-01-01T21:46:03.000Z
|
2019-01-01T21:46:03.000Z
|
tools/testutils.py
|
cslev/p4c
|
008f01ebc4bc0fcada4e674e9916b156427512ca
|
[
"Apache-2.0"
] | null | null | null |
tools/testutils.py
|
cslev/p4c
|
008f01ebc4bc0fcada4e674e9916b156427512ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Defines helper functions for a general testing framework. Used by multiple
Python testing scripts in the backends folder."""
from __future__ import print_function
import subprocess
from subprocess import Popen
from threading import Timer
import sys
import os
TIMEOUT = 10 * 60
SUCCESS = 0
FAILURE = 1
SKIPPED = 2 # used occasionally to indicate that a test was not executed
def is_err(p4filename):
""" True if the filename represents a p4 program that should fail. """
return "_errors" in p4filename
def report_err(file, *message):
""" Write message to given file, report to stderr if verbose """
print("***", file=sys.stderr, *message)
if (file and file != sys.stderr):
err_file = open(file, "a+")
print("***", file=err_file, *message)
err_file.close()
def report_output(file, verbose, *message):
""" Write message to given file, report to stdout if verbose """
if (verbose):
print(file=sys.stdout, *message)
if (file and file != sys.stdout):
out_file = open(file, "a+")
print("", file=out_file, *message)
out_file.close()
def byte_to_hex(byteStr):
""" Convert byte sequences to a hex string. """
return ''.join(["%02X " % ord(x) for x in byteStr]).strip()
def hex_to_byte(hexStr):
""" Convert hex strings to bytes. """
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return ''.join(bytes)
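# Round-trip sketch for the two helpers above:
# byte_to_hex('\x01\xab') -> '01 AB'
# hex_to_byte('01 AB') -> '\x01\xab'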
def compare_pkt(outputs, expected, received):
""" Compare two given byte sequences and check if they are the same.
Report errors if this is not the case. """
received = ''.join(byte_to_hex(str(received)).split()).upper()
expected = ''.join(expected.split()).upper()
if len(received) < len(expected):
report_err(outputs["stderr"], "Received packet too short",
len(received), "vs", len(expected))
return FAILURE
for i in range(0, len(expected)):
if expected[i] == "*":
continue
if expected[i] != received[i]:
report_err(outputs["stderr"], "Received packet ", received)
report_err(outputs["stderr"], "Packet different at position", i,
": expected", expected[i], ", received", received[i])
report_err(outputs["stderr"], "Expected packet ", expected)
return FAILURE
return SUCCESS
def open_process(verbose, args, outputs):
""" Run the given arguments as a subprocess. Time out after TIMEOUT
seconds and report failures or stdout. """
report_output(outputs["stdout"],
verbose, "Writing", args)
proc = None
if outputs["stderr"] is not None:
try:
proc = Popen(args, stdout=subprocess.PIPE, shell=True,
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
except OSError as e:
report_err(outputs["stderr"], "Failed executing: ", e)
if proc is None:
# Never even started
report_err(outputs["stderr"], "Process failed to start")
return proc
def run_process(verbose, proc, timeout, outputs, errmsg):
def kill(process):
process.kill()
timer = Timer(timeout, kill, [proc]) # use the caller-supplied timeout rather than the global TIMEOUT
try:
timer.start()
out, err = proc.communicate()
finally:
timer.cancel()
if out:
msg = ("\n########### PROCESS OUTPUT BEGIN:\n"
"%s########### PROCESS OUTPUT END\n" % out)
report_output(outputs["stdout"], verbose, msg)
if proc.returncode != SUCCESS:
report_err(outputs["stderr"], "Error %d: %s\n%s" %
(proc.returncode, errmsg, err))
else:
# Also report non fatal warnings in stdout
if err:
report_err(outputs["stderr"], err)
return proc.returncode
def run_timeout(verbose, args, timeout, outputs, errmsg):
proc = open_process(verbose, args, outputs)
if proc is None:
return FAILURE
report_output(outputs["stdout"],
verbose, "Executing", args)
return run_process(verbose, proc, timeout, outputs, errmsg)
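# Usage sketch (a hedged example, not taken from the callers of this module):
# outputs = {"stdout": None, "stderr": sys.stderr}
# rc = run_timeout(True, "echo hello", TIMEOUT, outputs, "echo failed")
# rc is the subprocess return code, or FAILURE if the process never started.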
def check_root():
""" This function returns False if the user does not have root privileges.
Caution: Only works on Unix systems """
return (os.getuid() == 0)
| 33.865772
| 78
| 0.630202
| 649
| 5,046
| 4.838213
| 0.349769
| 0.025796
| 0.040764
| 0.056051
| 0.188535
| 0.126115
| 0.052229
| 0.052229
| 0
| 0
| 0
| 0.00819
| 0.249901
| 5,046
| 148
| 79
| 34.094595
| 0.8214
| 0.282798
| 0
| 0.096774
| 0
| 0
| 0.098664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11828
| false
| 0
| 0.064516
| 0
| 0.301075
| 0.053763
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8027a9a53c2f0f832850a598757b1b43c5255c
| 6,237
|
py
|
Python
|
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | null | null | null |
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | null | null | null |
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | 1
|
2021-09-15T18:39:33.000Z
|
2021-09-15T18:39:33.000Z
|
class Heap:
def __init__(self):
self.items = dict() # key -> (value, index)
self.indexes = [] # heap index -> key, so positions can be looked up by key
# Helper functions
def swap(self, i, j):
x = self.indexes[i] # key of 1 item
y = self.indexes[j] # key of 2 item
# swap keys in index array
self.indexes[i] = y
self.indexes[j] = x
temp = self.items[x][1] # index of 1 item
# swap indexes in dictionary
self.items.update({x: (self.items[x][0], self.items[y][1])})
self.items.update({y: (self.items[y][0], temp)})
def bigger(self, i, j):
if self.indexes[i] <= self.indexes[j]:
return False
else:
return True
# Parent/child existence checks
def hasParent(self, i):
if (i - 1)/2 >= 0:
return True
return False
def parentIndex(self, i):
return int((i - 1)/2)
def hasLeft(self, i):
if i*2 + 1 < len(self.indexes):
return True
return False
def leftIndex(self, i):
return int(i*2 + 1)
def hasRight(self, i):
if i*2 + 2 < len(self.indexes):
return True
return False
def rightIndex(self, i):
return int(i*2 + 2)
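# Index arithmetic of the implicit binary heap used above: for a node at
# index i, parent = (i-1)//2, left child = 2i+1, right child = 2i+2.
# E.g. i=4 -> parent 1, children 9 and 10.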
# heapifys
def heapifyUp(self, i=None):
if i:
index = i
else:
index = len(self.indexes) - 1
while self.hasParent(index) and self.bigger(self.parentIndex(index), index):
self.swap(self.parentIndex(index), index)
index = self.parentIndex(index)
def heapifyDown(self, i=0):
index = i
while self.hasLeft(index):
smaller = self.leftIndex(index)
if self.hasRight(index) and self.bigger(self.leftIndex(index), self.rightIndex(index)):
smaller = self.rightIndex(index)
if self.bigger(smaller, index):
break
else:
self.swap(index, smaller)
index = smaller
# all needed methods
def add(self, key, data):
if self.items.get(key, None):
raise(Exception)
self.items[key] = (data, int(len(self.indexes)))
self.indexes.append(key)
self.heapifyUp()
def set(self, key, data):
temp = self.items.get(key, None)
if not temp:
raise(Exception)
self.items[key] = (data, temp[1])
def delete(self, key):
temp = self.items.get(key, None)
if not temp:
raise(Exception)
if len(self.indexes) > 1:
lastKey = self.indexes[-1]
last = self.items.get(lastKey, None)
# set last item index of deleted
self.items.update({lastKey: (last[0], temp[1])})
# set key of last item to deleted index
self.indexes[temp[1]] = lastKey
self.indexes.pop()
del self.items[key]
if temp[1] < len(self.indexes): # don't heapify if the deleted element was the last one
self.heapifyDown(i=temp[1])
self.heapifyUp(i=temp[1])
def search(self, key):
temp = self.items.get(key, None)
if temp:
print('1', temp[1], temp[0])
else:
print('0')
def min(self):
if len(self.indexes) == 0:
raise(Exception)
key = self.indexes[0]
print(key, '0', self.items[key][0])
def max(self):
if len(self.indexes) == 0:
raise(Exception)
i = int(len(self.indexes)/2)
maxKey = self.indexes[i]
index = i
while i < len(self.indexes):
if maxKey < self.indexes[i]:
maxKey = self.indexes[i]
index = i
i += 1
print(maxKey, index, self.items[maxKey][0])
def extract(self):
if len(self.indexes) == 0:
raise(Exception)
rootKey = self.indexes[0]
rootData = self.items[rootKey][0]
del self.items[rootKey]
if len(self.indexes) > 1:
self.indexes[0] = self.indexes.pop()
# set top item index to 0
self.items.update({self.indexes[0] : (self.items[self.indexes[0]][0], 0)})
self.heapifyDown()
else:
self.indexes.pop()
print(rootKey, rootData)
def print(self):
height = 0
index = 0
out = ''
i = 0
if len(self.indexes) == 0:
out += '_\n'
print('_')
return
while i < len(self.indexes):
lineLen = 1 << height
index += 1
key = self.indexes[i]
out += '[' + str(key) + ' ' + self.items[key][0]
if height != 0:
out += ' ' + str(self.indexes[self.parentIndex(i)])
out += ']'
if index == lineLen:
out += '\n'
index = 0
height += 1
else:
out += ' '
i += 1
if index != 0 and index < lineLen:
out += '_ ' * (lineLen - index)
print(out[0:-1])
else:
print(out, end='')
cycle = True
heap = Heap()
while cycle:
try:
line = input()
cmd = line.split(' ', 2)
try:
if len(cmd) == 1 and cmd[0] == '':
continue
if len(cmd) == 2 and cmd[0] == '' and cmd[1] == '':
continue
if cmd[0] == 'add':
heap.add(int(cmd[1]), cmd[2])
elif cmd[0] == 'set':
heap.set(int(cmd[1]), cmd[2])
elif cmd[0] == 'delete':
heap.delete(int(cmd[1]))
elif cmd[0] == 'search':
heap.search(int(cmd[1]))
elif cmd[0] == 'min':
heap.min()
elif cmd[0] == 'max':
heap.max()
elif cmd[0] == 'extract':
heap.extract()
elif cmd[0] == 'print':
heap.print()
else:
raise(Exception)
except Exception:
print('error')
continue
except Exception:
cycle = False
| 25.048193
| 99
| 0.467372
| 739
| 6,237
| 3.935047
| 0.136671
| 0.139959
| 0.0674
| 0.033012
| 0.246561
| 0.174003
| 0.120358
| 0.120358
| 0.045048
| 0.031637
| 0
| 0.023268
| 0.400513
| 6,237
| 248
| 100
| 25.149194
| 0.75448
| 0.054834
| 0
| 0.308989
| 0
| 0
| 0.009866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106742
| false
| 0
| 0
| 0.016854
| 0.179775
| 0.067416
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc807e3864743112b7b85584b7afbab826c8463a
| 2,332
|
py
|
Python
|
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | 1
|
2021-01-27T03:20:45.000Z
|
2021-01-27T03:20:45.000Z
|
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | null | null | null |
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | 1
|
2020-03-24T21:28:31.000Z
|
2020-03-24T21:28:31.000Z
|
from __future__ import unicode_literals
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIRequestFactory, force_authenticate
from django_comments_xtd import django_comments
from django_comments_xtd.api.views import CommentCreate
from django_comments_xtd.tests.models import Article, Diary
request_factory = APIRequestFactory()
def post_comment(data, auth_user=None):
request = request_factory.post(reverse('comments-xtd-api-create'), data)
if auth_user:
force_authenticate(request, user=auth_user)
view = CommentCreate.as_view()
return view(request)
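# The helper above follows DRF's standard testing pattern: build the request
# with APIRequestFactory, optionally force_authenticate() it, then invoke the
# view callable directly instead of going through the test client.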
class CommentCreateTestCase(TestCase):
def setUp(self):
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="October", slug="october", body="What I did on October...")
self.form = django_comments.get_form()(self.article)
def test_post_returns_2xx_response(self):
data = {"name": "Bob", "email": "fulanito@detal.com",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine...",
"honeypot": ""}
data.update(self.form.initial)
response = post_comment(data)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.mock_mailer.call_count, 1)
def test_post_returns_4xx_response(self):
# It uses an authenticated user, but the user has no mail address.
self.user = User.objects.create_user("bob", "", "pwd")
data = {"name": "", "email": "",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine...",
"honeypot": ""}
data.update(self.form.initial)
response = post_comment(data, auth_user=self.user)
self.assertEqual(response.status_code, 400)
self.assertTrue('name' in response.data)
self.assertTrue('email' in response.data)
self.assertEqual(self.mock_mailer.call_count, 0)
| 37.612903
| 77
| 0.677959
| 289
| 2,332
| 5.304498
| 0.387543
| 0.045662
| 0.044357
| 0.041096
| 0.264188
| 0.200913
| 0.200913
| 0.151337
| 0.151337
| 0.151337
| 0
| 0.008677
| 0.209262
| 2,332
| 61
| 78
| 38.229508
| 0.822668
| 0.027444
| 0
| 0.166667
| 0
| 0
| 0.127096
| 0.025596
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8126cdbc2e20b53b03bc4d747c2a82d0fde975
| 892
|
py
|
Python
|
LC/358.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | 2
|
2018-02-24T17:20:02.000Z
|
2018-02-24T17:25:43.000Z
|
LC/358.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
LC/358.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
import collections

class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
## greedy: count keeps the # of chars, valid keeps the leftmost valid index of a char.
res=[]
# count # of chars
d=collections.defaultdict(int)
for c in str:
d[c]+=1
# create a valid dict
v=collections.defaultdict(int)
# add char one by one, that with max # first, must have valid leftmost index
for i in range(len(str)):
c=None
for key in d:
if (not c or d[key]>d[c]) and d[key]>0 and v[key]<=i: # get c with max # and be valid
c=key
if not c: return ''
res.append(c)
d[c]-=1
v[c]=i+k
return ''.join(res)
| 29.733333
| 101
| 0.465247
| 122
| 892
| 3.401639
| 0.467213
| 0.014458
| 0.120482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005906
| 0.430493
| 892
| 30
| 102
| 29.733333
| 0.811024
| 0.293722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc82ef8de803f7a119ffe50ddde0e017fafeacd2
| 16,041
|
py
|
Python
|
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
'''
@file momentum_kinematics_optimizer.py
@package momentumopt
@author Brahayam Ponton (brahayam.ponton@tuebingen.mpg.de)
@license License BSD-3-Clause
@copyright Copyright (c) 2019, New York University and Max Planck Gesellschaft.
@date 2019-10-08
'''
import os
import numpy as np
from momentumopt.kinoptpy.qp import QpSolver
from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics
from pinocchio import RobotWrapper
import pinocchio as se3
from pinocchio.utils import zero
from pymomentum import *
from momentumopt.quadruped.quadruped_wrapper import QuadrupedWrapper
from momentumopt.kinoptpy.min_jerk_traj import *
from pymomentum import \
PlannerVectorParam_KinematicDefaultJointPositions, \
PlannerIntParam_NumTimesteps, \
PlannerDoubleParam_TimeStep
class Contact(object):
def __init__(self, position, start_time, end_time):
self.pos = position
self.init_time = start_time
self.final_time = end_time
def position(self):
return self.pos
def start_time(self):
return self.init_time
def end_time(self):
return self.final_time
def get_contact_plan(contact_states, effs):
contacts = {}
for i, eff in enumerate(effs):
num_contacts = len(contact_states(i))
contacts[eff] = []
for j in range(num_contacts):
contact_ = contact_states(i)[j]
start_time = contact_.start_time
end_time = contact_.end_time
position = contact_.position
contacts[eff].append(Contact(position, start_time, end_time))
return contacts
def generate_eff_traj(contacts, z_offset):
effs = contacts.keys()
eff_traj_poly = {}
for eff in effs:
cnt = contacts[eff]
num_contacts = len(cnt)
poly_traj = [
PolynominalList(), PolynominalList(), PolynominalList()
]
for i in range(num_contacts):
# Create a constant polynominal for endeffector on the ground.
t = [cnt[i].start_time(), cnt[i].end_time()]
for idx in range(3):
poly_traj[idx].append(t, constant_poly(cnt[i].position()[idx]))
# If there is a contact following, add the transition between
# the two contact points.
if i < num_contacts - 1:
t = [cnt[i].end_time(), cnt[i+1].start_time()]
for idx in range(3):
via = None
if idx == 2:
via = z_offset + cnt[i].position()[idx]
poly = poly_points(t, cnt[i].position()[idx], cnt[i+1].position()[idx], via)
poly_traj[idx].append(t, poly)
eff_traj_poly[eff] = poly_traj
# returns end eff trajectories
return eff_traj_poly
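# Shape of the result: eff_traj_poly[eff] holds one PolynominalList per axis
# (x, y, z). Stance phases become constant pieces; swing phases become
# point-to-point polynomials, with the z axis routed through a via point
# z_offset above the contact being left.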
class EndeffectorTrajectoryGenerator(object):
def __init__(self):
self.z_offset = 0.1
def get_z_bound(self, mom_kin_optimizer):
z_max = min(max(mom_kin_optimizer.com_dyn[:, 2]), self.max_bound)
z_min = max(min(mom_kin_optimizer.com_dyn[:, 2]), self.min_bound)
return z_max, z_min
def __call__(self, mom_kin_optimizer):
'''
Computes the endeffector positions and velocities.
Returns endeff_pos_ref, endeff_vel_ref
[0]: endeff_pos_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
[1]: endeff_vel_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
'''
dt = mom_kin_optimizer.dt
num_eff = len(mom_kin_optimizer.eff_names)
num_time_steps = mom_kin_optimizer.num_time_steps
contacts = get_contact_plan(mom_kin_optimizer.contact_sequence.contact_states,
mom_kin_optimizer.eff_names)
# Generate minimum jerk trajectories
eff_traj_poly = generate_eff_traj(contacts, self.z_offset)
# Compute the endeffector position and velocity trajectories.
endeff_pos_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_vel_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_contact = np.zeros((num_time_steps, num_eff))
for it in range(num_time_steps):
for eff, name in enumerate(mom_kin_optimizer.eff_names):
endeff_pos_ref[it][eff] = [eff_traj_poly[name][i].eval(it * dt) for i in range(3)]
endeff_vel_ref[it][eff] = [eff_traj_poly[name][i].deval(it * dt) for i in range(3)]
# HACK: If the velocity is zero, assume the endeffector is in
# contact with the ground.
if np.all(endeff_vel_ref[it][eff] == 0.):
endeff_contact[it][eff] = 1.
else:
endeff_contact[it][eff] = 0.
return endeff_pos_ref, endeff_vel_ref, endeff_contact
class JointTrajectoryGenerator(object):
def __init__(self):
        self.dt = 0.01
self.num_time_steps = None
self.q_init = None
self.poly_traj = None
def joint_traj(self, q_via):
self.poly_traj = []
for i in range(len(self.q_init)):
self.poly_traj = np.append(self.poly_traj, [PolynominalList()])
for j in range(len(self.q_init)):
            for i in range(len(q_via[:, 0]) + 1):
                if i == 0:
                    t = [0, q_via[0, 0] / self.dt]
                    poly = poly_points(t, self.q_init[j], q_via[i, j + 1])
                    self.poly_traj[j].append(t, poly)
                elif i == len(q_via[:, 0]):
                    t = [q_via[i - 1, 0] / self.dt, self.num_time_steps]
                    poly = poly_points(t, q_via[i - 1, j + 1], self.q_init[j])
                    self.poly_traj[j].append(t, poly)
                else:
                    t = [q_via[i - 1, 0] / self.dt, q_via[i, 0] / self.dt]
                    poly = poly_points(t, q_via[i - 1, j + 1], q_via[i, j + 1])
                    self.poly_traj[j].append(t, poly)
def eval_traj(self,t):
        q = np.zeros((1, len(self.q_init)), float)
        for j in range(len(self.q_init)):
            q[0, j] = self.poly_traj[j].eval(t)
return np.matrix(q)
class MomentumKinematicsOptimizer(object):
def __init__(self):
self.q_init = None
self.dq_init = None
self.reg_orientation = 1e-2
self.reg_joint_position = 2.
self.joint_des = None
def reset(self):
self.kinematics_sequence = KinematicsSequence()
self.kinematics_sequence.resize(self.planner_setting.get(PlannerIntParam_NumTimesteps),
self.planner_setting.get(PlannerIntParam_NumDofs))
def initialize(self, planner_setting, max_iterations=50, eps=0.001, endeff_traj_generator=None,
RobotWrapper=QuadrupedWrapper):
self.planner_setting = planner_setting
if endeff_traj_generator is None:
endeff_traj_generator = EndeffectorTrajectoryGenerator()
self.endeff_traj_generator = endeff_traj_generator
self.dt = planner_setting.get(PlannerDoubleParam_TimeStep)
self.num_time_steps = planner_setting.get(PlannerIntParam_NumTimesteps)
self.max_iterations = max_iterations
self.eps = eps
self.robot = RobotWrapper()
self.reset()
# Holds dynamics and kinematics results
self.com_dyn = np.zeros((self.num_time_steps, 3))
self.lmom_dyn = np.zeros((self.num_time_steps, 3))
self.amom_dyn = np.zeros((self.num_time_steps, 3))
self.com_kin = np.zeros((self.num_time_steps, 3))
self.lmom_kin = np.zeros((self.num_time_steps, 3))
self.amom_kin = np.zeros((self.num_time_steps, 3))
self.q_kin = np.zeros((self.num_time_steps, self.robot.model.nq))
self.dq_kin = np.zeros((self.num_time_steps, self.robot.model.nv))
self.hip_names = ['{}_HFE'.format(eff) for eff in self.robot.effs]
self.hip_ids = [self.robot.model.getFrameId(name) for name in self.hip_names]
self.eff_names = ['{}_{}'.format(eff, self.robot.joints_list[-1]) for eff in self.robot.effs]
self.inv_kin = PointContactInverseKinematics(self.robot.model, self.eff_names)
self.motion_eff = {
'trajectory': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'trajectory_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne))
}
def fill_data_from_dynamics(self):
# The centroidal information
for it in range(self.num_time_steps):
self.com_dyn[it] = self.dynamic_sequence.dynamics_states[it].com
self.lmom_dyn[it] = self.dynamic_sequence.dynamics_states[it].lmom
self.amom_dyn[it] = self.dynamic_sequence.dynamics_states[it].amom
def fill_endeffector_trajectory(self):
self.endeff_pos_ref, self.endeff_vel_ref, self.endeff_contact = \
self.endeff_traj_generator(self)
def fill_kinematic_result(self, it, q, dq):
def framesPos(frames):
return np.vstack([data.oMf[idx].translation for idx in frames]).reshape(-1)
def framesVel(frames):
return np.vstack([
self.inv_kin.get_world_oriented_frame_jacobian(q, idx).dot(dq)[:3] for idx in frames
]).reshape(-1)
data = self.inv_kin.robot.data
hg = self.inv_kin.robot.centroidalMomentum(q, dq)
# Storing on the internal array.
self.com_kin[it] = self.inv_kin.robot.com(q).T
self.lmom_kin[it] = hg.linear.T
self.amom_kin[it] = hg.angular.T
self.q_kin[it] = q.T
self.dq_kin[it] = dq.T
        # The endeffector information as well.
self.motion_eff['trajectory'][it] = framesPos(self.inv_kin.endeff_ids)
self.motion_eff['velocity'][it] = self.inv_kin.J[6:(self.inv_kin.ne + 2) * 3].dot(dq).T
self.motion_eff['trajectory_wrt_base'][it] = \
self.motion_eff['trajectory'][it] - framesPos(self.hip_ids)
self.motion_eff['velocity_wrt_base'][it] = \
self.motion_eff['velocity'][it] - framesVel(self.hip_ids)
# Storing on the kinematic sequence.
kinematic_state = self.kinematics_sequence.kinematics_states[it]
kinematic_state.com = self.com_kin[it]
kinematic_state.lmom = self.lmom_kin[it]
kinematic_state.amom = self.amom_kin[it]
kinematic_state.robot_posture.base_position = q[:3]
kinematic_state.robot_posture.base_orientation = q[3:7]
kinematic_state.robot_posture.joint_positions = q[7:]
kinematic_state.robot_velocity.base_linear_velocity = dq[:3]
kinematic_state.robot_velocity.base_angular_velocity = dq[3:6]
kinematic_state.robot_velocity.joint_velocities = dq[6:]
def optimize_initial_position(self, init_state):
# Optimize the initial configuration
q = se3.neutral(self.robot.model)
plan_joint_init_pos = self.planner_setting.get(
PlannerVectorParam_KinematicDefaultJointPositions)
if len(plan_joint_init_pos) != self.robot.num_ctrl_joints:
            raise ValueError(
                'Number of joints in the config file does not match the robot.\n' +
                'Got %d joints but the robot expects %d joints.' % (
                    len(plan_joint_init_pos), self.robot.num_ctrl_joints))
q[7:] = np.matrix(plan_joint_init_pos).T
q[2] = self.robot.floor_height + 0.32
dq = np.matrix(np.zeros(self.robot.robot.nv)).T
com_ref = init_state.com
lmom_ref = np.zeros(3)
amom_ref = np.zeros(3)
endeff_pos_ref = np.array([init_state.effPosition(i) for i in range(init_state.effNum())])
endeff_vel_ref = np.matrix(np.zeros((init_state.effNum(), 3)))
endeff_contact = np.ones(init_state.effNum())
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.matrix([0.0, 0, 0.]).T))
q[3:7] = quad_goal.coeffs()
for iters in range(self.max_iterations):
# Adding small P controller for the base orientation to always start with flat
# oriented base.
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
amom_ref = 1e-1 * se3.log((quad_goal * quad_q.inverse()).matrix())
res = self.inv_kin.compute(q, dq, com_ref, lmom_ref, amom_ref,
endeff_pos_ref, endeff_vel_ref, endeff_contact, None)
q = se3.integrate(self.robot.model, q, res)
if np.linalg.norm(res) < 1e-3:
print('Found initial configuration after {} iterations'.format(iters + 1))
break
if iters == self.max_iterations - 1:
print('Failed to converge for initial setup.')
print("initial configuration: \n", q)
self.q_init = q.copy()
self.dq_init = dq.copy()
def optimize(self, init_state, contact_sequence, dynamic_sequence, plotting=False):
self.init_state = init_state
self.contact_sequence = contact_sequence
self.dynamic_sequence = dynamic_sequence
self.q_via = None
        # Create arrays with centroidal and endeffector information.
self.fill_data_from_dynamics()
self.fill_endeffector_trajectory()
# Run the optimization for the initial configuration only once.
if self.q_init is None:
self.optimize_initial_position(init_state)
# Get the desired joint trajectory
# print "num_joint_via:",self.planner_setting.get(PlannerIntParam_NumJointViapoints)
# print "joint_via:",self.planner_setting.get(PlannerCVectorParam_JointViapoints)
# TODO: this is for jump, should go to config file
# q_jump = [1., 0.1, -0.2 ,0.1, -0.2 ,-0.1, 0.2 ,-0.1, 0.2]
# q_via = np.matrix([.75, np.pi/2, -np.pi, np.pi/2, -np.pi, -np.pi/2, np.pi, -np.pi/2, np.pi]).T
# q_max = np.matrix([1.35, .7*np.pi/2, -.7*np.pi, .7*np.pi/2, -.7*np.pi, -.7*np.pi/2, .7*np.pi, -.7*np.pi/2, .7*np.pi]).T
# q_via0 = np.vstack((q_via.T, q_jump))
# self.q_via = np.vstack((q_via0, q_max.T))
joint_traj_gen = JointTrajectoryGenerator()
joint_traj_gen.num_time_steps = self.num_time_steps
        joint_traj_gen.q_init = self.q_init[7:]
        self.joint_des = np.zeros((len(self.q_init[7:]), self.num_time_steps), float)
        if self.q_via is None:
            for i in range(self.num_time_steps):
                self.joint_des[:, i] = self.q_init[7:].T
        else:
            joint_traj_gen.joint_traj(self.q_via)
            for it in range(self.num_time_steps):
                self.joint_des[:, it] = joint_traj_gen.eval_traj(it)
# Compute inverse kinematics over the full trajectory.
self.inv_kin.is_init_time = 0
q, dq = self.q_init.copy(), self.dq_init.copy()
for it in range(self.num_time_steps):
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.matrix([0.0, 0, 0.]).T))
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
amom_ref = (self.reg_orientation * se3.log((quad_goal * quad_q.inverse()).matrix()).T + self.amom_dyn[it]).reshape(-1)
            joint_regularization_ref = self.reg_joint_position * (np.matrix(self.joint_des[:, it]).T - q[7:])
# joint_regularization_ref = self.reg_joint_position * (self.q_init[7 : ] - q[7 : ])
# Fill the kinematics results for it.
self.inv_kin.forward_robot(q, dq)
self.fill_kinematic_result(it, q, dq)
dq = self.inv_kin.compute(
q, dq, self.com_dyn[it], self.lmom_dyn[it], amom_ref,
self.endeff_pos_ref[it], self.endeff_vel_ref[it],
self.endeff_contact[it], joint_regularization_ref)
# Integrate to the next state.
q = se3.integrate(self.robot.model, q, dq * self.dt)
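# Usage sketch (illustrative, not part of this module): planner_setting,
# init_state, contact_sequence and dynamic_sequence are produced by the wider
# momentumopt pipeline; this only shows the call order of the class above.
#
#   kin_opt = MomentumKinematicsOptimizer()
#   kin_opt.initialize(planner_setting)
#   kin_opt.optimize(init_state, contact_sequence, dynamic_sequence)
#   q_kin, dq_kin = kin_opt.q_kin, kin_opt.dq_kin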
| 41.342784
| 130
| 0.625335
| 2,246
| 16,041
| 4.227961
| 0.138023
| 0.022115
| 0.037911
| 0.035383
| 0.326453
| 0.251158
| 0.214511
| 0.16944
| 0.126474
| 0.093934
| 0
| 0.015315
| 0.259148
| 16,041
| 387
| 131
| 41.449612
| 0.783743
| 0.128857
| 0
| 0.083969
| 0
| 0
| 0.025344
| 0
| 0
| 0
| 0
| 0.002584
| 0
| 1
| 0.083969
| false
| 0
| 0.041985
| 0.019084
| 0.179389
| 0.01145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc84db3b22d112c3d8e47827ed44b0cdb57ad39d
| 1,482
|
py
|
Python
|
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import urllib
import os
import jupyterhub
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode
from jupyterhub.auth import Authenticator
from tornado import gen
class HttpAuthenticator(Authenticator):
server = Unicode(
None,
allow_none=True,
config=True,
help="""
Http authentication server.
"""
)
appid = Unicode(
None,
allow_none=True,
config=True,
help="""
Application Id recognized by the http authentication server
"""
)
@gen.coroutine
def authenticate(self, handler, data):
http_client = AsyncHTTPClient()
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
}
params = dict(
type="json",
appid=self.appid,
ac=data['username'],
pw=data['password']
)
req = HTTPRequest(self.server,
method="POST",
headers=headers,
body=urllib.parse.urlencode(params),
                          validate_cert=False
)
resp = yield http_client.fetch(req)
reply = json.loads(resp.body.decode('utf8', 'replace'))
if reply.get("code") == 200:
            return reply.get("data").get("UserCN")
else:
return None
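# Usage sketch (hypothetical values): enabling this authenticator in a
# jupyterhub_config.py. The attribute names come from the traitlets declared
# above; the server URL and appid are placeholders.
#
#   c.JupyterHub.authenticator_class = \
#       'jupyterhub_http_authenticator.httpauthenticator.HttpAuthenticator'
#   c.HttpAuthenticator.server = 'https://auth.example.com/login'
#   c.HttpAuthenticator.appid = 'my-app-id'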
| 23.52381
| 68
| 0.524966
| 133
| 1,482
| 5.81203
| 0.548872
| 0.028461
| 0.041397
| 0.051746
| 0.098318
| 0.098318
| 0.098318
| 0.098318
| 0
| 0
| 0
| 0.004353
| 0.379892
| 1,482
| 62
| 69
| 23.903226
| 0.836779
| 0
| 0
| 0.2
| 0
| 0
| 0.14527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0.02
| 0.16
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc85621d3dca3de545ceeff3a1f12920ad9784b4
| 9,912
|
py
|
Python
|
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | 12
|
2019-03-30T16:43:53.000Z
|
2022-03-21T19:49:12.000Z
|
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | null | null | null |
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# NOT -> ParameterModule
# NOT -> children_and_parameters
# NOT -> flatten_model
# NOT -> lr_range
# NOT -> scheduling functions
# NOT -> SmoothenValue
# YES -> lr_find
# NOT -> plot_lr_find
# NOT TO BE MODIFIED
class ParameterModule(nn.Module):
"Register a lone parameter 'p' in a module"
def __init__(self, p:nn.Parameter):
super().__init__()
self.val = p
def forward(self, x):
return x
# NOT TO BE MODIFIED
# To be used to flatten_model
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# NOT TO BE MODIFIED
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if len(list(m.children())) else [m]
# NOT TO BE MODIFIED
def lr_range(model, lr):
"""
Build differential learning rate from lr. It will give you the
Arguments:
model :- torch.nn.Module
lr :- float or slice
Returns:
Depending upon lr
"""
if not isinstance(lr, slice):
return lr
num_layer = len([nn.Sequential(*flatten_model(model))])
if lr.start:
mult = lr.stop / lr.start
step = mult**(1/(num_layer-1))
res = np.array([lr.start*(step**i) for i in range(num_layer)])
else:
res = [lr.stop/10.]*(num_layer-1) + [lr.stop]
return np.array(res)
# NOT TO BE MODIFIED
# These are the functions that would give us the values of lr. For example, for
# linearly increasing lr we would use annealing_linear.
# You can add your own custom function for producing lr.
# By default annealing_exp is used for both lr and momentum.
def annealing_no(start, end, pct:float):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct:float):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct:float):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct:float):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start, end, pct:float, degree):
return end + (start-end) * (1-pct)**degree
# NOT TO BE MODIFIED
class Stepper():
"""
    Used to step from start to end ('vals') over 'n_iter' iterations on a schedule.
    We will create a stepper object and then use one of the above annealing functions
    to step from the start lr to the end lr.
"""
def __init__(self, vals, n_iter:int, func=None):
self.start, self.end = (vals[0], vals[1]) if isinstance(vals, tuple) else (vals,0)
self.n_iter = max(1, n_iter)
if func is None:
self.func = annealing_linear if isinstance(vals, tuple) else annealing_no
else:
self.func = func
self.n = 0
def step(self):
"Return next value along annealed schedule"
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return 'True' if schedule completed"
return self.n >= self.n_iter
# NOT TO BE MODIFIED
class SmoothenValue():
"Create a smooth moving average for a value (loss, etc) using `beta`."
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
# TO BE MODIFIED IN SOME CASES
def lr_find(data_loader, model, loss_fn, opt, wd:int=0, start_lr:float=1e-7, end_lr:float=10,
num_it:int=100, stop_div:bool=True, smooth_beta:float=0.98, use_gpu:bool=True,
device=torch.device('cuda'), anneal_func=annealing_exp):
"""
    The main function that you will call to plot the learning_rate vs losses graph. It is
    the only function from lr_find.py that you will call. By default it will use the GPU; it
    assumes your model is already on the GPU when use_gpu is True.
Arguments:-
data_loader :- torch.utils.data.DataLoader
model :- torch.nn.Module
loss_fn :- torch.nn.LossFunction
opt :- torch.optim.Optimizer
wd :- weight decay (default=0).
start_lr :- The learning rate from where to start in lr_find (default=1e-7)
end_lr :- The learning rate at which to end lr_find (default=10)
num_it :- Number of iterations for lr_find (default=100)
stop_div :- If the loss diverges, then stop early (default=True)
        smooth_beta :- The beta value to smoothen the running average of the loss function (default=0.98)
use_gpu :- True (train on GPU) else CPU
anneal_func :- The step function you want to use (default exp)
device :- Torch device to use for training model (default GPU)
Returns:
losses :- list of smoothened version of losses
lrs :- list of all lrs that we test
"""
model.train()
stop = False
flag = False
best_loss = 0.
iteration = 0
losses = []
lrs = []
lrs.append(start_lr)
start_lr = lr_range(model, start_lr)
start_lr = np.array(start_lr) if isinstance(start_lr, (tuple, list)) else start_lr
end_lr = lr_range(model, end_lr)
end_lr = np.array(end_lr) if isinstance(end_lr, (tuple, list)) else end_lr
sched = Stepper((start_lr, end_lr), num_it, anneal_func)
smoothener = SmoothenValue(smooth_beta)
epochs = int(np.ceil(num_it/len(data_loader)))
# save model_dict
model_state = model.state_dict()
opt_state = opt.state_dict()
# Set optimizer learning_rate = start_lr
for group in opt.param_groups:
group['lr'] = sched.start
for i in range(epochs):
for data in data_loader:
opt.zero_grad()
################### TO BE MODIFIED ###################
# Depending on your model, you will have to modify your
# data pipeline and how you give inputs to your model.
inputs, labels = data
if use_gpu:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
#####################################################
if use_gpu:
smoothener.add_value(loss.detach().cpu())
else:
smoothener.add_value(loss.detach())
smooth_loss = smoothener.smooth
losses.append(smooth_loss)
loss.backward()
################### TO BE MODIFIED ###################
# For AdamW. If you want to use Adam, comment these lines
for group in opt.param_groups:
for param in group['params']:
                    param.data = param.data.add(param.data, alpha=-wd * group['lr'])
#####################################################
opt.step()
# Change lr
new_lr = sched.step()
lrs.append(new_lr)
for group in opt.param_groups:
group['lr'] = new_lr
################### TO BE MODIFIED ###################
            # You don't necessarily want to change this. But in cases
            # where you are maximizing the loss, you will have
            # to change it.
if iteration == 0 or smooth_loss < best_loss:
best_loss = smooth_loss
iteration += 1
if sched.is_done or (stop_div and (smooth_loss > 4*best_loss or torch.isnan(loss))):
flag = True
break
#####################################################
            if iteration % 10 == 0:
print(f'Iteration: {iteration}')
if flag:
break
# Load state dict
model.load_state_dict(model_state)
opt.load_state_dict(opt_state)
lrs.pop()
    print('LR Finder is complete.')
return losses, lrs
# NOT TO BE MODIFIED
def plot_lr_find(losses, lrs, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None):
"""
It will take the losses and lrs returned by lr_find as input.
Arguments:-
skip_start -> It will skip skip_start lrs from the start
skip_end -> It will skip skip_end lrs from the end
suggestion -> If you want to see the point where the gradient changes most
return_fig -> True then get the fig in the return statement
"""
lrs = lrs[skip_start:-skip_end] if skip_end > 0 else lrs[skip_start:]
losses = losses[skip_start:-skip_end] if skip_end > 0 else losses[skip_start:]
losses = [x.item() for x in losses]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try:
mg = (np.gradient(np.array(losses))).argmin()
        except Exception:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
if return_fig is not None:
return fig
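# Usage sketch (illustrative): `train_loader`, `model`, `loss_fn` and `opt`
# are assumed to be built by the caller; only the two calls below come from
# this module.
#
#   losses, lrs = lr_find(train_loader, model, loss_fn, opt,
#                         use_gpu=torch.cuda.is_available())
#   plot_lr_find(losses, lrs, suggestion=True)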
| 36.307692
| 115
| 0.601392
| 1,417
| 9,912
| 4.085392
| 0.223006
| 0.008983
| 0.024875
| 0.020729
| 0.096217
| 0.056486
| 0.045776
| 0.045776
| 0.045776
| 0.024011
| 0
| 0.010001
| 0.273709
| 9,912
| 272
| 116
| 36.441176
| 0.794138
| 0.338882
| 0
| 0.078947
| 0
| 0.019737
| 0.111464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.026316
| 0.013158
| 0.243421
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc85ba5181e5203592287503621708b994737b25
| 3,905
|
py
|
Python
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
Polidea/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
"""
Test the lldb disassemble command on each call frame when stopped on C's ctor.
"""
from __future__ import print_function
import os
import re
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class IterateFrameAndDisassembleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_and_run_command(self):
"""Disassemble each call frame when stopped on C's constructor."""
self.build()
self.breakOnCtor()
raw_output = self.res.GetOutput()
frameRE = re.compile(r"""
^\s\sframe # heading for the frame info,
.* # wildcard, and
0x[0-9a-f]{16} # the frame pc, and
\sa.out`(.+) # module`function, and
\s\+\s # the rest ' + ....'
""", re.VERBOSE)
for line in raw_output.split(os.linesep):
match = frameRE.search(line)
if match:
function = match.group(1)
#print("line:", line)
#print("function:", function)
self.runCmd("disassemble -n '%s'" % function)
@add_test_categories(['pyapi'])
def test_and_python_api(self):
"""Disassemble each call frame when stopped on C's constructor."""
self.build()
self.breakOnCtor()
# Now use the Python API to get at each function on the call stack and
# disassemble it.
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
depth = thread.GetNumFrames()
for i in range(depth - 1):
frame = thread.GetFrameAtIndex(i)
function = frame.GetFunction()
# Print the function header.
if self.TraceOn():
print()
print(function)
if function:
# Get all instructions for this function and print them out.
insts = function.GetInstructions(target)
for inst in insts:
# We could simply do 'print inst' to print out the disassembly.
# But we want to print to stdout only if self.TraceOn() is
# True.
disasm = str(inst)
if self.TraceOn():
print(disasm)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break for main.cpp.
self.line = line_number('main.cpp', '// Set break point at this line.')
def breakOnCtor(self):
"""Setup/run the program so it stops on C's constructor."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break on the ctor function of class C.
bpno = lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=-1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint %d.' % (bpno)])
        # This test was failing because we fail to put the C:: in front of the constructor.
# We should maybe make another testcase to cover that specifically, but we shouldn't
# fail this whole testcase for an inessential issue.
# We should be stopped on the ctor function of class C.
# self.expect("thread backtrace", BACKTRACE_DISPLAYED_CORRECTLY,
# substrs = ['C::C'])
| 38.663366
| 92
| 0.56338
| 447
| 3,905
| 4.834452
| 0.402685
| 0.016659
| 0.007404
| 0.0236
| 0.108283
| 0.108283
| 0.108283
| 0.085146
| 0.072189
| 0.072189
| 0
| 0.003125
| 0.34443
| 3,905
| 100
| 93
| 39.05
| 0.841016
| 0.283483
| 0
| 0.101695
| 0
| 0
| 0.185709
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 1
| 0.067797
| false
| 0
| 0.118644
| 0
| 0.220339
| 0.067797
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc87838b315ca1f64fa986f62a70ee610e20d306
| 1,116
|
py
|
Python
|
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | 1
|
2021-04-25T22:53:36.000Z
|
2021-04-25T22:53:36.000Z
|
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | null | null | null |
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | null | null | null |
'''
All the reserved, individual words used in MAlice.
'''
A = "a"
ALICE = "Alice"
AND = "and"
ATE = "ate"
BECAME = "became"
BECAUSE = "because"
BUT = "but"
CLOSED = "closed"
COMMA = ","
CONTAINED = "contained"
DOT = "."
DRANK = "drank"
EITHER = "either"
ENOUGH = "enough"
EVENTUALLY = "eventually"
FOUND = "found"
HAD = "had"
HATTA = "hatta"
LETTER = "letter"
LOOKING_GLASS = "looking-glass"
LPAR = "("
MAYBE = "maybe"
NUMBER = "number"
OF = "of"
OPENED = "opened"
OR = "or"
PERHAPS = "perhaps"
PIECE = "piece"
QUESTION = "?"
ROOM = "room"
RPAR = ")"
S = "'s"
SAID = "said"
SENTENCE = "sentence"
SO = "so"
SPIDER = "spider"
SPOKE = "spoke"
THE = "The"
THEN = "then"
TIMES = "times"
TOO = "too"
UNDERSCORE = "_"
UNSURE = "unsure"
WAS = "was"
WHAT = "what"
WHICH = "which"
RESTRICTED = [A, ALICE, AND, ATE, BECAME, BECAUSE, BUT, CLOSED, COMMA, CONTAINED, DOT, DRANK, EITHER, ENOUGH, EVENTUALLY, FOUND, HAD, HATTA, LETTER, LOOKING_GLASS, LPAR, MAYBE, NUMBER, OF, OPENED, OR, PERHAPS, PIECE, QUESTION, ROOM, RPAR, S, SAID, SENTENCE, SO, SPIDER, SPOKE, THE, THEN, TIMES, TOO, UNDERSCORE, UNSURE, WAS, WHAT, WHICH]
| 21.056604
| 338
| 0.638889
| 144
| 1,116
| 4.930556
| 0.381944
| 0.050704
| 0.056338
| 0.059155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181004
| 1,116
| 52
| 339
| 21.461538
| 0.776805
| 0.044803
| 0
| 0
| 0
| 0
| 0.194497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc88724609a6f077241f73613153365855b09321
| 853
|
py
|
Python
|
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# Author: C.K
# Email: theck17@163.com
# DateTime:2021-04-12 18:35:15
# Description:
import os
import sys
from typing import List
class Solution:
def insert(self, intervals: List[List[int]],
newInterval: List[int]) -> List[List[int]]:
res, i = [], 0
for interval in intervals:
if interval[1] < newInterval[0]:
res.append(interval)
elif interval[0] > newInterval[1]:
res.append(newInterval)
newInterval = interval
elif interval[1] >= newInterval[0] or newInterval[1] >= interval[0]:
newInterval = [
min(interval[0], newInterval[0]),
max(interval[1], newInterval[1])
]
res.append(newInterval)
return res
if __name__ == "__main__":
pass
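    # Worked example (illustrative): for intervals = [[1, 3], [6, 9]] and
    # newInterval = [2, 5], [1, 3] overlaps [2, 5] and they merge into
    # [min(1, 2), max(3, 5)] = [1, 5]; [6, 9] starts after 5 and is kept, so
    # Solution().insert([[1, 3], [6, 9]], [2, 5]) returns [[1, 5], [6, 9]].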
| 24.371429
| 80
| 0.534584
| 93
| 853
| 4.817204
| 0.516129
| 0.046875
| 0.133929
| 0.09375
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058929
| 0.343494
| 853
| 34
| 81
| 25.088235
| 0.741071
| 0.114889
| 0
| 0.095238
| 0
| 0
| 0.010681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.047619
| 0.095238
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc89b98301ba0f533627b829ae7b31f9ab29c245
| 756
|
py
|
Python
|
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
from ..utils import SyncClient, __version__
from .bucket import SyncStorageBucketAPI
from .file_api import SyncBucketProxy
__all__ = [
"SyncStorageClient",
]
class SyncStorageClient(SyncStorageBucketAPI):
"""Manage storage buckets and files."""
def __init__(self, url: str, headers: dict[str, str]) -> None:
super().__init__(
url,
{"User-Agent": f"supabase-py/storage3 v{__version__}", **headers},
SyncClient(),
)
def from_(self, id: str) -> SyncBucketProxy:
"""Run a storage file operation.
Parameters
----------
id
The unique identifier of the bucket
"""
return SyncBucketProxy(id, self.url, self.headers, self._client)
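# Usage sketch (hypothetical URL, key and bucket id; real values come from
# your storage project settings):
#
#   client = SyncStorageClient(
#       "https://xyz.supabase.co/storage/v1",
#       {"apiKey": "<service-key>"},
#   )
#   bucket = client.from_("avatars")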
| 26.068966
| 78
| 0.613757
| 75
| 756
| 5.88
| 0.6
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001805
| 0.267196
| 756
| 28
| 79
| 27
| 0.794224
| 0.170635
| 0
| 0
| 0
| 0
| 0.107826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8a0406013c9abeb99153a42725a7e4225fc35e
| 1,755
|
py
|
Python
|
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 3
|
2020-03-03T13:21:44.000Z
|
2021-07-21T09:53:31.000Z
|
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
__all__ = ['PyJsParser', 'Node', 'WrappingNode', 'node_to_dict', 'parse', 'translate_js', 'translate', 'syntax_tree_translate',
'DEFAULT_HEADER']
__author__ = 'Piotr Dabkowski'
__version__ = '2.2.0'
from pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict
from .translator import translate_js, trasnlate as translate, syntax_tree_translate, DEFAULT_HEADER  # upstream misspells 'trasnlate'; re-export it under the name listed in __all__
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
Same as PyJsParser().parse For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code)
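# Example (illustrative): parse a one-line program and inspect the tree.
#
#   tree = parse('var x = 1;')
#   tree['type']  # 'Program', as in an esprima-style syntax tree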
| 45
| 127
| 0.764672
| 251
| 1,755
| 5.239044
| 0.513944
| 0.06692
| 0.019772
| 0.045627
| 0.103422
| 0.054753
| 0
| 0
| 0
| 0
| 0
| 0.007539
| 0.168661
| 1,755
| 38
| 128
| 46.184211
| 0.893763
| 0.709402
| 0
| 0
| 0
| 0
| 0.25
| 0.044118
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8adf2af330cf7308b0b0e25463ed5a44b45099
| 1,484
|
py
|
Python
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CustomImagePropertiesCustom(Model):
"""Properties for creating a custom image from a VHD.
:param image_name: The image name.
:type image_name: str
:param sys_prep: Indicates whether sysprep has been run on the VHD.
:type sys_prep: bool
:param os_type: The OS type of the custom image (i.e. Windows, Linux).
Possible values include: 'Windows', 'Linux', 'None'
:type os_type: str or ~azure.mgmt.devtestlabs.models.CustomImageOsType
"""
_validation = {
'os_type': {'required': True},
}
_attribute_map = {
'image_name': {'key': 'imageName', 'type': 'str'},
'sys_prep': {'key': 'sysPrep', 'type': 'bool'},
'os_type': {'key': 'osType', 'type': 'str'},
}
def __init__(self, os_type, image_name=None, sys_prep=None):
super(CustomImagePropertiesCustom, self).__init__()
self.image_name = image_name
self.sys_prep = sys_prep
self.os_type = os_type
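# Usage sketch (illustrative values): construct the model as the generated
# client would, passing the required os_type plus any optional fields.
#
#   props = CustomImagePropertiesCustom(
#       os_type='Linux', image_name='my-image.vhd', sys_prep=False)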
| 35.333333
| 76
| 0.6031
| 175
| 1,484
| 4.942857
| 0.542857
| 0.055491
| 0.030058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000835
| 0.193396
| 1,484
| 41
| 77
| 36.195122
| 0.721805
| 0.570081
| 0
| 0
| 0
| 0
| 0.157627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8c55932d28aa8c9253fefe76b11ab1d6dbc13a
| 1,886
|
py
|
Python
|
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
import torch
def expanded_pairwise_distances(x, y):
'''
    Input:  x is a bxNxd matrix
            y is a bxMxd matrix
    Output: dist is a bxNxM matrix where dist[i,j] is the squared norm
            between x[i,:] and y[j,:], i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
differences = x.unsqueeze(2) - y.unsqueeze(1)
distances = torch.sum(differences * differences, -1)
return distances
def chamfer_distance(x, y):
'''
    input x and y are bxNxM matrices, b: batch, N: number of points, M: point dim (e.g. 2 for 2D or 3 for 3D)
    output is a bx1 matrix with the value of the chamfer distance for each sample of the batch
'''
dist_vec = expanded_pairwise_distances(x, y)
min_distances = torch.topk(dist_vec, k=1, dim=2, largest=False).values
chamfer = torch.sum(min_distances, dim=1) / torch.tensor(x.shape[1])
return chamfer
class ChamferLoss(torch.nn.Module):
def forward(self, x, y):
chamfer = chamfer_distance(x, y)
return torch.sum(chamfer)
if __name__ == "__main__":
x = torch.tensor([
[
[0., 0., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
y = torch.tensor([
[
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
chamfer = ChamferLoss()
print('chamfer loss torch (cpu):', chamfer(x, y))
print('chamfer loss torch (cuda):', chamfer(x.cuda(), y.cuda()))
# import sys
# sys.path.append("../distance/chamfer/")
# import dist_chamfer as cd
# CD = cd.chamferDist()
# dist1, dist2, _, _= CD(x, y)
# print('orig', dist1)
| 27.735294
| 104
| 0.507423
| 262
| 1,886
| 3.572519
| 0.354962
| 0.023504
| 0.022436
| 0.025641
| 0.094017
| 0.036325
| 0.034188
| 0.034188
| 0.034188
| 0.034188
| 0
| 0.040945
| 0.326617
| 1,886
| 67
| 105
| 28.149254
| 0.696063
| 0.306999
| 0
| 0.357143
| 0
| 0
| 0.047162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.02381
| 0
| 0.190476
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8efe8d75934b61443e05664bf142fdc9790c04
| 6,351
|
py
|
Python
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 94
|
2016-03-04T17:25:53.000Z
|
2022-03-18T18:05:23.000Z
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 2,841
|
2016-01-21T09:06:49.000Z
|
2022-03-18T14:53:56.000Z
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 71
|
2015-09-30T08:35:35.000Z
|
2022-03-16T07:16:28.000Z
|
#!/usr/bin/env python3
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Run the tests of the project.
This script expects a suite function in <project_package>.test,
which returns a unittest.TestSuite.
Test coverage dependencies: coverage, lxml.
"""
__authors__ = ["Jérôme Kieffer", "Thomas Vincent"]
__date__ = "30/09/2020"
__license__ = "MIT"
import distutils.util
import logging
import os
import subprocess
import sys
import importlib
# Capture all default warnings
logging.captureWarnings(True)
import warnings
warnings.simplefilter('default')
logger = logging.getLogger("run_tests")
logger.setLevel(logging.WARNING)
logger.info("Python %s %s", sys.version, tuple.__itemsize__ * 8)
try:
import numpy
except Exception as error:
logger.warning("Numpy missing: %s", error)
else:
logger.info("Numpy %s", numpy.version.version)
try:
import h5py
except Exception as error:
logger.warning("h5py missing: %s", error)
else:
logger.info("h5py %s", h5py.version.version)
def get_project_name(root_dir):
"""Retrieve project name by running python setup.py --name in root_dir.
:param str root_dir: Directory where to run the command.
:return: The name of the project stored in root_dir
"""
logger.debug("Getting project name in %s", root_dir)
p = subprocess.Popen([sys.executable, "setup.py", "--name"],
shell=False, cwd=root_dir, stdout=subprocess.PIPE)
name, _stderr_data = p.communicate()
logger.debug("subprocess ended with rc= %s", p.returncode)
return name.split()[-1].decode('ascii')
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def build_project(name, root_dir):
"""Run python setup.py build for the project.
Build directory can be modified by environment variables.
:param str name: Name of the project.
:param str root_dir: Root directory of the project
:return: The path to the directory were build was performed
"""
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
if os.environ.get("PYBUILD_NAME") == name:
# we are in the debian packaging way
home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
elif os.environ.get("BUILDPYTHONPATH"):
home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
else:
home = os.path.join(root_dir, "build", architecture)
logger.warning("Building %s to %s", name, home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=root_dir)
logger.debug("subprocess ended with rc= %s", p.wait())
if os.path.isdir(home):
return home
alt_home = os.path.join(os.path.dirname(home), "lib")
if os.path.isdir(alt_home):
return alt_home
def import_project_module(project_name, project_dir):
"""Import project module, from the system of from the project directory"""
if "--installed" in sys.argv:
try:
module = importlib.import_module(project_name)
except Exception:
logger.error("Cannot run tests on installed version: %s not installed or raising error.",
project_name)
raise
else: # Use built source
build_dir = build_project(project_name, project_dir)
if build_dir is None:
            logger.error("Built project is not available, investigate!")
sys.path.insert(0, build_dir)
logger.warning("Patched sys.path, added: '%s'", build_dir)
module = importlib.import_module(project_name)
return module
if __name__ == "__main__": # Needed for multiprocessing support on Windows
import pytest
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = get_project_name(PROJECT_DIR)
logger.info("Project name: %s", PROJECT_NAME)
project_module = import_project_module(PROJECT_NAME, PROJECT_DIR)
PROJECT_VERSION = getattr(project_module, 'version', '')
PROJECT_PATH = project_module.__path__[0]
def normalize_option(option):
option_parts = option.split(os.path.sep)
if option_parts == ["src", "silx"]:
return PROJECT_PATH
if option_parts[:2] == ["src", "silx"]:
return os.path.join(PROJECT_PATH, *option_parts[2:])
return option
args = [normalize_option(p) for p in sys.argv[1:] if p != "--installed"]
# Run test on PROJECT_PATH if nothing is specified
without_options = [a for a in args if not a.startswith("-")]
if len(without_options) == 0:
args += [PROJECT_PATH]
argv = ["--rootdir", PROJECT_PATH] + args
sys.exit(pytest.main(argv))
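# Typical invocations (based on the options this script itself handles):
#
#   python3 run_tests.py                # build the project, then run its tests
#   python3 run_tests.py --installed    # test the installed package instead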
| 34.895604
| 101
| 0.668714
| 839
| 6,351
| 4.934446
| 0.33969
| 0.037198
| 0.021739
| 0.02029
| 0.111111
| 0.101449
| 0.05314
| 0.016425
| 0
| 0
| 0
| 0.00694
| 0.205952
| 6,351
| 181
| 102
| 35.088398
| 0.814
| 0.324673
| 0
| 0.12
| 0
| 0
| 0.14254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.17
| 0
| 0.31
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc8faa6c50d7d1921cb25f63e39e57127594a8e6
| 7,072
|
py
|
Python
|
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-03-10T11:10:20.000Z
|
2018-03-10T11:10:20.000Z
|
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON, RERAISED_EXCEPTIONS
from .unic import unic
EXCLUDE_ROBOT_TRACES = not os.getenv('ROBOT_INTERNAL_TRACES')
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
This method handles also exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces=exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exc_info=None, exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = exc_info or sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
class _ErrorDetails(object):
_generic_exception_names = ('AssertionError', 'AssertionFailedError',
'Exception', 'Error', 'RuntimeError',
'RuntimeException')
def __init__(self, exc_type, exc_value, exc_traceback,
exclude_robot_traces=True):
self.error = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._exclude_robot_traces = exclude_robot_traces
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
def _get_message(self):
raise NotImplementedError
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_details(self):
raise NotImplementedError
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if self._is_generic_exception(name):
return message
return '%s: %s' % (name, message)
def _is_generic_exception(self, name):
return (name in self._generic_exception_names or
isinstance(self.error, RobotError) or
getattr(self.error, 'ROBOT_SUPPRESS_NAME', False))
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
name = self._get_name(self._exc_type)
return self._format_message(name, unic(self.error))
def _get_details(self):
if isinstance(self.error, RobotError):
return self.error.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
if not self._exclude_robot_traces:
return False
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
    _java_trace_re = re.compile(r'^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self.error.getMessage()
else:
exc_msg = str(self.error)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self.error.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self.error.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
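# Usage sketch (illustrative): capture the message and details of the last
# raised exception via the helpers above.
#
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       message, details = get_error_details()
#       # message is 'ZeroDivisionError: division by zero' on CPython 3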
| 34.330097
| 78
| 0.662472
| 875
| 7,072
| 5.105143
| 0.248
| 0.023506
| 0.048355
| 0.022386
| 0.140587
| 0.105664
| 0.055742
| 0.044101
| 0.044101
| 0
| 0
| 0.003795
| 0.254808
| 7,072
| 205
| 79
| 34.497561
| 0.843833
| 0.184248
| 0
| 0.142857
| 0
| 0
| 0.045757
| 0.003682
| 0
| 0
| 0
| 0
| 0.007143
| 1
| 0.164286
| false
| 0
| 0.064286
| 0.021429
| 0.478571
| 0.007143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc936c9856eecc335b0cca94f1df34512def1882
| 754
|
py
|
Python
|
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
extraNumber = 4 * math.pi * pow(10, -7)  # vacuum permeability mu_0; defined but never used below
def avgEMF():
turns = input("Input how many turns: ")
radius = input("Input the radius (cm):")
resistance = input("Input resistance (Ω): ")
magField0 = input("Input the first magnetic Field value (T): ")
magField1 = input("Input the second magnetic Field value (T): ")
time = input("Input the time (s): ")
turns = float(turns)
radius = float(radius)
resistance = float(resistance)
magField0 = float(magField0)
magField1 = float(magField1)
time = float(time)
radius = radius/100
area = pow(radius,2)*math.pi
averageEMF = turns * area * ((magField1-magField0)/time)
print(averageEMF)
avgEMF()
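# Worked example (illustrative numbers): 100 turns, radius 5 cm, field going
# from 0.0 T to 0.2 T in 0.1 s (note the resistance input is read but never
# used by the computation above):
#   area = pi * 0.05**2 ~= 7.854e-3 m^2
#   averageEMF = 100 * 7.854e-3 * (0.2 / 0.1) ~= 1.57 (V)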
| 29
| 69
| 0.624668
| 92
| 754
| 5.119565
| 0.413043
| 0.127389
| 0.110403
| 0.080679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028219
| 0.248011
| 754
| 25
| 70
| 30.16
| 0.802469
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc93ed322f15833ada38ade26d0df82b04900ca0
| 1,908
|
py
|
Python
|
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Computes the spectrogram of a test signal using cupy and cuFFT.
Author: Jan Schlüter
"""
import sys
import os
import timeit
import numpy as np
import cupy as cp
INPUT_ON_GPU = True
OUTPUT_ON_GPU = True
from testfile import make_test_signal
def spectrogram(signal, sample_rate=22050, frame_len=1024, fps=70):
"""
Computes a magnitude spectrogram at a given sample rate (in Hz), frame
length (in samples) and frame rate (in Hz), on CUDA using cupy.
"""
if not INPUT_ON_GPU:
signal = cp.array(signal.astype(np.float32)) # already blown up to a list of frames
win = cp.hanning(frame_len).astype(cp.float32)
# apply window function
#signal *= win # this doesn't work correctly for some reason.
signal = signal * win
# perform FFT
spect = cp.fft.rfft(signal)
# convert into magnitude spectrogram
spect = cp.abs(spect)
# return
if OUTPUT_ON_GPU:
cp.cuda.get_current_stream().synchronize()
else:
return spect.get()
def main():
# load input
global x, spectrogram
x = make_test_signal()
# we do the following here because cupy cannot do stride tricks
# the actual copying work is included in the benchmark unless INPUT_ON_GPU
hop_size = 22050 // 70
frame_len = 1024
frames = len(x) - frame_len + 1
x = np.lib.stride_tricks.as_strided(
x, (frames, frame_len), (x.strides[0], x.strides[0]))[::hop_size]
if INPUT_ON_GPU:
x = cp.array(x.astype(np.float32))
# benchmark
times = timeit.repeat(
setup='from __main__ import x, spectrogram',
stmt='spectrogram(x)',
repeat=5, number=32)
print("Took %.3fs." % (min(times) / 32))
# save result
#assert not OUTPUT_ON_GPU
#np.save(sys.argv[0][:-2] + 'npy', spectrogram(x))
if __name__=="__main__":
main()
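For cross-checking the cupy result on a machine without CUDA, a CPU-only NumPy sketch of the same framing and magnitude-FFT pipeline (assumes NumPy >= 1.20 for sliding_window_view; the test signal here is a plain sine wave rather than testfile.make_test_signal):

import numpy as np

def spectrogram_cpu(signal, frame_len=1024, fps=70, sample_rate=22050):
    hop = sample_rate // fps
    # frame the signal without copying, then window and FFT each frame
    frames = np.lib.stride_tricks.sliding_window_view(signal, frame_len)[::hop]
    win = np.hanning(frame_len).astype(np.float32)
    return np.abs(np.fft.rfft(frames * win))

sig = np.sin(2 * np.pi * 440 * np.arange(22050) / 22050).astype(np.float32)
print(spectrogram_cpu(sig).shape)  # (n_frames, frame_len // 2 + 1)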
| 26.5
| 92
| 0.649371
| 279
| 1,908
| 4.290323
| 0.480287
| 0.02924
| 0.033417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027624
| 0.24109
| 1,908
| 72
| 93
| 26.5
| 0.799033
| 0.354822
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.194444
| 0
| 0.277778
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc96fd29e9d6cb6eb71dd73f5f39dcfd2bcd44f8
| 11,604
|
py
|
Python
|
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | 1
|
2021-08-02T02:42:58.000Z
|
2021-08-02T02:42:58.000Z
|
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | null | null | null |
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | 1
|
2021-08-05T08:58:53.000Z
|
2021-08-05T08:58:53.000Z
|
"""
To avoid any issues of memory hanging around between inputs,
we run each input as a separate process.
A little ugly but effective
"""
import gc
import glob
import json
import os
import random
import time
import numpy as np
import torch
from common import invoke_main, read_json, write_json, prepare_out_file, check_file_exists
from validate_config import validate_trials_config
from pt_trial_util import create_csv_writer
from tqdm import tqdm
import model_util
def extend_simrd_config(dest_dir, sim_conf_filename, model_name, specific_params, log_name):
if not check_file_exists(dest_dir, sim_conf_filename):
prepare_out_file(dest_dir, sim_conf_filename)
write_json(dest_dir, sim_conf_filename, dict())
conf = read_json(dest_dir, sim_conf_filename)
if model_name not in conf:
conf[model_name] = []
conf[model_name].append({
'name': model_util.get_model_family(model_name),
'batch_size': str(specific_params['batch_size']),
'layers': specific_params.get('layers', model_util.get_model_layers(model_name)),
'type': model_util.get_model_type(model_name),
'log': log_name,
'has_start': True
})
write_json(dest_dir, sim_conf_filename, conf)
def save_trial_log(dest_dir, sim_conf_filename, model_name, specific_params, is_baseline=False):
"""
Find the last DTR log produced in the trial (if any exist)
and move it to the directory
"""
all_logs = glob.glob(os.path.join(os.getcwd(), '*.log'))
if not all_logs:
return
# if we delete all logs in advance, there should be at most one log
assert len(all_logs) == 1
most_recent = all_logs[0]
# rename and move
# (new name just appends info to the old one)
batch_size = specific_params['batch_size']
budget = specific_params['memory_budget']
if budget < 0:
budget = 'inf'
new_name = '{}-{}-{}-{}'.format(model_name, batch_size, budget,
os.path.basename(most_recent))
filename = prepare_out_file(dest_dir, new_name)
os.rename(most_recent, filename)
if is_baseline and sim_conf_filename is not None:
extend_simrd_config(dest_dir, sim_conf_filename, model_name, specific_params, filename)
def delete_logs():
for log in glob.glob(os.path.join(os.getcwd(), '*.log')):
os.remove(log)
def run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params, use_dtr, use_profiling):
"""
This function initializes a model and performs
a single measurement of the model on the given input.
While it might seem most reasonable to initialize
the model outside of the loop, DTR's logs have shown
that certain constants in the model persist between loop iterations;
performing these actions in a separate *function scope* turned out to be the only
way to prevent having those constants hang around.
Returns a dict of measurements
"""
torch.cuda.reset_max_memory_allocated()
# resetting means the count should be reset to
# only what's in scope, meaning only the input
input_mem = torch.cuda.max_memory_allocated()
model = produce_model(extra_params=extra_params)
params = []
for m in model:
if hasattr(m, 'parameters'):
params.extend(m.parameters())
model_mem = torch.cuda.max_memory_allocated()
optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# start timing
torch.cuda.synchronize()
start_time = time.time()
if use_dtr:
torch.reset_profile()
start.record()
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
run_model(criterion, *model, *inp, optimizer=optimizer)
end.record()
start_sync = time.time()
torch.cuda.synchronize()
end_sync = time.time()
end_time = time.time()
# end timing
if use_dtr:
# operators-only time, tracked by DTR
cuda_time = torch.compute_time()
base_compute_time = -1
remat_compute_time = -1
search_time = -1
cost_time = -1
if use_profiling:
base_compute_time = torch.base_compute_time()
remat_compute_time = torch.remat_compute_time()
search_time = torch.search_time()
cost_time = torch.cost_time()
torch.reset_profile()
total_mem = torch.cuda.max_memory_allocated()
teardown(*model)
torch.cuda.reset_max_memory_allocated()
del model
if use_dtr:
torch.toggle_log(False)
del params
batch_size = len(inp[0])
ips = batch_size / (end_time - start_time)
result = {
'time': end_time - start_time,
'sync_time': end_sync - start_sync,
'gpu_time': start.elapsed_time(end),
'input_mem': input_mem,
'model_mem': model_mem,
'total_mem': total_mem,
'base_compute_time': base_compute_time,
'remat_compute_time': remat_compute_time,
'search_time': search_time,
'cost_time': cost_time,
'batch_size': batch_size,
'ips': ips
}
if use_dtr:
result['cuda_time'] = cuda_time
else:
result['cuda_time'] = -1.0
return result
def timing_loop(model_name, i, config, use_dtr,
specific_params, writer, trial_run=False, trial_run_outfile=None, memory_budget=-1.0):
dry_run = config['dry_run']
measurements = []
print(f'Running {model_name} : {specific_params}')
# remove any logs hanging around (so we only have to look for one)
delete_logs()
# we only save logs for the final input on DTR
save_log = use_dtr and specific_params.get('save_logs', config['save_logs']) and i == config['n_inputs'] - 1
if use_dtr:
torch.toggle_log(False)
# whether to report profiling info
use_profiling = use_dtr and specific_params.get('use_profiling', False)
use_cudnn = model_util.use_cudnn(model_name)
with torch.backends.cudnn.flags(enabled=use_cudnn, benchmark=use_cudnn):
criterion = model_util.get_criterion(model_name)
produce_model, gen_input, run_model, teardown = model_util.prepare_model(model_name,
specific_params['batch_size'],
use_dtr=use_dtr)
inp = gen_input(i, specific_params.get('extra_params', dict()))
n_reps = specific_params.get('n_reps', config['n_reps'])
if use_profiling:
torch.toggle_profile(use_profiling)
progress = tqdm(range(dry_run + n_reps))
for j in progress:
            # parenthesized conditional (the original's `+ '' if ...` bound the
            # concatenation first); reps start once j reaches dry_run
            progress.set_description(f'Rep [{j - dry_run}]' if j >= dry_run else f'Dry run [{j}]')
gc.collect()
# Annotate where the final run starts in the log
if save_log and j == dry_run + n_reps - 1:
torch.toggle_log(True)
torch.annotate_log('START')
res = run_single_measurement(model_name, produce_model, run_model,
teardown, inp, criterion, extra_params=specific_params.get('extra_params', dict()), use_dtr=use_dtr, use_profiling=use_profiling)
if j >= dry_run:
measurements.append(res)
# Dump results
model_name_replace_dict = {
'tv_resnet152': 'resnet152',
'tv_resnet50': 'resnet50',
}
train_ips_list = []
batch_size = None
for res in measurements:
batch_size = res['batch_size']
train_ips_list.append(res['ips'])
out_file = "speed_results.tsv"
with open(out_file, "a") as fout:
val_dict = {
'network': model_name_replace_dict.get(model_name, model_name),
'algorithm': 'dtr',
'budget': specific_params['memory_budget'],
'batch_size': batch_size,
'ips': np.median(train_ips_list) if train_ips_list else -1,
}
print(val_dict)
fout.write(json.dumps(val_dict) + "\n")
print(f"save results to {out_file}")
# write to csv file only when this trial is not
# for getting a baseline memory usage
if trial_run:
write_json(os.getcwd(), trial_run_outfile, {
'mem' : max(map(lambda data: data['total_mem'], measurements))
})
return
if save_log:
save_trial_log(config['log_dest'], config.get('simrd_config', None),
model_name,
specific_params,
is_baseline=specific_params['memory_budget'] == -1)
# clean up after ourselves
delete_logs()
# do all the writing after the trial is over
for j in range(len(measurements)):
data = measurements[j]
# do unit conversions now: times in ms,
# memory in MB
writer.writerow({
'time': data['time']*1e3,
'sync_time': data['sync_time']*1e3,
# pytorch's cuda elapsed time is already in ms
'gpu_time': float(data['gpu_time']),
# 'cuda_time' : float(data['cuda_time']) * 1e-6,
'input_mem': data['input_mem']*1e-6,
'model_mem': data['model_mem']*1e-6,
'total_mem': data['total_mem']*1e-6,
'memory_budget': memory_budget,
# profiling (reported in nanoseconds)
'base_compute_time': data['base_compute_time']*1e-6,
'remat_compute_time': data['remat_compute_time']*1e-6,
'search_time': data['search_time']*1e-6,
'cost_time': data['cost_time']*1e-6,
            'rep': j,  # `measurements` already excludes dry runs, so j is the rep index
'input': i,
**specific_params
})
def main(config_dir, experiment_mode, model_name, input_idx, params_file, out_file,
trial_run=False, trial_run_outfile=None):
if 'DTR_MODEL_NAME' in os.environ:
model_name = os.environ['DTR_MODEL_NAME']
config, msg = validate_trials_config(config_dir)
if config is None:
print(msg)
return 1
use_dtr = (experiment_mode == 'dtr')
i = int(input_idx)
is_trial = trial_run == 'True'
if config['set_seed']:
torch.manual_seed(config['seed'] + i)
random.seed(config['seed'] + i)
cwd = os.getcwd()
# handle specific params, esp. for DTR
specific_params = read_json(cwd, params_file)
if 'DTR_MEMORY_BUDGET' in os.environ:
specific_params['memory_budget'] = float(os.environ['DTR_MEMORY_BUDGET'])
assert 'batch_size' in specific_params
if use_dtr:
assert 'memory_budget' in specific_params
if specific_params['memory_budget'] > 0:
print(f'Setting budget to {int(specific_params["memory_budget"])}')
torch.set_memory_budget(int(specific_params['memory_budget']))
if is_trial:
timing_loop(model_name, i, config, use_dtr, specific_params, None, True, trial_run_outfile)
return
with open(out_file, 'a', newline='') as csvfile:
writer = create_csv_writer(csvfile, specific_params)
timing_loop(model_name, i, config, use_dtr, specific_params, writer, memory_budget=specific_params.get('memory_budget', -1))
if __name__ == '__main__':
invoke_main(main, 'config_dir', 'experiment_mode',
'model_name', 'input_idx', 'params_file',
'out_file', 'trial_run', 'trial_run_outfile')
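The measurement core above pairs wall-clock timing with CUDA events; a stripped-down sketch of that idiom in plain PyTorch (no DTR hooks; sizes and the matmul workload are illustrative):

import time
import torch

def time_gpu_op(fn):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()        # drain pending work before the clocks start
    t0 = time.time()
    start.record()
    fn()
    end.record()
    torch.cuda.synchronize()        # wait so both clocks cover the same work
    return time.time() - t0, start.elapsed_time(end)  # (seconds, milliseconds)

if torch.cuda.is_available():
    a = torch.randn(4096, 4096, device="cuda")
    wall_s, gpu_ms = time_gpu_op(lambda: a @ a)
    print(f"wall {wall_s * 1e3:.1f} ms, gpu {gpu_ms:.1f} ms")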
| 35.057402
| 170
| 0.635384
| 1,549
| 11,604
| 4.479019
| 0.199484
| 0.062554
| 0.019458
| 0.016143
| 0.242721
| 0.196887
| 0.122081
| 0.09729
| 0.088931
| 0.082445
| 0
| 0.006542
| 0.262323
| 11,604
| 330
| 171
| 35.163636
| 0.803972
| 0.134781
| 0
| 0.115044
| 0
| 0
| 0.114507
| 0.003921
| 0
| 0
| 0
| 0
| 0.013274
| 1
| 0.026549
| false
| 0
| 0.057522
| 0
| 0.106195
| 0.022124
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc986ff7e618db67d5b1902a0fdfeecd1595ea88
| 1,482
|
py
|
Python
|
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | 5
|
2020-01-21T21:11:49.000Z
|
2022-02-06T19:55:28.000Z
|
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | null | null | null |
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | 3
|
2018-05-25T14:57:36.000Z
|
2022-01-27T12:53:41.000Z
|
import sys, os
from subprocess import call
try:
from downloadPdb import downloadPDB
except ImportError:
from .downloadPdb import downloadPDB
pdbListFile="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/117_dimers_list.tsv"
outPath="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/pdbFiles/rawPDBs"
USE_BIO_UNIT=False
##def downloadPDB(pdbId, pdbOutPath, useBioUnit):
#### download pdb: wget ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz, or already decompressed:
#### wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz |zcat > 1i1q.pdb
## outName= os.path.join(pdbOutPath,pdbId+'.pdb')
## if not os.path.isfile(outName):
## if useBioUnit:
## cmd= 'wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/%s.pdb1.gz |zcat > %s'%(pdbId.lower(), outName)
## else:
## cmd= 'wget -qO- http://www.pdb.org/pdb/files/%s.pdb | cat > %s'%(pdbId.upper(), outName)
## print(cmd)
## call(cmd, shell= True)
def downloadInFile(fname, outPath, useBioUnit):
    with open(fname) as f:
        for line in f:
            if not line.strip():
                continue  # skip blank lines, which would raise IndexError below
            pdbId = line.split()[0]
            print(pdbId)
            downloadPDB(pdbId, outPath, bioUnit=0 if useBioUnit else None)
if __name__=="__main__":
if len(sys.argv)==3:
pdbListFile= os.path.abspath(os.path.expanduser(sys.argv[1]))
outPath= os.path.abspath(os.path.expanduser(sys.argv[2]))
print( pdbListFile, outPath)
downloadInFile(pdbListFile, outPath, USE_BIO_UNIT)
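Hypothetical invocation (paths are illustrative), matching the argv handling above:

# python downloadPDBsInList.py dimers_list.tsv ./rawPDBs
#   -> downloads every id listed in column 1 of dimers_list.tsv into ./rawPDBs
# With fewer than two arguments, the hard-coded pdbListFile/outPath defaults apply.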
| 36.146341
| 129
| 0.702429
| 209
| 1,482
| 4.913876
| 0.430622
| 0.035054
| 0.032132
| 0.040896
| 0.318403
| 0.318403
| 0.232717
| 0.232717
| 0.16261
| 0.16261
| 0
| 0.013333
| 0.139676
| 1,482
| 40
| 130
| 37.05
| 0.792157
| 0.421727
| 0
| 0
| 0
| 0
| 0.174489
| 0.164862
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.285714
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc98a22d0cd11d65a7d45c78d01ce4ed45420116
| 1,935
|
py
|
Python
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 47
|
2015-01-20T15:38:41.000Z
|
2022-02-15T21:03:50.000Z
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 16
|
2015-06-09T16:12:50.000Z
|
2020-02-05T06:40:18.000Z
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 56
|
2015-01-20T15:38:44.000Z
|
2022-03-03T18:13:39.000Z
|
#!/usr/bin/env python
import json
import sys
import xapian
import support
def search(dbpath, querystring, offset=0, pagesize=10):
# offset - defines starting point within result set
# pagesize - defines number of records to retrieve
# Open the database we're going to search.
db = xapian.Database(dbpath)
# Set up a QueryParser with a stemmer and suitable prefixes
queryparser = xapian.QueryParser()
queryparser.set_stemmer(xapian.Stem("en"))
queryparser.set_stemming_strategy(queryparser.STEM_SOME)
queryparser.add_prefix("title", "S")
queryparser.add_prefix("description", "XD")
# And parse the query
query = queryparser.parse_query(querystring)
# Use an Enquire object on the database to run the query
enquire = xapian.Enquire(db)
enquire.set_query(query)
# And print out something about each match
matches = []
### Start of example code.
# Set up a spy to inspect the MAKER value at slot 1
spy = xapian.ValueCountMatchSpy(1)
enquire.add_matchspy(spy)
for match in enquire.get_mset(offset, pagesize, 100):
fields = json.loads(match.document.get_data().decode('utf8'))
print(u"%(rank)i: #%(docid)3.3i %(title)s" % {
'rank': match.rank + 1,
'docid': match.docid,
'title': fields.get('TITLE', u''),
})
matches.append(match.docid)
# Fetch and display the spy values
for facet in spy.values():
print("Facet: %(term)s; count: %(count)i" % {
'term' : facet.term.decode('utf-8'),
'count' : facet.termfreq
})
# Finally, make sure we log the query and displayed results
support.log_matches(querystring, offset, pagesize, matches)
### End of example code.
if len(sys.argv) < 3:
print("Usage: %s DBPATH QUERYTERM..." % sys.argv[0])
sys.exit(1)
search(dbpath = sys.argv[1], querystring = " ".join(sys.argv[2:]))
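Hypothetical invocation (database path and query terms are illustrative); DBPATH must be an index built by the companion indexing examples in this docsprint:

# python3 search_facets.py ./db watch clock
#   -> search(dbpath='./db', querystring='watch clock')
# prints ranked matches plus one "Facet: ...; count: ..." line per value
# counted by the ValueCountMatchSpy on slot 1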
| 31.209677
| 69
| 0.649612
| 259
| 1,935
| 4.80695
| 0.474903
| 0.02249
| 0.009639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 0.227907
| 1,935
| 61
| 70
| 31.721311
| 0.821285
| 0.268217
| 0
| 0.057143
| 0
| 0
| 0.110079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.114286
| 0
| 0.142857
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc98ed1d916dea38c19eaadce5f09692d5d10eeb
| 1,272
|
py
|
Python
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 83
|
2015-01-02T04:50:43.000Z
|
2021-06-06T03:26:55.000Z
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 2
|
2015-01-04T11:25:20.000Z
|
2015-01-05T11:13:37.000Z
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 20
|
2015-01-15T10:00:09.000Z
|
2019-11-06T07:25:59.000Z
|
import re
from rest_framework import serializers
from .models import Collection, CollectionIcon
class CollectionSerializer(serializers.ModelSerializer):
"""Collections's serializer"""
class Meta:
model = Collection
read_only = ('token', )
class CollectionIconSerializer(serializers.ModelSerializer):
"""CollectionIcon's Serializer. """
class Meta:
model = CollectionIcon
def validate_width(self, attrs, source):
width = attrs[source]
if width < 1.0:
raise serializers.ValidationError('Width should be greater than 1.0')
return attrs
def validate_name(self, attrs, source):
name = attrs[source].lower()
name = re.sub(r'[^a-z0-9\-]', '-', name).strip('-')
name = re.sub(r'-+', '-', name)
if name:
attrs[source] = name
else:
raise serializers.ValidationError('Invalid name')
return attrs
def validate(self, attrs):
packicon = attrs.get('packicon')
svg_d = attrs.get('svg_d')
width = attrs.get('width')
if packicon or (svg_d and width): return attrs
raise serializers.ValidationError(
'Either a packicon or the shape of icon should be given'
)
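The name validator above slugifies via two regex passes; a standalone check of that logic (pure re, no DRF required):

import re

def slugify(name):
    # lowercase, replace disallowed chars with '-', trim, collapse runs of '-'
    name = re.sub(r'[^a-z0-9\-]', '-', name.lower()).strip('-')
    return re.sub(r'-+', '-', name)

assert slugify("My Icon!!") == "my-icon"
assert slugify("--weird__name--") == "weird-name"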
| 27.06383
| 81
| 0.616352
| 140
| 1,272
| 5.55
| 0.421429
| 0.070785
| 0.119691
| 0.05148
| 0.06435
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006494
| 0.273585
| 1,272
| 46
| 82
| 27.652174
| 0.834416
| 0.042453
| 0
| 0.125
| 0
| 0
| 0.113505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.09375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc99e84c9e8d7aa99d673f47ef51acfd45692fba
| 1,738
|
py
|
Python
|
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
# Time: O(n*2^n)
# Space: O(2^n)
class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
def dfs(nums, target, used, todo, lookup):
if lookup[used] is None:
targ = (todo-1)%target + 1
lookup[used] = any(dfs(nums, target, used | (1<<i), todo-num, lookup) \
for i, num in enumerate(nums) \
if ((used>>i) & 1) == 0 and num <= targ)
return lookup[used]
total = sum(nums)
if total%k or max(nums) > total//k:
return False
lookup = [None] * (1 << len(nums))
lookup[-1] = True
return dfs(nums, total//k, 0, total, lookup)
# Time: O(k^(n-k) * k!)
# Space: O(n)
# DFS solution with pruning.
class Solution2(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
def dfs(nums, target, i, subset_sums):
if i == len(nums):
return True
for k in range(len(subset_sums)):
if subset_sums[k]+nums[i] > target:
continue
subset_sums[k] += nums[i]
if dfs(nums, target, i+1, subset_sums):
return True
subset_sums[k] -= nums[i]
if not subset_sums[k]: break
return False
total = sum(nums)
if total%k != 0 or max(nums) > total//k:
return False
nums.sort(reverse=True)
subset_sums = [0] * k
return dfs(nums, total//k, 0, subset_sums)
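Quick self-check of both classes on a small, illustrative instance (the classic example with target subset sum 5):

nums, k = [4, 3, 2, 3, 5, 2, 1], 4
assert Solution().canPartitionKSubsets(list(nums), k)
assert Solution2().canPartitionKSubsets(list(nums), k)
assert not Solution2().canPartitionKSubsets([1, 1, 1], 2)  # 3 not divisible by 2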
| 30.491228
| 87
| 0.468354
| 217
| 1,738
| 3.709677
| 0.253456
| 0.111801
| 0.064596
| 0.055901
| 0.442236
| 0.42236
| 0.278261
| 0.213665
| 0.213665
| 0.213665
| 0
| 0.014423
| 0.401611
| 1,738
| 56
| 88
| 31.035714
| 0.759615
| 0.10817
| 0
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc9b38aa93978a9c5a2ff6d24ac4f1e6be8b4faa
| 1,888
|
py
|
Python
|
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | 9
|
2019-04-23T01:46:12.000Z
|
2021-08-16T07:07:12.000Z
|
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | null | null | null |
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | 5
|
2016-09-21T03:47:48.000Z
|
2019-07-30T22:17:35.000Z
|
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for compound descriptors
"""
from __future__ import print_function
import unittest
import Parser
from rdkit.six.moves import xrange
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
self.piece1 = [['d1','d2'],['d1','d2']]
self.aDict = {'Fe':{'d1':1,'d2':2},'Pt':{'d1':10,'d2':20}}
self.pDict = {'d1':100.,'d2':200.}
self.compos = [('Fe',1),('Pt',1)]
self.cExprs = ["SUM($1)","SUM($1)+SUM($2)","MEAN($1)","DEV($2)","MAX($1)","MIN($2)","SUM($1)/$a"]
self.results = [11.,33.,5.5,9.,10.,2.,0.11]
self.tol = 0.0001
def testSingleCalcs(self):
" testing calculation of a single descriptor "
for i in xrange(len(self.cExprs)):
cExpr= self.cExprs[i]
argVect = self.piece1 + [cExpr]
res = Parser.CalcSingleCompoundDescriptor(self.compos,argVect,self.aDict,self.pDict)
self.assertAlmostEqual(res,self.results[i],2)
def testMultipleCalcs(self):
" testing calculation of multiple descriptors "
for i in xrange(len(self.cExprs)):
cExpr= self.cExprs[i]
argVect = self.piece1 + [cExpr]
res = Parser.CalcMultipleCompoundsDescriptor([self.compos,self.compos],argVect,
self.aDict,[self.pDict,self.pDict])
self.assertAlmostEqual(res[0],self.results[i],2)
self.assertAlmostEqual(res[1],self.results[i],2)
#self.assertTrue(abs(res[0]-self.results[i])<self.tol,'Expression %s failed'%(cExpr))
#self.assertTrue((res[1]-self.results[i])<self.tol,'Expression %s failed'%(cExpr))
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testSingleCalcs'))
suite.addTest(TestCase('testMultipleCalcs'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
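# For reference (reading inferred from the fixtures and expected results):
# in the descriptor expressions, $1, $2, ... index the per-atom descriptor
# columns and $a, $b, ... the composition-level parameters in pDict. With the
# Fe/Pt composition from setUp, "SUM($1)" -> 1 + 10 = 11 and
# "SUM($1)/$a" -> 11 / 100 = 0.11, matching the first and last self.results.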
| 35.622642
| 101
| 0.64036
| 248
| 1,888
| 4.822581
| 0.358871
| 0.055184
| 0.050167
| 0.032609
| 0.334448
| 0.254181
| 0.254181
| 0.254181
| 0.188963
| 0.120401
| 0
| 0.040921
| 0.17161
| 1,888
| 52
| 102
| 36.307692
| 0.723785
| 0.175318
| 0
| 0.157895
| 0
| 0
| 0.137171
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.263158
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc9c8f24e080e4c64950de33e4962b6b2e44ede2
| 1,575
|
py
|
Python
|
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | 8
|
2021-05-03T04:06:15.000Z
|
2022-01-15T16:27:42.000Z
|
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | null | null | null |
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | 3
|
2021-05-19T17:31:18.000Z
|
2021-06-19T12:24:01.000Z
|
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "readme.md").read_text(encoding="utf-8")
setup(
name="data_dashboard",
version="0.1.1",
description="Dashboard to explore the data and to create baseline Machine Learning model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/maciek3000/data_dashboard",
author="Maciej Dowgird",
author_email="dowgird.maciej@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization"
],
package_dir={"data_dashboard": "data_dashboard"},
packages=find_packages(),
python_requires=">=3.7",
install_requires=[
"pandas>=1.2.3",
"numpy>=1.19.5",
"scipy>=1.6.1",
"beautifulsoup4>=4.9.3",
"scikit-learn>=0.24.1",
"seaborn>=0.11.1",
"bokeh>=2.3.0",
"Jinja2>=2.11.3",
"xgboost>=1.3.3",
"lightgbm>=3.2.0"
],
package_data={
"data_dashboard": ["static/*", "templates/*", "examples/*"]
},
project_urls={
"Github": "https://github.com/maciek3000/data_dashboard",
},
)
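# Standard setuptools workflow for a file like this (commands are generic,
# not specific to this repo):
#   pip install -e .     # editable install for development
#   python -m build      # sdist + wheel; requires `pip install build`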
| 32.142857
| 95
| 0.615238
| 177
| 1,575
| 5.333333
| 0.564972
| 0.082627
| 0.040254
| 0.063559
| 0.07839
| 0.07839
| 0
| 0
| 0
| 0
| 0
| 0.04401
| 0.220952
| 1,575
| 48
| 96
| 32.8125
| 0.725346
| 0
| 0
| 0.044444
| 0
| 0
| 0.499683
| 0.056508
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.044444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc9d746d95215d78b546409456d7b42ad25142a0
| 5,577
|
py
|
Python
|
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for Instance API.
"""
import datetime
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
RESOURCE_PLURAL = "instances"
# highly available instance type for deterministic testing
INSTANCE_TYPE = "m4.large"
INSTANCE_AMI = "Amazon Linux 2 Kernel"
INSTANCE_TAG_KEY = "owner"
INSTANCE_TAG_VAL = "ack-controller"
CREATE_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
TIMEOUT_SECONDS = 300
def get_instance(ec2_client, instance_id: str) -> dict:
instance = None
try:
resp = ec2_client.describe_instances(
InstanceIds=[instance_id]
)
instance = resp["Reservations"][0]["Instances"][0]
except Exception as e:
logging.debug(e)
finally:
return instance
def get_instance_state(ec2_client, instance_id):
instance_state = None
try:
instance = get_instance(ec2_client, instance_id)
instance_state = instance["State"]["Name"]
except Exception as e:
logging.debug(e)
finally:
return instance_state
def wait_for_instance_or_die(ec2_client, instance_id, desired_state, timeout_sec):
    # compute the deadline once, before the loop; the original recomputed it on
    # every iteration, so the timeout could never actually expire
    timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout_sec)
    while True:
        if datetime.datetime.now() >= timeout:
            pytest.fail(f"Timed out waiting for Instance to enter {desired_state} state")
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        instance_state = get_instance_state(ec2_client, instance_id)
        if instance_state == desired_state:
            break
def get_ami_id(ec2_client):
try:
# Use latest AL2
resp = ec2_client.describe_images(
Owners=['amazon'],
Filters=[
{"Name": "architecture", "Values": ['x86_64']},
{"Name": "state", "Values": ['available']},
{"Name": "virtualization-type", "Values": ['hvm']},
],
)
for image in resp['Images']:
if 'Description' in image:
if INSTANCE_AMI in image['Description']:
return image['ImageId']
except Exception as e:
logging.debug(e)
@pytest.fixture
def instance(ec2_client):
test_resource_values = REPLACEMENT_VALUES.copy()
resource_name = random_suffix_name("instance-ack-test", 24)
test_vpc = get_bootstrap_resources().SharedTestVPC
subnet_id = test_vpc.public_subnets.subnet_ids[0]
ami_id = get_ami_id(ec2_client)
test_resource_values["INSTANCE_NAME"] = resource_name
test_resource_values["INSTANCE_AMI_ID"] = ami_id
test_resource_values["INSTANCE_TYPE"] = INSTANCE_TYPE
test_resource_values["INSTANCE_SUBNET_ID"] = subnet_id
test_resource_values["INSTANCE_TAG_KEY"] = INSTANCE_TAG_KEY
test_resource_values["INSTANCE_TAG_VAL"] = INSTANCE_TAG_VAL
# Load Instance CR
resource_data = load_ec2_resource(
"instance",
additional_replacements=test_resource_values,
)
logging.debug(resource_data)
# Create k8s resource
ref = k8s.CustomResourceReference(
CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
resource_name, namespace="default",
)
k8s.create_custom_resource(ref, resource_data)
cr = k8s.wait_resource_consumed_by_controller(ref)
assert cr is not None
assert k8s.get_resource_exists(ref)
yield (ref, cr)
# Delete the instance when tests complete
try:
_, deleted = k8s.delete_custom_resource(ref, 3, 10)
assert deleted
except:
pass
@service_marker
@pytest.mark.canary
class TestInstance:
def test_create_delete(self, ec2_client, instance):
(ref, cr) = instance
resource_id = cr["status"]["instanceID"]
time.sleep(CREATE_WAIT_AFTER_SECONDS)
# Check Instance exists
instance = get_instance(ec2_client, resource_id)
assert instance is not None
# Give time for instance to come up
wait_for_instance_or_die(ec2_client, resource_id, 'running', TIMEOUT_SECONDS)
# Validate instance tags
instance_tags = instance["Tags"]
tag_present = False
for t in instance_tags:
if (t['Key'] == INSTANCE_TAG_KEY and
t['Value'] == INSTANCE_TAG_VAL):
tag_present = True
assert tag_present
# Delete k8s resource
_, deleted = k8s.delete_custom_resource(ref, 2, 5)
assert deleted is True
# Reservation still exists, but instance will commence termination
# State needs to be 'terminated' in order to remove the dependency on the shared subnet
# for successful test cleanup
wait_for_instance_or_die(ec2_client, resource_id, 'terminated', TIMEOUT_SECONDS)
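A minimal boto3 sketch of the polling helpers above, assuming default AWS credentials and region are configured (the instance id is hypothetical):

import boto3

ec2 = boto3.client("ec2")
resp = ec2.describe_instances(InstanceIds=["i-0123456789abcdef0"])
state = resp["Reservations"][0]["Instances"][0]["State"]["Name"]
print(state)  # e.g. "running" or "terminated"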
| 33.8
| 95
| 0.684777
| 701
| 5,577
| 5.195435
| 0.31669
| 0.034596
| 0.039539
| 0.042834
| 0.183141
| 0.127403
| 0.085667
| 0.049973
| 0.049973
| 0.028556
| 0
| 0.01333
| 0.23328
| 5,577
| 165
| 96
| 33.8
| 0.8384
| 0.178591
| 0
| 0.104348
| 0
| 0
| 0.09688
| 0
| 0
| 0
| 0
| 0
| 0.052174
| 1
| 0.052174
| false
| 0.008696
| 0.078261
| 0
| 0.165217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bc9fd661a260bba8109c66590275e9d7c9b1094c
| 2,774
|
py
|
Python
|
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
#coding: utf-8
import sys
import os
import asyncio
import websockets
import json
import socket
import xlrd
#global vars
phd_data = None
pro_data = None
def get_host_ip():
    # create the socket outside the try block so `s` is always bound in finally
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('1.255.255.255', 65535))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
def read_xls(name):
    try:
        book = xlrd.open_workbook(name)
    except Exception:
        print("Open Excel(%s) failed!" % name)
        return None  # the original fell through with `book` unbound
    ctx = dict()     # the original used `ctx` without initializing it
    for i in range(book.nsheets):
        s = book.sheet_by_index(i)
        sname = s.name
        svalue = list()
        for r in range(s.nrows):
            svalue.append(s.row_values(r))
        ctx[i] = (sname, svalue)
    return ctx
# generate the json table definition
def gen_pro():
ret = {
"header": [
{
"name": "id",
"title": "ID",
"size": 50,
"sortable": True,
"sortDir": "asc",
"format": "number"
},
{
"name": "name",
"title": "Name",
"sortable": True
},
{
"name": "start",
"title": "Start",
"sortable": True,
"size": 150,
"format": "date",
"formatMask": "dd-mm-yyyy"
},
{
"name": "age",
"title": "Age",
"sortable": True,
"size": 80
},
{
"name": "salary",
"title": "Salary",
"sortable": True,
"size": 150,
"format": "money",
"show": True
}
],
"data":[]
}
return ret
async def proc_msg(ws, msg):
    # declare the module-level caches global, otherwise the assignments below
    # would only create locals
    global phd_data, pro_data
    method = msg.get('method')
if method == 'host_ip':
ip=get_host_ip()
ret = {
"method":method,
"type":'success',
'return':ip
}
await ws.send(json.dumps(ret))
elif method=='genpro':
phd_file = msg.get('phd_file')
if phd_file:
phd_data = read_xls(phd_file)
pro_file = msg.get('pro_file')
if pro_file:
pro_data = read_xls(pro_file)
data = gen_pro()
ret = {
"method":method,
"type":'success',
'return':data
}
await ws.send(json.dumps(ret))
else:
ret = {'type':'unknown'}
await ws.send(json.dumps(ret))
async def recv_msg(websocket):
while True:
recv_text = await websocket.recv()
try:
msg = json.loads(recv_text)
await proc_msg(websocket, msg)
        except:
            ret = {'type': 'error'}
            await websocket.send(json.dumps(ret))  # was `ws`, which is undefined here
async def main_logic(websocket, path):
await recv_msg(websocket)
port = 5678
if len(sys.argv) >=2:
port = sys.argv[1]
ws_server = websockets.serve(main_logic, '0.0.0.0', port)
asyncio.get_event_loop().run_until_complete(ws_server)
asyncio.get_event_loop().run_forever()
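A matching client sketch using the same websockets library, sending the 'host_ip' request the server handles above (port 5678 is the server default; run against a live server):

import asyncio
import json
import websockets

async def ask_host_ip():
    async with websockets.connect("ws://127.0.0.1:5678") as ws:
        await ws.send(json.dumps({"method": "host_ip"}))
        # expected: {'method': 'host_ip', 'type': 'success', 'return': <ip>}
        print(json.loads(await ws.recv()))

# asyncio.get_event_loop().run_until_complete(ask_host_ip())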
| 20.248175
| 60
| 0.519466
| 338
| 2,774
| 4.130178
| 0.37574
| 0.04298
| 0.031519
| 0.04298
| 0.190544
| 0.123209
| 0.044413
| 0.044413
| 0
| 0
| 0
| 0.019882
| 0.329128
| 2,774
| 136
| 61
| 20.397059
| 0.730253
| 0.010815
| 0
| 0.189655
| 0
| 0
| 0.131704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025862
| false
| 0
| 0.060345
| 0
| 0.112069
| 0.008621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca0727b76dc54909be0bf60b6d636ec8f539927
| 2,518
|
py
|
Python
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 11,811
|
2015-01-01T02:40:39.000Z
|
2022-03-31T16:11:19.000Z
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 2,364
|
2015-01-01T00:29:19.000Z
|
2022-03-31T21:26:34.000Z
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 3,240
|
2015-01-05T02:34:15.000Z
|
2022-03-30T18:26:29.000Z
|
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4 import DFA
from antlr4.Utils import str_list
from antlr4.dfa.DFAState import DFAState
class DFASerializer(object):
__slots__ = ('dfa', 'literalNames', 'symbolicNames')
def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write("-")
buf.write(label)
buf.write("->")
buf.write(self.getStateString(t))
buf.write('\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i:int):
if i==0:
return "EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return str(i-1)
def getStateString(self, s:DFAState):
n = s.stateNumber
baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + "=>" + str_list(s.predicates)
else:
return baseStateStr + "=>" + str(s.prediction)
else:
return baseStateStr
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa:DFA):
super().__init__(dfa, None)
def getEdgeLabel(self, i:int):
return "'" + chr(i) + "'"
| 34.027027
| 113
| 0.548451
| 293
| 2,518
| 4.638225
| 0.327645
| 0.03532
| 0.033113
| 0.02649
| 0.0883
| 0.029433
| 0.029433
| 0
| 0
| 0
| 0
| 0.013464
| 0.351072
| 2,518
| 73
| 114
| 34.493151
| 0.818237
| 0.098888
| 0
| 0.140351
| 0
| 0
| 0.019903
| 0
| 0
| 0
| 0.004423
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.070175
| 0.017544
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca253db9d9aae8a5131355cc2fd801c42bb88f2
| 13,242
|
py
|
Python
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 13
|
2020-09-17T19:54:30.000Z
|
2022-03-01T00:25:11.000Z
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 27
|
2020-09-21T23:51:50.000Z
|
2022-03-25T19:45:16.000Z
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 13
|
2020-11-30T19:01:38.000Z
|
2021-11-10T11:28:36.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Calibration Controller
Performs calibration for hue, center of camera position, and servo offsets
"""
import os
import cv2
import time
import json
import argparse
import datetime
import numpy as np
import logging as log
from env import MoabEnv
from typing import Tuple
from common import Vector2
from detector import hsv_detector
from controllers import pid_controller
from dataclasses import dataclass, astuple
from hardware import plate_angles_to_servo_positions
@dataclass
class CalibHue:
hue: int = 44 # Reasonable default
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
@dataclass
class CalibPos:
position: Tuple[float, float] = (0.0, 0.0)
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
@dataclass
class CalibServos:
servos: Tuple[float, float, float] = (0.0, 0.0, 0.0)
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
def ball_close_enough(x, y, radius, max_ball_dist=0.045, min_ball_dist=0.01):
# reject balls which are too far from the center and too small
return (
np.abs(x) < max_ball_dist
and np.abs(y) < max_ball_dist
and radius > min_ball_dist
)
def calibrate_hue(camera_fn, detector_fn, is_menu_down_fn):
hue_low = 0
hue_high = 360
hue_steps = 41 # Is 41 instead of 40 so that the steps are even
img_frame, elapsed_time = camera_fn()
hue_options = list(np.linspace(hue_low, hue_high, hue_steps))
detected_hues = []
for hue in hue_options:
if is_menu_down_fn():
return CalibHue(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue, debug=True)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
log.info(
f"hue={hue:0.3f}, ball_detected={ball_detected}, "
f"(x, y)={x:0.3f} {y:0.3f}, radius={radius:0.3f}"
)
detected_hues.append(hue)
if len(detected_hues) > 0:
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
detected_hues_rad = np.radians(detected_hues)
sines, cosines = np.sin(detected_hues_rad), np.cos(detected_hues_rad)
sin_mean, cos_mean = np.mean(sines), np.mean(cosines)
avg_hue_rad = np.arctan2(sin_mean, cos_mean)
avg_hue = np.degrees(avg_hue_rad) % 360 # Convert back to [0, 360]
print(f"Hues are: {detected_hues}")
print(f"Hue calibrated: {avg_hue:0.2f}")
print(f"Avg hue: {avg_hue:0.2f}")
return CalibHue(hue=int(avg_hue), success=True)
else:
log.warning(f"Hue calibration failed.")
return CalibHue()
def calibrate_pos(camera_fn, detector_fn, hue, is_menu_down_fn):
for i in range(10): # Try and detect for 10 frames before giving up
if is_menu_down_fn():
return CalibPos(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
x_offset = round(x, 3)
y_offset = round(y, 3)
log.info(f"Offset calibrated: [{x_offset:.3f}, {y_offset:.3f}]")
return CalibPos(position=(x_offset, y_offset), success=True)
log.warning(f"Offset calibration failed.")
return CalibPos()
def calibrate_servo_offsets(pid_fn, env, stationary_vel=0.005, time_limit=20):
start_time = time.time()
action = Vector2(0, 0)
# Initial high vel_history (to use the vel_hist[-100:] later)
vel_x_hist = [1.0 for _ in range(100)]
vel_y_hist = [1.0 for _ in range(100)]
# Run until the ball has stabilized or the time limit was reached
while time.time() < start_time + time_limit:
state = env.step(action)
action, info = pid_fn(state)
(x, y, vel_x, vel_y, sum_x, sum_y), ball_detected, buttons = state
# Quit on menu down
if buttons.menu_button:
return CalibServos(early_quit=True)
if ball_detected:
vel_x_hist.append(vel_x)
vel_y_hist.append(vel_y)
prev_100_x = np.mean(np.abs(vel_x_hist[-100:]))
prev_100_y = np.mean(np.abs(vel_y_hist[-100:]))
print("Prev 100: ", (prev_100_x, prev_100_y))
# If the average velocity for the last 100 timesteps is under the limit
if (prev_100_x < stationary_vel) and (prev_100_y < stationary_vel):
# Calculate offsets by calculating servo positions at the
# current stable position and subtracting the `default` zeroed
# position of the servos.
servos = np.array(plate_angles_to_servo_positions(*action))
servos_zeroed = np.array(plate_angles_to_servo_positions(0, 0))
servo_offsets = list(servos - servos_zeroed)
return CalibServos(servos=servo_offsets, success=True)
# If the plate could be stabilized in time_limit seconds, quit
log.warning(f"Servo calibration failed.")
return CalibServos()
def write_calibration(calibration_dict, calibration_file="bot.json"):
log.info("Writing calibration.")
# write out stuff
with open(calibration_file, "w+") as outfile:
log.info(f"Creating calibration file {calibration_file}")
json.dump(calibration_dict, outfile, indent=4, sort_keys=True)
def read_calibration(calibration_file="bot.json"):
log.info("Reading previous calibration.")
if os.path.isfile(calibration_file):
with open(calibration_file, "r") as f:
calibration_dict = json.load(f)
else: # Use defaults
calibration_dict = {
"ball_hue": 44,
"plate_offsets": (0.0, 0.0),
"servo_offsets": (0.0, 0.0, 0.0),
}
return calibration_dict
def wait_for_joystick_or_menu(hardware, sleep_time=1 / 30):
"""Waits for either the joystick or the menu. Returns the buttons"""
while True:
buttons = hardware.get_buttons()
if buttons.menu_button or buttons.joy_button:
return buttons
time.sleep(sleep_time)
def wait_for_menu(hardware, sleep_time=1 / 30):
while True:
menu_button, joy_button, joy_x, joy_y = hardware.get_buttons()
time.sleep(sleep_time)
if menu_button:
return
def run_calibration(env, pid_fn, calibration_file):
# Get some hidden things from env
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
def is_menu_down(hardware=hardware) -> bool:
return hardware.get_buttons().menu_button
# lift plate up first
hardware.set_angles(0, 0)
# Display message and wait for joystick
hardware.display(
"put ball on stand\nclick joystick",
# "Place ball in\ncenter using\nclear stand.\n\n" "Click joystick\nwhen ready."
scrolling=True,
)
buttons = wait_for_joystick_or_menu(hardware)
if buttons.menu_button: # Early quit
hardware.go_up()
return
hardware.display("Calibrating...")
hue_calib = calibrate_hue(camera_fn, detector_fn, is_menu_down)
if hue_calib.early_quit:
hardware.go_up()
return
# Calibrate position
pos_calib = calibrate_pos(camera_fn, detector_fn, hue_calib.hue, is_menu_down)
if pos_calib.early_quit:
hardware.go_up()
return
# Save calibration
calibration_dict = read_calibration(calibration_file)
calibration_dict["ball_hue"] = hue_calib.hue
calibration_dict["plate_offsets"] = pos_calib.position
x_offset, y_offset = pos_calib.position
write_calibration(calibration_dict)
# Update the environment to use the new calibration
# Warning! This mutates the state!
hardware.reset_calibration(calibration_file=calibration_file)
if pos_calib.success and hue_calib.success: # and servo_calib.success:
hardware.display(f"Ok! Ball hue={hue_calib.hue}\nClick menu...", scrolling=True)
elif not (pos_calib.success or hue_calib.success): # or servo_calib.success):
hardware.display("Calibration failed\nClick menu...", scrolling=True)
else:
hue_str = (
f"Hue calib:\nsuccessful\nBall hue = {hue_calib.hue}\n\n"
if hue_calib.success
else "Hue calib:\nfailed\n\n"
)
pos_str = (
f"Position \ncalib:\nsuccessful\nPosition = \n({100*x_offset:.1f}, {100*y_offset:.1f})cm\n\n"
if hue_calib.success
else "(X, Y) calib:\nfailed\n\n"
)
hardware.display(
"Calibration\npartially succeeded\n\n"
+ hue_str
+ pos_str
+ "Click menu\nto return...\n",
scrolling=True,
)
# When the calibration is complete, save the image of what the moab camera
# sees (useful for debugging when the hue calibration fails)
# Have a nice filename with the time and whether it succeeded or failed
time_of_day = datetime.datetime.now().strftime("%H%M%S")
filename = "/tmp/hue"
if hue_calib.success:
filename += f".{hue_calib.hue}.{time_of_day}.jpg"
else:
filename += f".fail.{time_of_day}.jpg"
img_frame, _ = camera_fn()
# Huemask keeps an internal cache. By sending a new hue (hue + 1) invalidates
# the cache. TODO: added this while searching for a state bug
detector_fn(img_frame, hue=hue_calib.hue + 1, debug=True, filename=filename)
hardware.go_up()
def run_servo_calibration(env, pid_fn, calibration_file):
# Warning: servo calib works but doesn't currently give a good calibration
raise NotImplementedError
# Get some hidden things from env
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
# Start the calibration with uncalibrated servos
hardware.servo_offsets = (0, 0, 0)
    # lift plate up first
hardware.set_angles(0, 0)
# Calibrate servo offsets
hardware.display(
"Calibarating\nservos\n\n"
"Place ball in\ncenter without\n stand.\n\n"
"Click joystick\nto continue.",
scrolling=True,
)
buttons = wait_for_joystick_or_menu(hardware)
if buttons.menu_button: # Early quit
hardware.go_up()
return
hardware.display("Calibrating\nservos...", scrolling=True)
servo_calib = calibrate_servo_offsets(pid_fn, env)
# Save calibration
calibration_dict = read_calibration(calibration_file)
calibration_dict["servo_offsets"] = servo_calib.servos
s1, s2, s3 = servo_calib.servos
write_calibration(calibration_dict)
# Update the environment to use the new calibration
# Warning! This mutates the state!
env.reset_calibration(calibration_file=calibration_file)
if servo_calib.success:
hardware.display(
f"servo offsets =\n({s1:.2f}, {s2:.2f}, {s3:.2f})\n\n"
"Click menu\nto return...\n",
scrolling=True,
)
print(f"servo offsets =\n({s1:.2f}, {s2:.2f}, {s3:.2f})")
else:
hardware.display(
"Calibration\nfailed\n\nClick menu\nto return...", scrolling=True
)
hardware.go_up()
def calibrate_controller(**kwargs):
run_calibration(
kwargs["env"],
kwargs["pid_fn"],
kwargs["calibration_file"],
)
def wait_for_menu_and_stream():
# Get some hidden things from env to be able to stream the calib results
env = kwargs["env"]
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
menu_button = False
while not menu_button:
img_frame, _ = camera_fn()
detector_fn(img_frame, debug=True) # Save to streaming
menu, joy, _, _ = hardware.get_buttons()
if menu or joy:
break
env.hardware.go_up()
return wait_for_menu_and_stream
def main(calibration_file, frequency=30, debug=True):
pid_fn = pid_controller(frequency=frequency)
with MoabEnv(frequency=frequency, debug=debug) as env:
env.step((0, 0))
time.sleep(0.2)
env.hardware.enable_servos()
time.sleep(0.2)
env.hardware.set_servos(133, 133, 133)
run_calibration(env, pid_fn, calibration_file)
env.hardware.disable_servos()
if __name__ == "__main__": # Parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-f", "--file", default="bot.json", type=str)
args, _ = parser.parse_known_args()
main(args.file, debug=args.debug)
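Standalone check of the circular-mean trick used in calibrate_hue: hues that straddle the 0/360 wrap-around average correctly via sin/cos rather than a naive arithmetic mean (values are illustrative):

import numpy as np

hues = [350.0, 10.0]          # naive mean would give 180, clearly wrong
rad = np.radians(hues)
avg = np.degrees(np.arctan2(np.sin(rad).mean(), np.cos(rad).mean())) % 360
print(round(avg, 1))          # 0.0, i.e. red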
| 32.696296
| 105
| 0.655264
| 1,817
| 13,242
| 4.567969
| 0.201981
| 0.005542
| 0.004699
| 0.003855
| 0.370361
| 0.325422
| 0.266145
| 0.203976
| 0.203976
| 0.194337
| 0
| 0.018233
| 0.246186
| 13,242
| 404
| 106
| 32.777228
| 0.813264
| 0.175276
| 0
| 0.283088
| 0
| 0.011029
| 0.118899
| 0.02855
| 0
| 0
| 0
| 0.002475
| 0
| 1
| 0.0625
| false
| 0
| 0.055147
| 0.018382
| 0.242647
| 0.018382
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca450dae1b4675ac1d585a61880a16b6a3d235e
| 3,739
|
py
|
Python
|
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | 1
|
2021-03-01T12:07:10.000Z
|
2021-03-01T12:07:10.000Z
|
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | null | null | null |
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | 1
|
2021-12-09T09:38:50.000Z
|
2021-12-09T09:38:50.000Z
|
from datetime import datetime
from django.test import TestCase
from django.test.utils import override_settings
from marketing.tasks import (
delete_multiple_contacts_tasks,
list_all_bounces_unsubscribes,
run_all_campaigns,
run_campaign,
send_campaign_email_to_admin_contact,
send_scheduled_campaigns,
upload_csv_file,
)
from marketing.tests import TestMarketingModel
class TestCeleryTasks(TestMarketingModel, TestCase):
@override_settings(
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND="memory",
)
def test_celery_tasks(self):
task = run_campaign.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
self.campaign.reply_to_email = None
self.campaign.save()
task = run_campaign.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
self.campaign.schedule_date_time = datetime.now()
self.campaign.save()
task = run_all_campaigns.apply()
self.assertEqual("SUCCESS", task.state)
task = list_all_bounces_unsubscribes.apply()
self.assertEqual("SUCCESS", task.state)
task = send_scheduled_campaigns.apply()
self.assertEqual("SUCCESS", task.state)
task = delete_multiple_contacts_tasks.apply(
(self.contact_list.id,),
)
self.assertEqual("SUCCESS", task.state)
task = send_campaign_email_to_admin_contact.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
valid_rows = [
{
"company name": "company_name_1",
"email": "user1@email.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_2",
"email": "user2@email.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_3",
"email": "user3@email.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_4",
"email": "user4@email.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
]
invalid_rows = [
{
"company name": "company_name_1",
"email": "useremail.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_2",
"email": "user2@email",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
]
task = upload_csv_file.apply(
(
valid_rows,
invalid_rows,
self.user.id,
[
self.contact_list.id,
],
self.company.id,
),
)
self.assertEqual("SUCCESS", task.state)
| 29.912
| 58
| 0.502006
| 331
| 3,739
| 5.425982
| 0.241692
| 0.073497
| 0.080178
| 0.115813
| 0.630846
| 0.589087
| 0.537305
| 0.455457
| 0.400891
| 0.373051
| 0
| 0.00477
| 0.383258
| 3,739
| 124
| 59
| 30.153226
| 0.774068
| 0
| 0
| 0.390909
| 0
| 0
| 0.1931
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 1
| 0.009091
| false
| 0
| 0.045455
| 0
| 0.063636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca568d5e71e781c0b945807208117a83879f72f
| 263
|
py
|
Python
|
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
from py2html.main import *
page = web()
page.create()
# Header Parameters
# text = header text
# n = title level
page.header(text='My Site', n=1)
# Label Parameters
# text = label text
# color = label color
page.label(text='', color='')
page.compile()
| 16.4375
| 32
| 0.657795
| 37
| 263
| 4.675676
| 0.513514
| 0.16185
| 0.16185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 0.193916
| 263
| 16
| 33
| 16.4375
| 0.806604
| 0.437262
| 0
| 0
| 0
| 0
| 0.049296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca56f1f07a7efd89750413292d60e6212055e4a
| 1,022
|
py
|
Python
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T11:05:03.000Z
|
2020-07-22T11:05:03.000Z
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 2
|
2019-06-07T11:53:48.000Z
|
2019-06-24T08:20:25.000Z
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 3
|
2019-07-01T12:38:06.000Z
|
2022-02-01T21:38:12.000Z
|
import unittest
from JorGpi.pickup.pickup import SmartPickUp,Reference,CommandLineOptions
class TestPickupIron(unittest.TestCase):
@staticmethod
def options(*args):
return CommandLineOptions(*args)
def test_iron_001(self):
_input = "test -R _VASP/Fe/noFlip -D _VASP/Fe/flip00000 -E Fe -J1 -U mRy".split(" ")
options = TestPickupIron.options(*_input)
elements = ''.join(options('elements'))
self.assertEqual(elements,'Fe$')
ref = Reference(options('reference')+"/POSCAR")
self.assertEqual(ref(),0)
self.assertEqual(options('number_of_interactions'),1)
pickerUpper = SmartPickUp(options('number_of_interactions'),elements)
pickerUpper.read(options('reference'),*options('directories'),reference=ref())
self.assertEqual(options('units'),'mRy')
_J_ij = pickerUpper.solve(units=options('units')).flatten()
self.assertEqual(_J_ij[0],1.1861042008301703)
self.assertEqual(_J_ij[1],4.157645364906014)
| 37.851852
| 92
| 0.682975
| 112
| 1,022
| 6.089286
| 0.473214
| 0.131965
| 0.064516
| 0.079179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054632
| 0.176125
| 1,022
| 26
| 93
| 39.307692
| 0.755344
| 0
| 0
| 0
| 0
| 0.05
| 0.163405
| 0.043053
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.1
| false
| 0
| 0.1
| 0.05
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bca98a1ce3fff11966f586aae11d75f7d4194f73
| 859
|
py
|
Python
|
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Adrien Guinet <adrien@guinet.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# RUN: "%python" "%s"
#
import pydffi
import sys
F = pydffi.FFI()
CU = F.cdef('''
#include <stdint.h>
typedef int32_t MyInt;
typedef struct {
int a;
int b;
} A;
''')
assert(CU.types.MyInt == F.Int32Ty)
assert(isinstance(CU.types.A, pydffi.StructType))
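# The RUN comment above is a lit-style test directive; outside that harness the
# script can be executed directly (path taken from this record's metadata):
# python bindings/python/tests/cdef_types.py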
| 26.030303
| 74
| 0.726426
| 133
| 859
| 4.684211
| 0.669173
| 0.096308
| 0.041734
| 0.051364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01683
| 0.169965
| 859
| 32
| 75
| 26.84375
| 0.856943
| 0.689173
| 0
| 0
| 0
| 0
| 0.334661
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcaa742ec3f2633707689915b345db35e3f84a87
| 25,875
|
py
|
Python
|
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | null | null | null |
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | 22
|
2022-03-08T19:30:45.000Z
|
2022-03-28T21:14:15.000Z
|
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | null | null | null |
import logging
import os
from decimal import Decimal
from typing import Optional, Tuple
from time import sleep
import requests
from hexbytes import HexBytes
from web3 import Web3
from web3 import contract
from web3.contract import Contract
from config.constants import BASE_CURRENCIES
from config.constants import GAS_LIMITS
from config.constants import MULTICHAIN_CONFIG
from config.enums import Network
from src.harvester import IHarvester
from src.misc_utils import hours
from src.misc_utils import seconds_to_blocks
from src.tx_utils import get_effective_gas_price
from src.tx_utils import get_gas_price_of_tx
from src.tx_utils import get_priority_fee
from src.web3_utils import confirm_transaction
from src.utils import get_abi
from src.discord_utils import get_hash_from_failed_tx_error
from src.web3_utils import get_last_harvest_times
from src.token_utils import get_token_price
from src.discord_utils import send_error_to_discord
from src.discord_utils import send_success_to_discord
logging.basicConfig(level=logging.INFO)
MAX_TIME_BETWEEN_HARVESTS = hours(120)
HARVEST_THRESHOLD = 0.0005 # min ratio of want to total vault AUM required to harvest
NUM_FLASHBOTS_BUNDLES = 6
class GeneralHarvester(IHarvester):
def __init__(
self,
chain: Network = Network.Ethereum,
web3: Web3 = None,
keeper_acl: str = os.getenv("KEEPER_ACL"),
keeper_address: str = os.getenv("KEEPER_ADDRESS"),
keeper_key: str = os.getenv("KEEPER_KEY"),
base_oracle_address: str = os.getenv("ETH_USD_CHAINLINK"),
use_flashbots: bool = False,
discord_url: str = None,
):
self.logger = logging.getLogger(__name__)
self.chain = chain
self.web3 = web3
self.keeper_key = keeper_key
self.keeper_address = keeper_address
self.keeper_acl: Contract = self.web3.eth.contract(
address=self.web3.toChecksumAddress(keeper_acl),
abi=get_abi(self.chain, "keeper_acl"),
)
self.base_usd_oracle: Contract = self.web3.eth.contract(
address=self.web3.toChecksumAddress(base_oracle_address),
abi=get_abi(self.chain, "oracle"),
)
# Times of last harvest
if self.chain in [Network.Ethereum, Network.Fantom]:
self.last_harvest_times = get_last_harvest_times(
self.web3,
self.keeper_acl,
start_block=self.web3.eth.block_number
- seconds_to_blocks(MAX_TIME_BETWEEN_HARVESTS),
chain=self.chain,
)
else:
# Don't care about poly/arbitrum
self.last_harvest_times = {}
self.use_flashbots = use_flashbots
self.discord_url = discord_url
def is_time_to_harvest(
self,
strategy: contract.Contract,
harvest_interval_threshold: int = MAX_TIME_BETWEEN_HARVESTS,
) -> bool:
"""Calculates the time between harvests for the supplied strategy and returns true if
it has been longer than the supplied harvest_interval_threshold which is measured in seconds
Args:
strategy (contract): Vault strategy web3 contract object
harvest_interval_threshold (int, optional):
Amount of time in seconds that is acceptable to not have harvested within.
Defaults to MAX_TIME_BETWEEN_HARVESTS.
Returns:
bool: True if time since last harvest is > harvest_interval_threshold, else False
"""
# Only care about harvest gas costs on eth
if self.chain not in [Network.Ethereum, Network.Fantom]:
return True
try:
last_harvest = self.last_harvest_times[strategy.address]
current_time = self.web3.eth.get_block("latest")["timestamp"]
self.logger.info(
f"Time since last harvest: {(current_time - last_harvest) / 3600}"
)
return current_time - last_harvest > harvest_interval_threshold
except KeyError:
return True
def harvest(
self,
strategy: contract.Contract,
):
"""Orchestration function that harvests outstanding rewards.
Args:
strategy (contract)
Raises:
ValueError: If the keeper isn't whitelisted, throw an error and alert user.
"""
strategy_name = strategy.functions.getName().call()
# TODO: update for ACL
if not self.__is_keeper_whitelisted("harvest"):
raise ValueError("Keeper ACL is not whitelisted for calling harvest")
want_address = strategy.functions.want().call()
want = self.web3.eth.contract(
address=want_address,
abi=get_abi(self.chain, "erc20"),
)
vault_balance = want.functions.balanceOf(strategy.address).call()
self.logger.info(f"vault balance: {vault_balance}")
want_to_harvest = (
self.estimate_harvest_amount(strategy)
/ 10 ** want.functions.decimals().call()
)
self.logger.info(f"estimated want change: {want_to_harvest}")
# TODO: figure out how to handle profit estimation
# current_price_eth = self.get_current_rewards_price()
# self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
gas_fee = self.estimate_gas_fee(strategy.address)
self.logger.info(f"estimated gas cost: {gas_fee}")
# for now we'll just harvest every hour
should_harvest = self.is_profitable()
self.logger.info(f"Should we harvest: {should_harvest}")
if should_harvest:
self.__process_harvest(
strategy=strategy,
strategy_name=strategy_name,
)
def harvest_no_return(
self,
strategy: contract,
):
strategy_name = strategy.functions.getName().call()
# TODO: update for ACL
if not self.__is_keeper_whitelisted("harvestNoReturn"):
raise ValueError(
"Keeper ACL is not whitelisted for calling harvestNoReturn"
)
want_address = strategy.functions.want().call()
want = self.web3.eth.contract(
address=want_address,
abi=get_abi(self.chain, "erc20"),
)
vault_balance = want.functions.balanceOf(strategy.address).call()
self.logger.info(f"vault balance: {vault_balance}")
# TODO: figure out how to handle profit estimation
# current_price_eth = self.get_current_rewards_price()
# self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
gas_fee = self.estimate_gas_fee(strategy.address, returns=False)
self.logger.info(f"estimated gas cost: {gas_fee}")
# for now we'll just harvest every hour
should_harvest = self.is_profitable()
self.logger.info(f"Should we harvest: {should_harvest}")
if should_harvest:
self.__process_harvest(
strategy=strategy,
strategy_name=strategy_name,
)
def harvest_rewards_manager(
self,
strategy: contract,
):
strategy_name = strategy.functions.getName().call()
self.keeper_acl = self.web3.eth.contract(
address=self.web3.toChecksumAddress(
MULTICHAIN_CONFIG[self.chain]["rewards_manager"]
),
abi=get_abi(self.chain, "rewards_manager"),
)
if not self.__is_keeper_whitelisted("rewards_manager"):
raise ValueError(f"Keeper is not whitelisted for {strategy_name}")
want_address = strategy.functions.want().call()
want = self.web3.eth.contract(
address=want_address,
abi=get_abi(self.chain, "erc20"),
)
vault_balance = want.functions.balanceOf(strategy.address).call()
self.logger.info(f"vault balance: {vault_balance}")
gas_fee = self.estimate_gas_fee(strategy.address)
self.logger.info(f"estimated gas cost: {gas_fee}")
self.__process_harvest(
strategy=strategy,
strategy_name=strategy_name,
)
def harvest_mta(
self,
voter_proxy: contract,
):
# TODO: update for ACL
if not self.__is_keeper_whitelisted("harvestMta"):
raise ValueError("Keeper ACL is not whitelisted for calling harvestMta")
gas_fee = self.estimate_gas_fee(voter_proxy.address, function="harvestMta")
self.logger.info(f"estimated gas cost: {gas_fee}")
should_harvest_mta = self.is_profitable()
self.logger.info(f"Should we call harvestMta: {should_harvest_mta}")
if should_harvest_mta:
self.__process_harvest_mta(voter_proxy)
def tend(self, strategy: contract):
strategy_name = strategy.functions.getName().call()
# TODO: update for ACL
if not self.__is_keeper_whitelisted("tend"):
raise ValueError("Keeper ACL is not whitelisted for calling tend")
# TODO: figure out how to handle profit estimation
# current_price_eth = self.get_current_rewards_price()
# self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
gas_fee = self.estimate_gas_fee(strategy.address, function="tend")
self.logger.info(f"estimated gas cost: {gas_fee}")
self.__process_tend(
strategy=strategy,
strategy_name=strategy_name,
)
def tend_then_harvest(self, strategy: contract):
self.tend(strategy)
sleep(60)
self.harvest(strategy)
def estimate_harvest_amount(self, strategy: contract) -> Decimal:
want = self.web3.eth.contract(
address=strategy.functions.want().call(),
abi=get_abi(self.chain, "erc20"),
)
want_gained = self.keeper_acl.functions.harvest(strategy.address).call(
{"from": self.keeper_address}
)
# call badger api to get prices
currency = BASE_CURRENCIES[self.chain]
if self.chain == Network.Fantom:
price_per_want = get_token_price(
want.address, currency, self.chain, use_staging=True
)
else:
price_per_want = get_token_price(want.address, currency, self.chain)
self.logger.info(f"price per want: {price_per_want} {currency}")
self.logger.info(f"want gained: {want_gained}")
if type(want_gained) is list:
want_gained = 0
return price_per_want * want_gained
def is_profitable(self) -> bool:
# TODO: Implement this
# harvest if ideal want change is > 0.05% of total vault assets
# should_harvest = want_to_harvest / vault_balance >= HARVEST_THRESHOLD
return True
def __is_keeper_whitelisted(self, function: str) -> bool:
"""Checks if the bot we're using is whitelisted for the strategy.
Returns:
bool: True if our bot is whitelisted to make function calls, False otherwise.
"""
if function in ["harvest", "harvestMta"]:
key = self.keeper_acl.functions.HARVESTER_ROLE().call()
elif function == "tend":
key = self.keeper_acl.functions.TENDER_ROLE().call()
elif function == "rewards_manager":
key = self.keeper_acl.functions.KEEPER_ROLE().call()
return self.keeper_acl.functions.hasRole(key, self.keeper_address).call()
def __process_tend(
self,
strategy: contract = None,
strategy_name: str = None,
):
try:
tx_hash = self.__send_tend_tx(strategy)
succeeded, _ = confirm_transaction(self.web3, tx_hash)
if succeeded:
gas_price_of_tx = get_gas_price_of_tx(
self.web3, self.base_usd_oracle, tx_hash, self.chain
)
self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
send_success_to_discord(
tx_type=f"Tend {strategy_name}",
tx_hash=tx_hash,
gas_cost=gas_price_of_tx,
chain=self.chain,
url=self.discord_url,
)
elif tx_hash != HexBytes(0):
send_success_to_discord(
tx_type=f"Tend {strategy_name}",
tx_hash=tx_hash,
chain=self.chain,
url=self.discord_url,
)
except Exception as e:
self.logger.error(f"Error processing tend tx: {e}")
send_error_to_discord(
strategy_name,
"Tend",
error=e,
chain=self.chain,
keeper_address=self.keeper_address,
)
def __process_harvest(
self,
strategy: contract = None,
strategy_name: str = None,
harvested: Decimal = None,
returns: bool = True,
):
"""Private function to create, broadcast, confirm tx on eth and then send
transaction to Discord for monitoring
Args:
strategy (contract, optional): Defaults to None.
strategy_name (str, optional): Defaults to None.
harvested (Decimal, optional): Amount of Sushi harvested. Defaults to None.
"""
try:
tx_hash, max_target_block = self.__send_harvest_tx(
strategy, returns=returns
)
succeeded, msg = confirm_transaction(
self.web3, tx_hash, max_block=max_target_block
)
if succeeded:
# If successful, update last harvest
# time to make sure we don't double harvest
self.update_last_harvest_time(strategy.address)
gas_price_of_tx = get_gas_price_of_tx(
self.web3, self.base_usd_oracle, tx_hash, self.chain
)
self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
send_success_to_discord(
tx_type=f"Harvest {strategy_name}",
tx_hash=tx_hash,
gas_cost=gas_price_of_tx,
chain=self.chain,
url=self.discord_url,
)
elif tx_hash != HexBytes(0):
if not self.use_flashbots:
# And if pending
self.update_last_harvest_time(strategy.address)
send_success_to_discord(
tx_type=f"Harvest {strategy_name}",
tx_hash=tx_hash,
chain=self.chain,
url=self.discord_url,
)
else:
send_error_to_discord(
strategy_name,
"Harvest",
tx_hash=tx_hash,
message=msg,
chain=self.chain,
keeper_address=self.keeper_address,
)
except Exception as e:
self.logger.error(f"Error processing harvest tx: {e}")
send_error_to_discord(
strategy_name,
"Harvest",
error=e,
chain=self.chain,
keeper_address=self.keeper_address,
)
def __process_harvest_mta(
self,
voter_proxy: contract,
):
"""Private function to create, broadcast, confirm tx on eth and then send
transaction to Discord for monitoring
Args:
voter_proxy (contract): Mstable voter proxy contract
"""
try:
tx_hash = self.__send_harvest_mta_tx(voter_proxy)
succeeded, _ = confirm_transaction(self.web3, tx_hash)
if succeeded:
# If successful, update last harvest time
self.update_last_harvest_time(voter_proxy.address)
gas_price_of_tx = get_gas_price_of_tx(
self.web3, self.base_usd_oracle, tx_hash, self.chain
)
self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
send_success_to_discord(
tx_type="Harvest MTA",
tx_hash=tx_hash,
gas_cost=gas_price_of_tx,
chain=self.chain,
url=self.discord_url,
)
elif tx_hash != HexBytes(0):
send_success_to_discord(
tx_type="Harvest MTA",
tx_hash=tx_hash,
chain=self.chain,
url=self.discord_url,
)
except Exception as e:
self.logger.error(f"Error processing harvestMta tx: {e}")
send_error_to_discord(
"",
"Harvest MTA",
error=e,
chain=self.chain,
keeper_address=self.keeper_address,
)
def __send_harvest_tx(self, strategy: contract, returns: bool = True) -> Tuple[HexBytes, Optional[int]]:
"""Sends transaction to ETH node for confirmation.
Args:
strategy (contract)
Raises:
Exception: If we have an issue sending transaction (unable to communicate with
node, etc.) we log the error and return a tx_hash of 0x00.
Returns:
Tuple[HexBytes, Optional[int]]: transaction hash that was sent and the max
flashbots target block (None when flashbots are not used).
"""
max_target_block = None
tx_hash = HexBytes(0)
try:
tx = self.__build_transaction(strategy.address, returns=returns)
signed_tx = self.web3.eth.account.sign_transaction(
tx, private_key=self.keeper_key
)
tx_hash = signed_tx.hash
if not self.use_flashbots:
self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
else:
bundle = [
{"signed_transaction": signed_tx.rawTransaction},
]
block_number = self.web3.eth.block_number
for i in range(1, NUM_FLASHBOTS_BUNDLES + 1):
self.web3.flashbots.send_bundle(
bundle, target_block_number=block_number + i
)
max_target_block = block_number + NUM_FLASHBOTS_BUNDLES
self.logger.info(f"Bundle broadcasted at {max_target_block}")
except ValueError as e:
self.logger.error(f"Error in sending harvest tx: {e}")
tx_hash = get_hash_from_failed_tx_error(
e, "Harvest", chain=self.chain, keeper_address=self.keeper_address
)
finally:
return tx_hash, max_target_block
def __send_tend_tx(self, strategy: contract) -> HexBytes:
"""Sends transaction to ETH node for confirmation.
Args:
strategy (contract)
Raises:
Exception: If we have an issue sending transaction (unable to communicate with
node, etc.) we log the error and return a tx_hash of 0x00.
Returns:
HexBytes: Transaction hash for transaction that was sent.
"""
tx_hash = HexBytes(0)
try:
tx = self.__build_transaction(strategy.address, function="tend")
signed_tx = self.web3.eth.account.sign_transaction(
tx, private_key=self.keeper_key
)
tx_hash = signed_tx.hash
self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
except ValueError as e:
self.logger.error(f"Error in sending tend tx: {e}")
tx_hash = get_hash_from_failed_tx_error(
e, "Tend", chain=self.chain, keeper_address=self.keeper_address
)
finally:
return tx_hash
def __send_harvest_mta_tx(self, voter_proxy: contract) -> HexBytes:
"""Sends transaction to ETH node for confirmation.
Args:
voter_proxy (contract)
Raises:
Exception: If we have an issue sending transaction (unable to communicate with
node, etc.) we log the error and return a tx_hash of 0x00.
Returns:
HexBytes: Transaction hash for transaction that was sent.
"""
tx_hash = HexBytes(0)
try:
tx = self.__build_transaction(voter_proxy.address, function="harvestMta")
signed_tx = self.web3.eth.account.sign_transaction(
tx, private_key=self.keeper_key
)
tx_hash = signed_tx.hash
self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
except ValueError as e:
self.logger.error(f"Error in sending harvestMta tx: {e}")
tx_hash = get_hash_from_failed_tx_error(
e, "Harvest MTA", chain=self.chain, keeper_address=self.keeper_address
)
finally:
return tx_hash
def __build_transaction(
self, address: str, returns: bool = True, function: str = "harvest"
) -> dict:
"""Builds transaction depending on which chain we're harvesting. EIP-1559
requires different handling for ETH txs than the other EVM chains.
Args:
contract (contract): contract to use to build harvest tx
Returns:
dict: tx dictionary
"""
options = {
"nonce": self.web3.eth.get_transaction_count(
self.keeper_address, "pending"
),
"from": self.keeper_address,
"gas": GAS_LIMITS[self.chain],
}
if self.chain == Network.Ethereum:
options["maxPriorityFeePerGas"] = get_priority_fee(self.web3)
options["maxFeePerGas"] = self.__get_effective_gas_price()
else:
options["gasPrice"] = self.__get_effective_gas_price()
if function == "harvest":
self.logger.info(
f"estimated gas fee: {self.__estimate_harvest_gas(address, returns)}"
)
return self.__build_harvest_transaction(address, returns, options)
elif function == "tend":
self.logger.info(f"estimated gas fee: {self.__estimate_tend_gas(address)}")
return self.__build_tend_transaction(address, options)
elif function == "harvestMta":
self.logger.info(
f"estimated gas fee: {self.__estimate_harvest_mta_gas(address)}"
)
return self.__build_harvest_mta_transaction(address, options)
def __build_harvest_transaction(
self, strategy_address: str, returns: bool, options: dict
) -> dict:
if returns:
return self.keeper_acl.functions.harvest(strategy_address).buildTransaction(
options
)
else:
return self.keeper_acl.functions.harvestNoReturn(
strategy_address
).buildTransaction(options)
def __build_tend_transaction(self, strategy_address: str, options: dict) -> dict:
return self.keeper_acl.functions.tend(strategy_address).buildTransaction(
options
)
def __build_harvest_mta_transaction(
self, voter_proxy_address: str, options: dict
) -> dict:
return self.keeper_acl.functions.harvestMta(
voter_proxy_address
).buildTransaction(options)
def estimate_gas_fee(
self, address: str, returns: bool = True, function: str = "harvest"
) -> Decimal:
current_gas_price = self.__get_effective_gas_price()
if function == "harvest":
estimated_gas = self.__estimate_harvest_gas(address, returns)
elif function == "tend":
estimated_gas = self.__estimate_tend_gas(address)
elif function == "harvestMta":
estimated_gas = self.__estimate_harvest_mta_gas(address)
return Decimal(current_gas_price * estimated_gas)
def __estimate_harvest_gas(self, strategy_address: str, returns: bool) -> Decimal:
if returns:
estimated_gas_to_harvest = self.keeper_acl.functions.harvest(
strategy_address
).estimateGas({"from": self.keeper_address})
else:
estimated_gas_to_harvest = self.keeper_acl.functions.harvestNoReturn(
strategy_address
).estimateGas({"from": self.keeper_address})
return Decimal(estimated_gas_to_harvest)
def __estimate_tend_gas(self, strategy_address: str) -> Decimal:
return Decimal(
self.keeper_acl.functions.tend(strategy_address).estimateGas(
{"from": self.keeper_address}
)
)
def __estimate_harvest_mta_gas(self, voter_proxy_address: str) -> Decimal:
return Decimal(
self.keeper_acl.functions.harvestMta(voter_proxy_address).estimateGas(
{"from": self.keeper_address}
)
)
def __get_effective_gas_price(self) -> int:
if self.chain == Network.Polygon:
response = requests.get("https://gasstation-mainnet.matic.network").json()
gas_price = self.web3.toWei(int(response.get("fast") * 1.1), "gwei")
elif self.chain in [Network.Arbitrum, Network.Fantom]:
gas_price = int(1.1 * self.web3.eth.gas_price)
# Estimated gas price + buffer
elif self.chain == Network.Ethereum:
# EIP-1559
gas_price = get_effective_gas_price(self.web3)
return gas_price
def update_last_harvest_time(self, strategy_address: str):
self.last_harvest_times[strategy_address] = self.web3.eth.get_block("latest")[
"timestamp"
]
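# A hypothetical usage sketch for the class above. NODE_URL, STRATEGY_ADDRESS,
# and the "strategy" ABI name are illustrative assumptions, not from this repo:
# web3 = Web3(Web3.HTTPProvider(os.getenv("NODE_URL")))              # assumed env var
# strategy = web3.eth.contract(
#     address=web3.toChecksumAddress(os.getenv("STRATEGY_ADDRESS")), # assumed env var
#     abi=get_abi(Network.Ethereum, "strategy"),                     # assumed ABI name
# )
# harvester = GeneralHarvester(chain=Network.Ethereum, web3=web3)
# if harvester.is_time_to_harvest(strategy):
#     harvester.harvest(strategy)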
| 37.939883
| 100
| 0.602705
| 2,938
| 25,875
| 5.048673
| 0.105174
| 0.017798
| 0.023596
| 0.025281
| 0.627857
| 0.565159
| 0.517697
| 0.473943
| 0.43828
| 0.374907
| 0
| 0.005763
| 0.315981
| 25,875
| 681
| 101
| 37.995595
| 0.832307
| 0.143498
| 0
| 0.46
| 0
| 0
| 0.088887
| 0.005272
| 0
| 0
| 0
| 0.001468
| 0
| 1
| 0.054
| false
| 0
| 0.052
| 0.01
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcaab7186ea62c16403a777679f43c5651b2eeea
| 1,170
|
py
|
Python
|
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# License: Apache-2.0
# Copyright (C) 2020 Mikhail f. Shiryaev
class Column(object):
"""
Represents ClickHouse column
"""
def __init__(
self,
database: str,
table: str,
name: str,
type: str,
default_kind: str,
default_expression: str,
comment: str,
compression_codec: str,
is_in_partition_key: bool,
is_in_sorting_key: bool,
is_in_primary_key: bool,
is_in_sampling_key: bool,
):
self.database = database
self.table = table
self.name = name
self.type = type
self.default_kind = default_kind
self.default_expression = default_expression
self.comment = comment
self.compression_codec = compression_codec
self.is_in_partition_key = is_in_partition_key
self.is_in_sorting_key = is_in_sorting_key
self.is_in_primary_key = is_in_primary_key
self.is_in_sampling_key = is_in_sampling_key
@property
def db_table(self):
return "{}.{}".format(self.database, self.table)
def __str__(self):
return self.name
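# A minimal usage sketch of the class above (all field values are made up):
# col = Column(
# database="default", table="events", name="id", type="UInt64",
# default_kind="", default_expression="", comment="", compression_codec="",
# is_in_partition_key=False, is_in_sorting_key=True,
# is_in_primary_key=True, is_in_sampling_key=False,
# )
# print(col.db_table)  # default.events
# print(col)           # id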
| 25.434783
| 56
| 0.622222
| 145
| 1,170
| 4.648276
| 0.310345
| 0.071217
| 0.047478
| 0.071217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00729
| 0.296581
| 1,170
| 45
| 57
| 26
| 0.811665
| 0.092308
| 0
| 0
| 0
| 0
| 0.004789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0.060606
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcab6a237bb88828d13a4bacbf608684ac108e0d
| 468
|
py
|
Python
|
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
def solve(n, red , blue):
rcount = bcount = 0
for i in range(n):
if int(red[i]) > int(blue[i]):
rcount = rcount +1
elif int(red[i]) < int(blue[i]):
bcount = bcount + 1
print( 'RED' if rcount>bcount else ('BLUE' if bcount>rcount else 'EQUAL'))
if __name__ == "__main__":
T = int(input())
for t in range(T):
n = int(input())
red = input()
blue = input()
solve(n, red, blue)
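# Worked case: solve(3, '123', '321') compares digits position by position:
# index 0: 1 < 3 -> blue; index 1: 2 == 2 -> tie; index 2: 3 > 1 -> red
# rcount == bcount == 1, so it prints 'EQUAL'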
| 24.631579
| 79
| 0.5
| 67
| 468
| 3.373134
| 0.343284
| 0.053097
| 0.079646
| 0.115044
| 0.132743
| 0.132743
| 0
| 0
| 0
| 0
| 0
| 0.00974
| 0.34188
| 468
| 19
| 80
| 24.631579
| 0.724026
| 0
| 0
| 0
| 0
| 0
| 0.042918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.066667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcac6e3cd42df76570409abe5d700496d0e0e054
| 1,688
|
py
|
Python
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 4
|
2019-05-15T13:09:23.000Z
|
2021-03-28T09:10:11.000Z
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 14
|
2019-04-23T15:05:07.000Z
|
2019-08-14T13:21:07.000Z
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 1
|
2019-09-30T22:55:53.000Z
|
2019-09-30T22:55:53.000Z
|
import autograd.numpy as np
from pyCHAMP.wavefunction.wf_base import WF
from pyCHAMP.optimizer.minimize import Minimize
from pyCHAMP.sampler.metropolis import Metropolis
from pyCHAMP.sampler.hamiltonian import Hamiltonian
from pyCHAMP.solver.vmc import VMC
class Hydrogen(WF):
def __init__(self, nelec, ndim):
WF.__init__(self, nelec, ndim)
def values(self, parameters, pos):
""" Compute the value of the wave function.
Args:
parameters : parameters of the wave function
pos: positions of the electrons
Returns: values of psi
"""
beta = parameters[0]
if pos.ndim == 1:
pos = pos.reshape(1, -1)
r = np.sqrt(np.sum(pos**2, 1))
return 2*np.exp(-beta*r).reshape(-1, 1)
def nuclear_potential(self, pos):
r = np.sqrt(np.sum(pos**2, 1))
rm1 = - 1. / r
return rm1.reshape(-1, 1)
def electronic_potential(self, pos):
return 0
if __name__ == "__main__":
wf = Hydrogen(nelec=1, ndim=3)
sampler = Metropolis(nwalkers=1000, nstep=1000, step_size=3,
nelec=1, ndim=3, domain={'min': -5, 'max': 5})
# note: the Hamiltonian sampler below replaces the Metropolis sampler above
sampler = Hamiltonian(nwalkers=1000, nstep=1000,
step_size=3, nelec=1, ndim=3)
optimizer = Minimize(method='bfgs', maxiter=25, tol=1E-4)
# VMC solver
vmc = VMC(wf=wf, sampler=sampler, optimizer=optimizer)
# single point
opt_param = [1.]
pos, e, s = vmc.single_point(opt_param)
print('Energy : ', e)
print('Variance : ', s)
vmc.plot_density(pos)
# optimization
init_param = [0.5]
vmc.optimize(init_param)
vmc.plot_history()
| 26.375
| 71
| 0.603081
| 227
| 1,688
| 4.361233
| 0.39207
| 0.055556
| 0.027273
| 0.033333
| 0.117172
| 0.117172
| 0.117172
| 0.117172
| 0.082828
| 0.082828
| 0
| 0.040883
| 0.275474
| 1,688
| 63
| 72
| 26.793651
| 0.768602
| 0.109597
| 0
| 0.054054
| 0
| 0
| 0.027529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.162162
| 0.027027
| 0.378378
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcac914db6f36c54dadd0991d6bb9fbf2492dbe9
| 585
|
py
|
Python
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 182
|
2015-01-09T05:26:46.000Z
|
2022-03-16T14:10:06.000Z
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 95
|
2015-02-24T23:29:56.000Z
|
2022-03-13T03:27:58.000Z
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 93
|
2015-02-19T17:59:06.000Z
|
2022-03-19T17:01:25.000Z
|
from braintree.configuration import Configuration
from braintree.resource import Resource
class AccountUpdaterDailyReport(Resource):
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "report_url" in attributes:
self.report_url = attributes.pop("report_url")
if "report_date" in attributes:
self.report_date = attributes.pop("report_date")
def __repr__(self):
detail_list = ["report_url", "report_date"]
return super(AccountUpdaterDailyReport, self).__repr__(detail_list)
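# A hypothetical construction sketch; attribute values are made up, and
# gateway is whatever braintree gateway instance the caller already holds:
# report = AccountUpdaterDailyReport(gateway, {
#     "report_url": "https://example.com/report.csv",
#     "report_date": "2020-01-01",
# })
# report.report_url  -> "https://example.com/report.csv"
# repr(report)       -> includes report_url and report_date via detail_list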
| 36.5625
| 75
| 0.71453
| 63
| 585
| 6.222222
| 0.365079
| 0.091837
| 0.076531
| 0.127551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196581
| 585
| 15
| 76
| 39
| 0.834043
| 0
| 0
| 0
| 0
| 0
| 0.107692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcaf2fd4c9457e78084e56f1b1fab3aa1985e417
| 394
|
py
|
Python
|
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
frase = input('Digite uma frase: ').upper().strip().replace(' ', '')
tamanho = len(frase)
inverso = ''
# Simpler alternative:
# inverso = frase[::-1]
for contador in range(tamanho-1, -1, -1):
inverso += frase[contador]
print('O inverso de {} é {}'.format(frase, inverso))
if frase == inverso:
print('Temos um palíndromo!')
else:
print('A frase digitada não é um palíndromo!')
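# Worked example with a classic Portuguese palindrome phrase:
# Input: 'A mala nada na lama' -> frase = 'AMALANADANALAMA'
# inverso = 'AMALANADANALAMA', so the program prints
# 'Temos um palíndromo!' (i.e. 'We have a palindrome!')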
| 24.625
| 68
| 0.639594
| 54
| 394
| 4.666667
| 0.592593
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012232
| 0.170051
| 394
| 16
| 69
| 24.625
| 0.75841
| 0.104061
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcaf71bd0c6cd2298c1b67ea7ef95ddacb0851be
| 16,589
|
py
|
Python
|
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
import atexit
import os
import sys
import platform
import json
import glob
import datetime
import time
import threading
import tkinter as tk
from pynput import mouse
from pathlib import Path
from playsound import playsound
from enum import Enum
import copy
#"THE BEER-WARE LICENSE" (Revision 42):
#bleach86 wrote this file. As long as you retain this notice you can do whatever you want with this stuff.
#If we meet some day, and you think this stuff is worth it, you can buy me a beer in return
input_fil = Path("/Users/sharpieman20/MCtimer/MCtimer") / "input.txt"
# continuously read from input file every 10ms
# when you get a "reset timer" message, reset the timer
#
# class Category:
# def __init__():
# self.actions = []
# self.attempts = []
# # convert actions to attempts
# def read():
# def write():
# class Actions(Enum):
# CREATE_WORLD = 0
# START = 1
# class Attempt:
stage = 0
ind = 0
time_count = 0
rsg = [
("World Created", True),
([
"Savannah",
"Desert",
"Plains",
"Other"
], False),
([
"0-15",
"15-30",
"30-45",
"45-60",
"60-75",
"75+"
], False),
([
"Iron",
"Logs",
"Feathers",
"Wool",
"Gravel"
], True),
("Enter Nether", True),
("Find Fortress", True),
("Find Spawner", True),
("Exit Spawner", True),
("Exit Nether", True),
("Tower Build Start", True),
("Tower Build Finished", True),
("Tower Leave", True),
("Enter Stronghold", True),
("Enter End", True),
("Finish", True)
]
cur_stages = {}
json_file = 'mct_config.json'
with open(json_file) as json_file:
data2 = json.load(json_file)
if data2['borderless'] == 'true':
data2['borderless'] = True
else:
data2['borderless'] = False
running_path = Path.cwd()
NUM_CHARS = 11
system_type = platform.system()
if system_type == 'Linux':
directory = os.path.expanduser(data2['linux_saves'])
elif system_type == 'Darwin':
directory = os.path.expanduser(data2['mac_saves'])
elif system_type == 'Windows':
directory = os.path.expanduser(data2['windows_saves'])
amount2 = 0
last_amount = 0
window = tk.Tk()
# bg = BindGlobal(widget=window)
window.text = tk.StringVar()
window.text2 = tk.StringVar()
window.text3 = tk.StringVar()
window.text4 = tk.StringVar()
window.geometry("{}x{}".format(data2["width"], data2["height"]))
window.configure(bg='black')
rt = time.time()
old_version = False
did_change = False
count = 0
ig = 0
base = 0
program_time = 0
metronome_armed = False
metronome_running = False
metronome_active = False
metronome_beats = int(data2['metronome_beats'])
listener = None
metronome_time = 0
base_update = int(data2['base_update'])
rta_update = int(data2['rta_update']) * base_update
metronome_bpm = int(data2['metronome_bpm'])
metronome_interval = 0
if data2['auto_start'] == 'true':
click1 = 1
click2 = 1
else:
click1 = 0
click2 = 0
cur_fil = None
world_base_time = 0
def get_time():
global last_amount
global old_version
global amount2
global ig
global did_change
# print("-------------------------")
if data2['1.7+'] == 'false':
try:
global cur_fil
global world_base_time
mc_dir = Path(directory).parent
stats_dir = mc_dir / "stats"
os.chdir(stats_dir)
json_file = glob.glob('*.dat')
stats_file = json_file[0]
amount = 0
with open(stats_file) as timer_file:
# print(timer_file)
data = json.load(timer_file)
for item in data["stats-change"]:
if "1100" in item:
amount = item["1100"]
# print(amount)
latest = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)
# print(latest)
if latest != cur_fil:
cur_fil = latest
world_base_time = amount
# print("world base time now {}".format(world_base_time))
# print(amount)
amount2 = float(amount - world_base_time) / 20
# print(amount2)
run_time = str(datetime.timedelta(seconds=amount2, milliseconds=0.5))
# print(run_time)
if last_amount == amount:
ig = 0
return run_time[:-3]
else:
did_change = True
# print(latest + "\nTime: " + run_time)
last_amount = amount
ig = 0
return run_time[:-3]
except:
ig = 1
return '0:00:00.000'
else:
try:
latest = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)
if system_type == "Linux" or system_type == "Darwin":
os.chdir(latest + '/stats/')
else:
os.chdir(latest + '\\stats\\')
json_file = glob.glob('*.json')
timer = json_file[0]
with open(timer) as json_file:
data = json.load(json_file)
try:
amount = data['stats']['minecraft:custom']['minecraft:play_one_minute']
except:
amount = data['stat.playOneMinute']
old_version = True
json_file.close()
amount2 = float(amount) / 20
run_time = str(datetime.timedelta(seconds=amount2, milliseconds=0.5))
if last_amount == amount:
ig = 0
return run_time[:-3]
else:
did_change = True
print(latest + "\nTime: " + run_time)
last_amount = amount
ig = 0
return run_time[:-3]
except:
ig = 1
return '0:00:00.000'
def window2():
font_name = data2['font_name']
rta_font_size = data2['rta_font_size']
igt_font_size = data2['igt_font_size']
font_modifiers = data2['font_modifiers']
rta_font = (font_name, rta_font_size, font_modifiers)
igt_font = (font_name, igt_font_size, font_modifiers)
greeting = tk.Label(fg=data2['rta_color'], bg=data2['bg_color'], font=rta_font, textvariable=window.text)
greeting.pack()
if data2['show_igt'] == 'true':
greeting2 = tk.Label(fg=data2['igt_color'], bg=data2['bg_color'], font=igt_font, textvariable=window.text2)
greeting2.pack()
if data2['use_counter'] == 'true':
greeting3 = tk.Label(fg=data2['counter_color'], bg=data2['bg_color'], font=rta_font, textvariable=window.text3)
greeting3.pack()
# bg.gbind(data2['increment'], on_increment_counter)
# greeting.after(0, update_count)
if data2['use_splits'] == 'true':
split_font_size = data2['split_font_size']
split_font = (font_name, split_font_size, font_modifiers)
greeting4 = tk.Label(fg=data2['split_color'], bg=data2['bg_color'], font=split_font, textvariable=window.text4)
greeting4.pack()
# bg.gbind(data2['cycle'], cycle)
# bg.gbind(data2['split'], split)
# bg.gbind(data2['skip'], skip)
reset_split()
# greeting.after(0, update_count)
# bg.gbind(data2['pause'], on_press)
# bg.gbind(data2['reset_start'], on_press2)
# if data2['enable_metronome'] == 'true':
# bg.gbind(data2['arm_metronome'], arm_metronome)
# bg.gbind(data2['start_metronome'], start_metronome)
# bg.gbind(data2['exit'], clicked3)
# bg.bind(data2['start_metronome'], start_metronome)
''' this works for the window detecting right click '''
# window.bind(data2['start_metronome'], start_metronome)
#window.bind("<Button-1>", clicked)
#window.bind("<Button-3>", clicked2)
greeting.after(0, tick_time)
greeting.after(0, update_time2)
window.title("MCtimer")
window.attributes('-topmost', True)
window.overrideredirect(data2['borderless'])
window.geometry(data2['window_pos'])
window.mainloop()
def update_time():
global rt
global program_time
# do_metronome_action()
if click1 == 1:
window.text.set(real_time())
elif click1 == 0:
# rt = time.time()
diff = amount2 - base
rtc = str(datetime.timedelta(seconds=diff))
diff_txt = rtc[:-3]
# print(diff_txt)
window.text.set(diff_txt)
# print(base)
if click2 == 0:
rt = time.time()
window.text.set("0:00:00.000")
# window.after(int(data2['rta_update'])/10, update_time)
def tick_time():
global time_count
global metronome_armed
time_count += 1
update_time()
if metronome_armed or time_count % 20 == 0:
check_input()
window.after(rta_update, tick_time)
def check_input():
txt = input_fil.read_text()
input_fil.write_text("")
global metronome_armed
# print(txt)
if "start_metronome" in txt:
print(data2['enable_metronome'])
if data2['enable_metronome'] == 'true':
start_metronome(None)
if "arm_metronome" in txt:
metronome_armed = True
if "pause_timer" in txt:
left_click()
if "start_timer" in txt:
right_click()
def update_time2():
window.text2.set(get_time())
window.after(1000, update_time2)
def update_count():
count_str = str(count)
text_str = ""
for i in range(0, int(NUM_CHARS/2)):
text_str += " "
text_str += count_str
for i in range(0, int(NUM_CHARS/2)):
text_str += " "
window.text3.set(text_str)
window.after(rta_update, update_count)
# def update_split()
def on_press(event):
left_click()
def on_press2(event):
right_click()
def update_split():
global stage
text_str = cur_stages[stage][0]
if type(text_str) == type([]):
text_str = text_str[ind]
window.text4.set(text_str)
def reset_split():
global ind, stage, cur_stages
ind = 0
stage = 0
cur_stages = copy.deepcopy(rsg)
update_split()
def cycle(event):
global ind, stage
ind += 1
item = cur_stages[stage]
if type(item[0]) == type([]):
if ind == len(item[0]):
ind = 0
else:
ind = 0
update_split()
def split(event):
global stage, ind
item = cur_stages[stage]
if item[1]:
if type(item[0]) == type([]):
item[0].remove(item[0][ind])
if len(item[0]) == 0:
stage += 1
ind = 0
update_split()
return
stage += 1
ind = 0
update_split()
def skip(event):
global stage
stage += 1
update_split()
def on_increment_counter(event):
increment_counter()
def clicked3(event):
sys.exit(1)
def clicked2(event):
right_click()
def clicked(event):
left_click()
def write_to_log(text):
pass
# log_dir = Path("/Users/sharpieman20/MCtimer/MCtimer/logs")
# log_fil = log_dir / data2["current_section"]
# log_fil.touch()
# log_fil = log_fil.open("a")
# log_fil.write(str(text)+"\n")
def left_click():
global click1
if click1 == 1:
click1 = 0
elif click1 == 0:
click1 = 0
# global base
# write_to_log(str(amount2-base))
# base = amount2
def right_click():
global click1
global click2
global count
global did_change
count = 0
did_change = True
if click2 == 1:
click1 = 0
click2 = 0
elif click2 == 0:
click2 = 1
click1 = 1
# print(float(amount2))
# print("hehe")
global base
write_to_log("reset {}".format(str(amount2-base)))
base = amount2
def increment_counter():
global count
count += 1
''' METRONOME CODE '''
''' Metronome mouse listener '''
def exit_handler():
global listener
mouse.Listener.stop(listener)
window.quit()
atexit.register(exit_handler)
def listen_for_right_click():
def on_click(x, y, button, pressed):
# print(button)
if pressed:
if pressed and button == mouse.Button.right:
start_metronome(None)
return False
# mouse.Listener.stop(listener)
# print("Right Click Detected (pressed)")
with mouse.Listener(on_click=on_click) as listener:
# listener.start()
listener.join()
''' Sound playing code '''
def play_file_named(str_name):
playsound((running_path / str_name).as_posix(), block = True)
def play_up_beep():
play_file_named("MetronomeHit.mp3")
def play_normal_beep():
play_file_named("MetronomeBase.mp3")
def play_metronome_preset():
time.sleep(0.06)
play_file_named("MetronomePreset.mp3")
''' Metronome functions '''
def arm_metronome(event):
global metronome_armed
global metronome_running
if metronome_armed or metronome_running:
return
metronome_armed = True
# x = threading.Thread(target=listen_for_right_click, daemon=True)
# x.start()
listen_for_right_click()
print("armed and ready")
def start_metronome(event):
run_metronome()
# print(metronome_running)
# arm_metronome = False
def run_metronome():
global metronome_time
global metronome_interval
global metronome_running
if data2['has_metronome_preset'] == 'true':
play_metronome_preset()
metronome_running = False
return
metronome_time = 0
base_time = round(time.time()*1000)
metronome_interval = int(100 * 60 / metronome_bpm)*10
time.sleep(float(data2['beat_offset'])*metronome_interval/1000.0)
# print(metronome_interval)
while metronome_running:
start_time = round(time.time()*1000) - base_time
do_metronome_action()
end_time = round(time.time()*1000) - base_time
elapsed = end_time - start_time
time.sleep((metronome_interval - elapsed)/1000.0)
# print("{} {} {}".format(start_time, end_time, ))
metronome_time += metronome_interval
def do_metronome_action():
global metronome_running
global metronome_interval
if not metronome_running:
return
# print(metronome_interval)
# metronome_time = program_time - metronome_start_time
if metronome_time >= metronome_interval * metronome_beats:
metronome_running = False
return
# print(metronome_time)
# print(metronome_interval)
# print(time.time()*1000)
if metronome_time % metronome_interval == 0:
if (metronome_time % (metronome_interval*4)) == metronome_interval*3:
# print("up beep")
play_up_beep()
# pass
else:
# print("normal beep")
play_normal_beep()
# pass
# print(time.time()*1000)
# print()
def real_time():
global rt
global click1
global click2
global amount2
global old_version
global stage
global ig
global did_change
if data2['auto_adjust'] == 'true':
# print(did_change)
# print(base)
if did_change:
rt = float(time.time()) - float(amount2)
if data2['allow_offset'] == 'true':
rt += base
did_change = False
if data2['auto_start'] == 'true':
if ig == 1:
rt = time.time()
click1 = 1
click2 = 1
stage = 0
reset_split()
return '0:00:00.000'
elif click1 == 1:
if old_version == True and stage == 0:
ig = 0
rt = float(time.time()) - float(amount2)
rtc = str(datetime.timedelta(seconds=rt))
stage = 1
print("stop")
return rtc[:-3]
else:
ig = 0
rt2 = time.time()
real_time = rt2 - rt
rtc = str(datetime.timedelta(seconds=real_time))
# rt = float(amount2) - float(base)
# rtc = str(datetime.timedelta(seconds=rt))
return rtc[:-3]
else:
if click1 == 1:
rt2 = time.time()
real_time = rt2 - rt
rtc = str(datetime.timedelta(seconds=real_time))
return rtc[:-3]
def main():
window2()
main()
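# MCtimer reads its settings from mct_config.json; a hypothetical minimal
# config covering a subset of the keys read above (all values are placeholders):
# {
#   "borderless": "true", "width": 200, "height": 80, "window_pos": "+100+100",
#   "mac_saves": "~/Library/Application Support/minecraft/saves",
#   "linux_saves": "~/.minecraft/saves", "windows_saves": "%APPDATA%/.minecraft/saves",
#   "1.7+": "true", "auto_start": "true", "auto_adjust": "true", "allow_offset": "false",
#   "show_igt": "true", "use_counter": "false", "use_splits": "false",
#   "font_name": "Arial", "rta_font_size": 24, "igt_font_size": 16, "font_modifiers": "bold",
#   "rta_color": "white", "igt_color": "gray", "bg_color": "black",
#   "base_update": 10, "rta_update": 3,
#   "enable_metronome": "false", "metronome_bpm": 120, "metronome_beats": 8,
#   "has_metronome_preset": "false", "beat_offset": 0.0
# }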
| 24.833832
| 119
| 0.578335
| 2,005
| 16,589
| 4.6
| 0.171571
| 0.012144
| 0.01171
| 0.020492
| 0.218909
| 0.133579
| 0.093028
| 0.08674
| 0.08674
| 0.08674
| 0
| 0.031005
| 0.300078
| 16,589
| 668
| 120
| 24.833832
| 0.763328
| 0.151727
| 0
| 0.366743
| 0
| 0
| 0.080072
| 0.00434
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079727
| false
| 0.002278
| 0.034169
| 0
| 0.150342
| 0.009112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb1c97f3222308944fcb2351152a564408ff396
| 7,357
|
py
|
Python
|
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
import cv2
from trackers.tracker import create_blob, add_new_blobs, remove_duplicates
import numpy as np
from collections import OrderedDict
from detectors.detector import get_bounding_boxes
import uuid
import os
import contextlib
from datetime import datetime
import argparse
from utils.detection_roi import get_roi_frame, draw_roi
from counter import get_counting_line, is_passed_counting_line
# parse CLI arguments
parser = argparse.ArgumentParser()
parser.add_argument('video', help='relative/absolute path to video or camera input of traffic scene')
parser.add_argument('--iscam', action='store_true', help='specify if video capture is from a camera')
parser.add_argument('--droi', help='specify a detection region of interest (ROI) \
i.e a set of vertices that represent the area (polygon) \
where you want detections to be made (format: 1,2|3,4|5,6|7,8|9,10 \
default: 0,0|frame_width,0|frame_width,frame_height|0,frame_height \
[i.e the whole video frame])')
parser.add_argument('--showdroi', action='store_true', help='display/overlay the detection roi on the video')
parser.add_argument('--mctf', type=int, help='maximum consecutive tracking failures \
i.e number of tracking failures before the tracker concludes \
the tracked object has left the frame')
parser.add_argument('--di', type=int, help='detection interval i.e number of frames \
before detection is carried out again (in order to find new vehicles \
and update the trackers of old ones)')
parser.add_argument('--detector', help='select a model/algorithm to use for vehicle detection \
(options: yolo, haarc, bgsub, ssd | default: yolo)')
parser.add_argument('--tracker', help='select a model/algorithm to use for vehicle tracking \
(options: csrt, kcf, camshift | default: kcf)')
parser.add_argument('--record', action='store_true', help='record video and vehicle count logs')
parser.add_argument('--clposition', help='position of counting line (options: top, bottom, \
left, right | default: bottom)')
parser.add_argument('--hideimage', action='store_true', help='hide resulting image')
args = parser.parse_args()
# capture traffic scene video
video = int(args.video) if args.iscam else args.video
cap = cv2.VideoCapture(video)
_, frame = cap.read()
# configs
blobs = OrderedDict()
blob_id = 1
frame_counter = 0
DETECTION_INTERVAL = 10 if args.di is None else args.di
MAX_CONSECUTIVE_TRACKING_FAILURES = 3 if args.mctf is None else args.mctf
detector = 'yolo' if args.detector is None else args.detector
tracker = 'kcf' if args.tracker is None else args.tracker
f_height, f_width, _ = frame.shape
# init video object and log file to record counting
if args.record:
output_video = cv2.VideoWriter('./videos/output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (f_width, f_height))
log_file_name = 'log.txt'
with contextlib.suppress(FileNotFoundError):
os.remove(log_file_name)
log_file = open(log_file_name, 'a')
log_file.write('vehicle_id, count, datetime\n')
log_file.flush()
# set counting line
clposition = 'bottom' if args.clposition is None else args.clposition
counting_line = get_counting_line(clposition, f_width, f_height)
vehicle_count = 0
# create detection ROI
droi = [(0, 0), (f_width, 0), (f_width, f_height), (0, f_height)]
if args.droi:
droi = []
points = args.droi.replace(' ', '').split('|')
for point_str in points:
point = tuple(map(int, point_str.split(',')))
droi.append(point)
# initialize trackers and create new blobs
droi_frame = get_roi_frame(frame, droi)
initial_bboxes = get_bounding_boxes(droi_frame, detector)
for box in initial_bboxes:
_blob = create_blob(box, frame, tracker)
blobs[blob_id] = _blob
blob_id += 1
while True:
k = cv2.waitKey(1)
if args.iscam or cap.get(cv2.CAP_PROP_POS_FRAMES) + 1 < cap.get(cv2.CAP_PROP_FRAME_COUNT):
_, frame = cap.read()
nframes = cap.get(cv2.CAP_PROP_POS_FRAMES)
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if nframes % 10 == 0 or nframes == 1:
print("Processing {} of {} frames".format(nframes,frame_count))
for _id, blob in list(blobs.items()):
# update trackers
success, box = blob.tracker.update(frame)
if success:
blob.num_consecutive_tracking_failures = 0
blob.update(box)
else:
blob.num_consecutive_tracking_failures += 1
# delete untracked blobs
if blob.num_consecutive_tracking_failures >= MAX_CONSECUTIVE_TRACKING_FAILURES:
del blobs[_id]
# count vehicles
if is_passed_counting_line(blob.centroid, counting_line, clposition) and not blob.counted:
blob.counted = True
vehicle_count += 1
# log count data to a file (vehicle_id, count, datetime)
if args.record:
_row = '{0}, {1}, {2}\n'.format('v_' + str(_id), vehicle_count, datetime.now())
log_file.write(_row)
log_file.flush()
if frame_counter >= DETECTION_INTERVAL:
# rerun detection
droi_frame = get_roi_frame(frame, droi)
boxes = get_bounding_boxes(droi_frame, detector)
blobs, current_blob_id = add_new_blobs(boxes, blobs, frame, tracker, blob_id, counting_line, clposition)
blob_id = current_blob_id
blobs = remove_duplicates(blobs)
frame_counter = 0
# draw and label blob bounding boxes
for _id, blob in blobs.items():
(x, y, w, h) = [int(v) for v in blob.bounding_box]
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame, 'v_' + str(_id), (x, y - 2), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# draw counting line
cv2.line(frame, counting_line[0], counting_line[1], (0, 255, 0), 3)
# display vehicle count
cv2.putText(frame, 'Count: ' + str(vehicle_count), (20, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
# show detection roi
if args.showdroi:
frame = draw_roi(frame, droi)
# save frame in video output
if args.record:
output_video.write(frame)
# visualize vehicle counting
if not args.hideimage:
resized_frame = cv2.resize(frame, (858, 480))
cv2.imshow('tracking', resized_frame)
frame_counter += 1
# save frame if 's' key is pressed
if k & 0xFF == ord('s'):
cv2.imwrite(os.path.join('screenshots', 'ss_' + uuid.uuid4().hex + '.png'), frame)
print('Screenshot taken.')
else:
print('End of video.')
# end video loop if on the last frame
break
# end video loop if 'q' key is pressed
if k & 0xFF == ord('q'):
print('Video exited.')
break
# end capture, close window, close log file and video objects if any
cap.release()
if not args.hideimage:
cv2.destroyAllWindows()
if args.record:
log_file.close()
output_video.release()
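# Example invocations assembled from the argparse flags defined above
# (file paths and the ROI polygon are illustrative):
# python Vehicle_Counting_colab.py ./videos/traffic.mp4 --detector yolo --tracker kcf --record
# python Vehicle_Counting_colab.py 0 --iscam --clposition bottom --hideimage
# python Vehicle_Counting_colab.py ./videos/traffic.mp4 --droi "0,0|858,0|858,480|0,480" --showdroi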
| 40.646409
| 125
| 0.644964
| 1,007
| 7,357
| 4.54717
| 0.270109
| 0.017034
| 0.040839
| 0.016598
| 0.116401
| 0.076436
| 0.062022
| 0.017471
| 0.017471
| 0
| 0
| 0.018939
| 0.246432
| 7,357
| 181
| 126
| 40.646409
| 0.806999
| 0.085905
| 0
| 0.137405
| 0
| 0.007634
| 0.079517
| 0
| 0
| 0
| 0.001194
| 0
| 0
| 1
| 0
| false
| 0.015267
| 0.091603
| 0
| 0.091603
| 0.030534
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb26248e648d4d9b19d0a9ff813f2b53c5baabf
| 2,615
|
py
|
Python
|
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" Propose Role Add Member Test """
# pylint: disable=invalid-name
import time
import requests
import pytest
from rbac.common.logs import get_logger
from tests.rbac import helper
from tests.rbac.api.assertions import assert_api_error
from tests.rbac.api.assertions import assert_api_success
from tests.rbac.api.assertions import assert_api_post_requires_auth
LOGGER = get_logger(__name__)
@pytest.mark.api
@pytest.mark.api_role
def test_api_propose_role_member():
""" Test a user proposing to add themselves to a role
"""
owner = helper.api.user.current
role = helper.api.role.create.new(user=owner)
user = helper.api.user.current2
url = helper.api.role.member.propose.url(role_id=role["id"])
data = {"id": user["user_id"]}
assert assert_api_post_requires_auth(url=url, json=data)
response = requests.post(
url=url, headers={"Authorization": user["token"]}, json=data
)
result = assert_api_success(response)
assert result["proposal_id"]
time.sleep(0.5) # temporary until API refactored to return the proposal
proposal = helper.api.proposal.get(result["proposal_id"], owner)
assert proposal["id"] == result["proposal_id"]
assert proposal["status"] == "OPEN"
assert proposal["type"] == "ADD_ROLE_MEMBER"
assert proposal["object"] == role["id"]
assert proposal["target"] == user["user_id"]
assert proposal["opener"] == user["user_id"]
@pytest.mark.api
@pytest.mark.api_role
def test_api_propose_role_member_required_fields():
""" Test proposing adding a member to a role with missing fields
"""
role, _ = helper.api.role.current
user = helper.api.user.create.current
url = helper.api.role.member.propose.url(role_id=role["id"])
data = {}
response = requests.post(
url=url, headers={"Authorization": user["token"]}, json=data
)
assert_api_error(response, "Bad Request: id field is required", 400)
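# Both tests carry pytest markers, so they can be selected by marker or by
# path, e.g.:
# pytest -m api_role tests/rbac/api/role/propose_member_test.py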
| 36.830986
| 79
| 0.702868
| 364
| 2,615
| 4.925824
| 0.362637
| 0.040156
| 0.029002
| 0.026771
| 0.281093
| 0.258226
| 0.258226
| 0.258226
| 0.189626
| 0.189626
| 0
| 0.006364
| 0.1587
| 2,615
| 70
| 80
| 37.357143
| 0.808636
| 0.338815
| 0
| 0.238095
| 0
| 0
| 0.106195
| 0
| 0
| 0
| 0
| 0
| 0.309524
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb2a48f3534ac05974fe4c223430ebf965fdf0b
| 881
|
py
|
Python
|
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
import cv2
import os
import numpy as np
from PIL import Image
def frame2video(im_dir, video_dir, fps):
im_list = os.listdir(im_dir)
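# sort frames numerically by their leading index; assumes names like '12_RBPNF7.png' (illustrative)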
im_list.sort(key=lambda x: int(x.replace("_RBPNF7", "").split('.')[0]))
img = Image.open(os.path.join(im_dir, im_list[0]))
img_size = img.size  # get the frame resolution; all images under im_dir must share the same resolution
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter(video_dir, fourcc, fps, img_size)
for i in im_list:
im_name = os.path.join(im_dir, i)
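# np.fromfile + cv2.imdecode reads paths that cv2.imread cannot open (e.g. non-ASCII characters on Windows)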
frame = cv2.imdecode(np.fromfile(im_name, dtype=np.uint8), -1)
videoWriter.write(frame)
videoWriter.release()
if __name__ == '__main__':
im_dir = '/media/hy/Seagate Expansion Drive/Results/merge_dir/' # directory holding the extracted frames
video_dir = '/media/hy/Seagate Expansion Drive/Results/sandy.mp4' # output path for the assembled video
fps = 15 # frame rate of the output video
frame2video(im_dir, video_dir, fps)
| 33.884615
| 82
| 0.682179
| 132
| 881
| 4.325758
| 0.477273
| 0.052539
| 0.056042
| 0.073555
| 0.28021
| 0.227671
| 0.1331
| 0
| 0
| 0
| 0
| 0.019499
| 0.185017
| 881
| 25
| 83
| 35.24
| 0.775766
| 0.053348
| 0
| 0
| 0
| 0
| 0.148372
| 0.056695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb332026597a8538e3390f5b54de4be3aa00f42
| 11,103
|
py
|
Python
|
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | 1
|
2020-12-17T03:09:20.000Z
|
2020-12-17T03:09:20.000Z
|
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | null | null | null |
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 17:38:48 2020
@author: luke
"""
import sys
from functools import partial
import torch
import torch.nn as nn
import prettytable as pt
from .basic_hook import MODULES_MAPPING
def get_model_compute_info(model, input_res,
print_per_layer_stat=False,
input_constructor=None, ost=sys.stdout,
verbose=False, ignore_modules=[],
custom_modules_hooks={}):
assert type(input_res) is tuple
assert len(input_res) >= 1
assert isinstance(model, nn.Module)
global CUSTOM_MODULES_MAPPING
CUSTOM_MODULES_MAPPING = custom_modules_hooks
compute_model = add_computing_methods(model)
compute_model.eval()
compute_model.start_compute(ost=ost, verbose=verbose, ignore_list=ignore_modules)
if input_constructor:
input = input_constructor(input_res)
_ = compute_model(**input)
else:
try:
batch = torch.ones(()).new_empty((1, *input_res),
dtype=next(compute_model.parameters()).dtype,
device=next(compute_model.parameters()).device)
except StopIteration:
batch = torch.ones(()).new_empty((1, *input_res))
_ = compute_model(batch)
flops_count, mac_count, params_count = compute_model.compute_average_compute_cost()
if print_per_layer_stat:
print_model_with_compute(compute_model, flops_count, mac_count, params_count, ost=ost)
compute_model.stop_compute()
CUSTOM_MODULES_MAPPING = {}
tb = pt.PrettyTable()
tb.field_names = ['Metrics', 'Value']
tb.add_row(['Floating Point Operations (FLOPs)', '%8s' % to_string(flops_count)])
tb.add_row(['Memory Access Cost (MAC)', '%8s' % to_string(mac_count)])
tb.add_row(['Number of Parameters', '%8s' % to_string(params_count)])
print(tb)
return flops_count, mac_count, params_count
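# Minimal usage sketch (illustrative; assumes torchvision is installed):
#   import torchvision.models as models
#   net = models.resnet18()
#   flops, mac, params = get_model_compute_info(net, (3, 224, 224))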
def to_string(params_num, units=None, precision=3):
if units is None:
if params_num // 10**9 > 0:
return str(round(params_num / 10**9, 3)) + ' G'
elif params_num // 10**6 > 0:
return str(round(params_num / 10**6, 3)) + ' M'
elif params_num // 10**3 > 0:
return str(round(params_num / 10**3, 3)) + ' K'
else:
return str(params_num)
else:
if units == 'G':
return str(round(params_num / 10**9, precision)) + ' ' + units
if units == 'M':
return str(round(params_num / 10**6, precision)) + ' ' + units
elif units == 'K':
return str(round(params_num / 10**3, precision)) + ' ' + units
else:
return str(params_num)
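# e.g. to_string(1500000000) -> '1.5 G'; to_string(2048, units='K') -> '2.048 K'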
def print_model_with_compute(model, total_flops, total_mac, total_params, units='M',
precision=3, ost=sys.stdout):
def accumulate_params(self):
if is_supported_instance(self):
return self.__params__
else:
sum = 0
for m in self.children():
sum += m.accumulate_params()
return sum
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def accumulate_mac(self):
if is_supported_instance(self):
return self.__mac__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_mac()
return sum
def compute_repr(self):
accumulated_params_num = self.accumulate_params()
accumulated_flops_cost = self.accumulate_flops()
accumulated_mac_cost = self.accumulate_mac()
return ', '.join([to_string(accumulated_params_num,
units=units, precision=precision),
'{:.3%} Params'.format(accumulated_params_num / total_params),
to_string(accumulated_flops_cost,
units=units, precision=precision),
'{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
to_string(accumulated_mac_cost,
units=units, precision=precision),
'{:.3%} MAC'.format(accumulated_mac_cost / total_mac),
'{:.3} MAC/FLOPs'.format(accumulated_mac_cost / (accumulated_flops_cost + 1e-5) \
* total_flops / (total_mac + 1e-5)),
self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
m.accumulate_mac = accumulate_mac.__get__(m)
m.accumulate_params = accumulate_params.__get__(m)
compute_extra_repr = compute_repr.__get__(m)
if m.extra_repr != compute_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = compute_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
if hasattr(m, 'accumulate_mac'):
del m.accumulate_mac
model.apply(add_extra_repr)
print(repr(model), file=ost)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_computing_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_compute = start_compute.__get__(net_main_module)
net_main_module.stop_compute = stop_compute.__get__(net_main_module)
net_main_module.reset_compute = reset_compute.__get__(net_main_module)
net_main_module.compute_average_compute_cost = compute_average_compute_cost.__get__(
net_main_module)
net_main_module.reset_compute()
return net_main_module
def compute_average_compute_cost(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Returns current mean flops/mac consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
mac_sum = 0
params_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
mac_sum += module.__mac__
params_sum = get_model_parameters_number(self)
return flops_sum / batches_count, mac_sum / batches_count, params_sum
def start_compute(self, **kwargs):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Activates the computation of mean flops/mac consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
seen_types = set()
def add_compute_hook_function(module, ost, verbose, ignore_list):
if type(module) in ignore_list:
seen_types.add(type(module))
if is_supported_instance(module):
module.__params__ = 0
elif is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if type(module) in CUSTOM_MODULES_MAPPING:
handle = module.register_forward_hook(
CUSTOM_MODULES_MAPPING[type(module)])
else:
handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
module.__flops_handle__ = handle
module.__mac_handle__ = handle
seen_types.add(type(module))
else:
if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \
not type(module) in seen_types:
print('Warning: module ' + type(module).__name__ +
' is treated as a zero-op.', file=ost)
seen_types.add(type(module))
self.apply(partial(add_compute_hook_function, **kwargs))
def stop_compute(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_compute_hook_function)
def reset_compute(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_compute_variable_or_reset)
def batch_counter_hook(module, input, output):
batch_size = 1
if len(input) > 0:
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = len(input)
else:
print('Warning! No positional inputs found for a module,'
' assuming batch size is 1.')
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_compute_variable_or_reset(module):
if is_supported_instance(module):
if hasattr(module, '__flops__') or hasattr(module, '__mac__') or \
hasattr(module, '__params__'):
print('Warning: variables __flops__ or __mac__ or __params__ are already '
'defined for the module ' + type(module).__name__ +
'. ptflops can affect your code!')
module.__flops__ = 0
module.__mac__ = 0
module.__params__ = get_model_parameters_number(module)
def is_supported_instance(module):
if type(module) in MODULES_MAPPING or type(module) in CUSTOM_MODULES_MAPPING:
return True
return False
def remove_compute_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
if hasattr(module, '__mac_handle__'):
module.__mac_handle__.remove()
del module.__mac_handle__
| 35.359873
| 107
| 0.631721
| 1,358
| 11,103
| 4.764359
| 0.172312
| 0.033385
| 0.022102
| 0.02272
| 0.359351
| 0.278825
| 0.213138
| 0.181607
| 0.132767
| 0.111283
| 0
| 0.009632
| 0.280014
| 11,103
| 313
| 108
| 35.472843
| 0.799725
| 0.086103
| 0
| 0.181818
| 0
| 0
| 0.057023
| 0.004785
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.104545
| false
| 0.004545
| 0.027273
| 0
| 0.236364
| 0.040909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb33dd64b91d776f626dc908c114e472e82874d
| 2,301
|
py
|
Python
|
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | 1
|
2020-06-18T17:51:13.000Z
|
2020-06-18T17:51:13.000Z
|
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
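# Normalize Spanish page slugs to their English counterparts so filenames stay consistent across languages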
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
| 40.368421
| 94
| 0.614081
| 257
| 2,301
| 5.44358
| 0.474708
| 0.057184
| 0.082202
| 0.025733
| 0.038599
| 0.031451
| 0
| 0
| 0
| 0
| 0
| 0.012878
| 0.223816
| 2,301
| 56
| 95
| 41.089286
| 0.770437
| 0.046936
| 0
| 0
| 0
| 0
| 0.276382
| 0.074463
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.044444
| 0.022222
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb3b617387a63312fcb662d0698c65cf437acee
| 3,340
|
py
|
Python
|
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | 1
|
2022-01-14T18:03:42.000Z
|
2022-01-14T18:03:42.000Z
|
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
"""
Functions are subprograms which are used to compute a value or perform a task.
Types of Functions:-
Built-in Functions:
print(), upper()
User-defined functions
Advantages of Functions
1. Write once and use it as many times as you need. This provides code reusability
2. Functions facilitate ease of code maintenance
3. Divide a large task into many small tasks so it will help you to debug code
4. You can remove or add a new feature to a function at any time.
"""
"""
We can define a function using the def keyword followed by the function name with parentheses. This is also called
Creating a Function, Writing a Function, or Defining a Function.
Syntax:-
def function_name():
Local Variable
block of statement
return(variable or expression)
def function_name(param1, param2, param3, .....)
Local Variable
Block of statement
return (variable or expression)
Note - Need to maintain proper indentation
"""
# creating a list and summing its elements
def add():
list = [8, 2, 3, 0, 7]
total = 0
for i in range(0, len(list)):
total = total + list[i]
print('Sum of all elements in given list: ', total)
if __name__ == '__main__':
add()
print()
# another method
def sum_list():
mylist = [8, 2, 3, 0, 7]
# Using inbuilt sum method
total = sum(mylist)
print("Sum of all elements in given list1: ", total)
if __name__ == '__main__':
sum_list()
print()
def multiplylist():
list_multiply = [8, 2, 3, -1, 7]
total = 1
for x in list_multiply:
total = total * x
print(total)
if __name__ == '__main__':
multiplylist()
# Method 2: Using numpy.prod() (requires the numpy package)
import numpy
def product_total():
list_product = [8, 2, 3, -1, 7]
total = numpy.prod(list_product)
print("Another method using numpy method to find product in list: ", total)
product_total()
print()
def findingminmax(num1: int, num2: int, num3: int) -> int:
max = 0
if (num1 > num2 and num1 > num3):
max = num1
elif (num2 > num1 and num2 > num3):
max = num2
else:
max = num3
print("The maximum number in given list is: ", max)
findingminmax(22, 26, 30)
print()
print("Another Method to find maximum")
def findingmaximum(num1: int, num2: int, num3: int) -> int:
find_max_list = (num1, num2, num3)
return max(find_max_list)
x = int(input("Enter your first Number: "))
y = int(input("Enter your second Number: "))
z = int(input("Enter your third Number: "))
print("Maximum number is ::>", findingmaximum(x, y, z))
"""Python program to print the even numbers from a given list"""
def find_even():
sample_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for num in sample_list:
if num % 2 == 0:
print(num, end=" ")
find_even()
print()
"""
Python program to find prime numbers in given list
Function should return True if the number is prime; else False
"""
def isPrime(num):
if (num < 2):
return False
for i in range(2, num//2 + 1):
if (num % i == 0):
return False
return True
number = int(input("Enter the number you would like to check whether it is prime or not: \n"))
if isPrime(number):
print(number, "is a Prime Number")
else:
print(number, "is not a Prime number")
"""
Another Method to find prime number
"""
| 18.870056
| 106
| 0.645808
| 503
| 3,340
| 4.204771
| 0.326044
| 0.021277
| 0.005674
| 0.021277
| 0.115366
| 0.110638
| 0.101182
| 0.052009
| 0.052009
| 0
| 0
| 0.029786
| 0.246108
| 3,340
| 176
| 107
| 18.977273
| 0.810167
| 0.170359
| 0
| 0.173913
| 0
| 0
| 0.210858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0
| 0.014493
| 0
| 0.188406
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb3deb24bc63c8049391df8c67ec2a72c8f437a
| 945
|
py
|
Python
|
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | 9
|
2017-04-23T23:54:56.000Z
|
2021-12-26T02:21:28.000Z
|
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | null | null | null |
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | 3
|
2017-04-23T23:55:13.000Z
|
2017-05-03T01:20:23.000Z
|
# coding: utf-8
import click
@click.command()
@click.option('--carrier', prompt='Carrier ID', help='Example: "ect" for Correios')
@click.option('--object-id', prompt='Object ID',
help='Example: PN871429404BR')
def main(carrier, object_id):
from trackr import Trackr
from trackr.exceptions import PackageNotFound
try:
p = Trackr.track(carrier, object_id)
except PackageNotFound as e:
click.echo(click.style(
u'Package with object ID {} ({}) not found'.format(
object_id, carrier),
fg='red')
)
if e.carrier_message:
click.echo(click.style(
u'Carrier message: {}'.format(e.carrier_message),
fg='red',)
)
return
click.echo(click.style(u'Package found!', fg='green'))
for t in p.tracking_info:
click.echo(t.__unicode__())
if __name__ == "__main__":
main()
| 25.540541
| 83
| 0.582011
| 111
| 945
| 4.792793
| 0.441441
| 0.090226
| 0.078947
| 0.107143
| 0.139098
| 0.101504
| 0
| 0
| 0
| 0
| 0
| 0.014749
| 0.28254
| 945
| 36
| 84
| 26.25
| 0.769912
| 0.013757
| 0
| 0.074074
| 0
| 0
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb3f4ba8d64955ba6c3c16193d7d7869a8725dd
| 3,043
|
py
|
Python
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 28
|
2020-11-24T08:02:58.000Z
|
2022-02-27T18:37:33.000Z
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 263
|
2020-11-10T14:35:10.000Z
|
2022-03-31T12:35:13.000Z
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 1
|
2022-01-31T22:48:35.000Z
|
2022-01-31T22:48:35.000Z
|
from enum import Enum, auto
from subprocess import CalledProcessError, run
from pitop.common.command_runner import run_command
from pitop.common.logger import PTLogger
class NotificationAction:
def __init__(self, call_to_action_text, command_str) -> None:
self.call_to_action_text = call_to_action_text
self.command_str = command_str
class NotificationActionManager:
def __init__(self):
self.actions = list()
self.default_action = None
self.close_action = None
def add_action(self, call_to_action_text, command_str) -> None:
action = NotificationAction(call_to_action_text, command_str)
self.actions.append(action)
def set_default_action(self, command_str) -> None:
default_action = NotificationAction("", command_str)
self.default_action = default_action
def set_close_action(self, command_str) -> None:
close_action = NotificationAction("", command_str)
self.close_action = close_action
class NotificationUrgencyLevel(Enum):
low = auto()
normal = auto()
critical = auto()
def send_notification(
title: str,
text: str,
icon_name: str = "",
timeout: int = 0,
app_name: str = "",
notification_id: int = -1,
actions_manager: NotificationActionManager = None,
urgency_level: NotificationUrgencyLevel = None,
capture_notification_id: bool = True,
) -> str:
# Check that `notify-send-ng` is available, as it's not a hard dependency of the package
try:
run(["dpkg-query", "-l", "notify-send-ng"], capture_output=True, check=True)
except CalledProcessError:
raise Exception("notify-send-ng not installed")
cmd = "/usr/bin/notify-send "
cmd += "--print-id "
cmd += "--expire-time=" + str(timeout) + " "
if icon_name:
cmd += "--icon=" + icon_name + " "
if notification_id >= 0:
cmd += "--replace=" + str(notification_id) + " "
if actions_manager is not None:
for action in actions_manager.actions:
cmd += (
'--action="'
+ action.call_to_action_text
+ ":"
+ action.command_str
+ '" '
)
if actions_manager.default_action is not None:
cmd += (
"--default-action=" + actions_manager.default_action.command_str + " "
)
if actions_manager.close_action is not None:
cmd += "--close-action=" + actions_manager.close_action.command_str + " "
if app_name:
cmd += "--app-name=" + app_name + " "
if urgency_level is not None:
cmd += "--urgency=" + urgency_level.name + " "
cmd += ' "' + title + '" '
cmd += '"' + text + '"'
PTLogger.info("notify-send command: {}".format(cmd))
try:
resp_stdout = run_command(cmd, 2000, capture_output=capture_notification_id)
except Exception as e:
PTLogger.warning("Failed to show message: {}".format(e))
raise
return resp_stdout
| 29.833333
| 92
| 0.621755
| 350
| 3,043
| 5.16
| 0.282857
| 0.066445
| 0.039867
| 0.053156
| 0.187154
| 0.087486
| 0.037652
| 0.037652
| 0
| 0
| 0
| 0.003126
| 0.264213
| 3,043
| 101
| 93
| 30.128713
| 0.803484
| 0.028262
| 0
| 0.052632
| 0
| 0
| 0.08291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.052632
| 0
| 0.223684
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcb80d7d2c6e6e1e230619095dac5498b39b51c1
| 3,989
|
py
|
Python
|
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | null | null | null |
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | null | null | null |
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | 2
|
2019-10-15T23:22:16.000Z
|
2019-10-29T04:38:02.000Z
|
import pygame
from pygame.sprite import Sprite
class Coins(Sprite):
"""Coins"""
def __init__(self, hub, x, y, name='coin', state='floating'):
super().__init__()
# Values
self.name = name
self.hub = hub
self.original_pos = [x, y]
self.rest_height = y
self.rest_x = x
self.velY = 0
self.upwards = True
self.state = state
self.scale = (30, 50)
self.scale2 = (14, 50)
self.scale3 = (4, 50)
# Screen Camera
self.screen = self.hub.main_screen
self.screen_rect = self.screen.get_rect()
self.camera = hub.camera
# Images
self.index = 0
self.change_freq = 120
self.player_clock = pygame.time.get_ticks() + self.change_freq
self.frameRate = 30
self.clock = pygame.time.get_ticks() + self.frameRate
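# NOTE: self.clock is initialized but never consulted; frame changes below are throttled by self.player_clock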
self.image_index = [pygame.image.load("imgs/Items/coin1.png"),
pygame.image.load("imgs/Items/coin2.png"),
pygame.image.load("imgs/Items/coin3.png"),
pygame.image.load("imgs/Items/coin2.png")]
self.image_index[0] = pygame.transform.scale(self.image_index[0], self.scale)
self.image_index[1] = pygame.transform.scale(self.image_index[1], self.scale2)
self.image_index[2] = pygame.transform.scale(self.image_index[2], self.scale3)
self.image_index[3] = pygame.transform.scale(self.image_index[3], self.scale2)
self.resting_index = [pygame.image.load("imgs/Items/CoinForBlackBG.png"),
pygame.image.load("imgs/Items/CoinForBlackBG1.png"),
pygame.image.load("imgs/Items/CoinForBlackBG2.png"),
pygame.image.load("imgs/Items/CoinForBlackBG1.png")]
for i in range(len(self.resting_index)):
self.resting_index[i] = pygame.transform.scale(self.resting_index[i], self.scale)
if self.state == "floating":
self.image = self.image_index[self.index]
else:
self.image = self.resting_index[self.index]
self.rect = self.image.get_rect()
self.rect.x = self.original_pos[0]
self.rect.y = self.original_pos[1]
def draw(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.check_state()
def check_state(self):
if self.state == "floating":
self.start_anim()
elif self.state == "resting":
self.resting()
def start_anim(self):
"""Starts coin spin animation"""
self.velY = 5
if self.rect.y == (self.rest_height - 60):
self.upwards = False
if self.upwards:
self.rect.y -= self.velY
else:
self.rect.y += self.velY
# start timer
if pygame.time.get_ticks() > self.player_clock:
self.player_clock = pygame.time.get_ticks() + self.change_freq
if self.index == 0:
self.original_pos[0] += 8
elif self.index == 1:
self.original_pos[0] += 5
elif self.index == 2:
self.original_pos[0] -= 5
elif self.index == 3:
self.original_pos[0] -= 8
self.index += 1
self.index %= len(self.image_index)
self.image = self.image_index[self.index]
if self.rect.y == self.rest_height:
self.hub.gamemode.coins += 1
self.hub.gamemode.check_coins()
self.hub.gamemode.score += 200
self.kill()
def resting(self):
"""Starts coin rest animation"""
# start timer
if pygame.time.get_ticks() > self.player_clock:
self.player_clock = pygame.time.get_ticks() + self.change_freq
self.index += 1
self.index %= len(self.resting_index)
self.image = self.resting_index[self.index]
| 34.387931
| 93
| 0.560792
| 494
| 3,989
| 4.402834
| 0.182186
| 0.074483
| 0.077241
| 0.069885
| 0.516782
| 0.44046
| 0.316782
| 0.211034
| 0.109885
| 0.109885
| 0
| 0.022303
| 0.314365
| 3,989
| 115
| 94
| 34.686957
| 0.772943
| 0.028077
| 0
| 0.174419
| 0
| 0
| 0.060669
| 0.030853
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.023256
| 0
| 0.104651
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcbacb893f1fc24efc7e31b69bae2dad2d6081f7
| 293
|
py
|
Python
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 7
|
2022-01-13T09:54:39.000Z
|
2022-02-08T23:34:47.000Z
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 62
|
2021-12-14T10:24:13.000Z
|
2022-02-09T00:00:12.000Z
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 2
|
2022-01-20T10:41:51.000Z
|
2022-02-04T11:26:41.000Z
|
from pathlib import Path
from nb_helpers.clean import clean_all, clean_one
from tests import TEST_PATH
TEST_PATH
TEST_NB = Path("test_nb.py")
def test_clean_one():
"clean just one nb"
clean_one(TEST_NB)
def test_clean_all():
"clean all test nbs"
clean_all(path=TEST_PATH)
| 17.235294
| 49
| 0.744027
| 51
| 293
| 3.980392
| 0.294118
| 0.157635
| 0.128079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177474
| 293
| 16
| 50
| 18.3125
| 0.842324
| 0.122867
| 0
| 0
| 0
| 0
| 0.153584
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcbc7df90a025f59202f5950277107bf1a366441
| 5,746
|
py
|
Python
|
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | null | null | null |
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | null | null | null |
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | 2
|
2021-04-03T16:39:23.000Z
|
2021-08-15T08:09:21.000Z
|
import streamlit as st
def app():
import yfinance as yf
import streamlit as st
import datetime
import matplotlib.pyplot as plt
import talib
import ta
import numpy as np
import matplotlib.ticker as mticker
import pandas as pd
import requests
yf.pdr_override()
st.write("""
# Technical Analysis of Securities
Shown below are the **Moving Average Crossovers**, **Bollinger Bands**, **MACD's**, **Commodity Channel Indexes**, **Relative Strength Indexes** and **Extended Market Calculators** of any stock
""")
st.sidebar.header('User Input Parameters')
today = datetime.date.today()
def user_input_features():
ticker = st.sidebar.text_input("Ticker", 'GME')
start_date = st.sidebar.text_input("Start Date", '2019-01-01')
end_date = st.sidebar.text_input("End Date", f'{today}')
return ticker, start_date, end_date
symbol, start, end = user_input_features()
def get_symbol(symbol):
cticker = yf.Ticker(symbol)
company_name = cticker.info['longName']
return company_name
company_name = get_symbol(symbol.upper())
start = pd.to_datetime(start)
end = pd.to_datetime(end)
# Read data
data = yf.download(symbol,start,end)
# Adjusted Close Price
st.header(f"""
Adjusted Close Price\n {company_name}
""")
st.line_chart(data['Adj Close'])
# ## SMA and EMA
#Simple Moving Average
data['SMA'] = talib.SMA(data['Adj Close'], timeperiod = 20)
# Exponential Moving Average
data['EMA'] = talib.EMA(data['Adj Close'], timeperiod = 20)
# Plot
st.header(f"""
Simple Moving Average vs. Exponential Moving Average\n {company_name}
""")
st.line_chart(data[['Adj Close','SMA','EMA']])
# Bollinger Bands
data['upper_band'], data['middle_band'], data['lower_band'] = talib.BBANDS(data['Adj Close'], timeperiod =20)
# Plot
st.header(f"""
Bollinger Bands\n {company_name}
""")
st.line_chart(data[['Adj Close','upper_band','middle_band','lower_band']])
# ## MACD (Moving Average Convergence Divergence)
# MACD
data['macd'], data['macdsignal'], data['macdhist'] = talib.MACD(data['Adj Close'], fastperiod=12, slowperiod=26, signalperiod=9)
# Plot
st.header(f"""Moving Average Convergence Divergence\n {company_name}""")
st.line_chart(data[['macd','macdsignal']])
## CCI (Commodity Channel Index)
# CCI
cci = ta.trend.cci(data['High'], data['Low'], data['Close'], 31, 0.015)
# Plot
st.header(f"""Commodity Channel Index\n {company_name}""")
st.line_chart(cci)
# ## RSI (Relative Strength Index)
# RSI
data['RSI'] = talib.RSI(data['Adj Close'], timeperiod=14)
# Plot
st.header(f"""Relative Strength Index\n {company_name}""")
st.line_chart(data['RSI'])
# ## OBV (On Balance Volume)
# OBV
data['OBV'] = talib.OBV(data['Adj Close'], data['Volume'])/10**6
# Plot
st.header(f"""On Balance Volume\n {company_name}""")
st.line_chart(data['OBV'])
# Extended Market
fig, ax1 = plt.subplots()
#Asks for stock ticker
sma = 50
limit = 10
data = yf.download(symbol,start, today)
#calculates sma and creates a column in the dataframe
data['SMA'+str(sma)] = data.iloc[:,4].rolling(window=sma).mean()
data['PC'] = ((data["Adj Close"]/data['SMA'+str(sma)])-1)*100
mean = round(data["PC"].mean(), 2)
stdev = round(data["PC"].std(), 2)
current = round(data["PC"][-1], 2)
yday = round(data["PC"][-2], 2)
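# NOTE: data["PC"][-1] / [-2] rely on positional Series indexing, which newer pandas versions deprecate; .iloc[-1] is the explicit form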
stats = [['Mean', mean], ['Standard Deviation', stdev], ['Current', current], ['Yesterday', yday]]
frame = pd.DataFrame(stats,columns = ['Statistic', 'Value'])
st.header(f"""Extended Market Calculator\n {company_name}""")
st.dataframe(frame.style.hide_index())
# fixed bin size
bins = np.arange(-100, 100, 1)
plt.rcParams['figure.figsize'] = 15, 10
plt.xlim([data["PC"].min()-5, data["PC"].max()+5])
plt.hist(data["PC"], bins=bins, alpha=0.5)
plt.title(symbol+"-- % From "+str(sma)+" SMA Histogram since "+str(start.year))
plt.xlabel('Percent from '+str(sma)+' SMA (bin size = 1)')
plt.ylabel('Count')
plt.axvline( x=mean, ymin=0, ymax=1, color='k', linestyle='--')
plt.axvline( x=stdev+mean, ymin=0, ymax=1, color='gray', alpha=1, linestyle='--')
plt.axvline( x=2*stdev+mean, ymin=0, ymax=1, color='gray',alpha=.75, linestyle='--')
plt.axvline( x=3*stdev+mean, ymin=0, ymax=1, color='gray', alpha=.5, linestyle='--')
plt.axvline( x=-stdev+mean, ymin=0, ymax=1, color='gray', alpha=1, linestyle='--')
plt.axvline( x=-2*stdev+mean, ymin=0, ymax=1, color='gray',alpha=.75, linestyle='--')
plt.axvline( x=-3*stdev+mean, ymin=0, ymax=1, color='gray', alpha=.5, linestyle='--')
plt.axvline( x=current, ymin=0, ymax=1, color='r', label = 'today')
plt.axvline( x=yday, ymin=0, ymax=1, color='blue', label = 'yesterday')
#add more x axis labels
ax1.xaxis.set_major_locator(mticker.MaxNLocator(14))
st.pyplot(fig)
#Create Plots
fig2, ax2 = plt.subplots()
data=data[-150:]
data['PC'].plot(label='close',color='k')
plt.title(symbol+"-- % From "+str(sma)+" SMA Over last 100 days")
plt.xlabel('Date')
plt.ylabel('Percent from '+str(sma)+' SMA')
#add more x axis labels
ax2.xaxis.set_major_locator(mticker.MaxNLocator(8))
plt.axhline( y=limit, xmin=0, xmax=1, color='r')
plt.rcParams['figure.figsize'] = 15, 10
st.pyplot(fig2)
| 34.202381
| 197
| 0.603376
| 779
| 5,746
| 4.392811
| 0.283697
| 0.035359
| 0.035067
| 0.0263
| 0.303331
| 0.249562
| 0.205435
| 0.156341
| 0.156341
| 0.124489
| 0
| 0.025095
| 0.223286
| 5,746
| 167
| 198
| 34.407186
| 0.741654
| 0.077793
| 0
| 0.107843
| 0
| 0.009804
| 0.239833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.107843
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcbd7d0edc16eccd95b307b889e7f1a174b4d31c
| 4,642
|
py
|
Python
|
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from mock import patch
from django.db import IntegrityError
from sentry.mediators.sentry_apps import Creator
from sentry.models import (
AuditLogEntry,
AuditLogEntryEvent,
ApiApplication,
IntegrationFeature,
SentryApp,
SentryAppComponent,
User,
)
from sentry.testutils import TestCase
class TestCreator(TestCase):
def setUp(self):
self.user = self.create_user()
self.org = self.create_organization(owner=self.user)
self.creator = Creator(
name="nulldb",
user=self.user,
author="Sentry",
organization=self.org,
scopes=("project:read",),
webhook_url="http://example.com",
schema={"elements": [self.create_issue_link_schema()]},
is_internal=False,
)
def test_slug(self):
app = self.creator.call()
assert app.slug == "nulldb"
def test_creates_proxy_user(self):
self.creator.call()
assert User.objects.get(username__contains="nulldb", is_sentry_app=True)
def test_creates_api_application(self):
self.creator.call()
proxy = User.objects.get(username__contains="nulldb")
assert ApiApplication.objects.get(owner=proxy)
def test_creates_sentry_app(self):
self.creator.call()
proxy = User.objects.get(username__contains="nulldb")
app = ApiApplication.objects.get(owner=proxy)
sentry_app = SentryApp.objects.get(
name="nulldb", application=app, owner=self.org, proxy_user=proxy
)
assert sentry_app
assert sentry_app.scope_list == ["project:read"]
def test_expands_rolled_up_events(self):
self.creator.events = ["issue"]
app = self.creator.call()
sentry_app = SentryApp.objects.get(id=app.id)
assert "issue.created" in sentry_app.events
def test_creates_ui_components(self):
self.creator.schema = {
"elements": [self.create_issue_link_schema(), self.create_alert_rule_action_schema()]
}
app = self.creator.call()
assert SentryAppComponent.objects.filter(sentry_app_id=app.id, type="issue-link").exists()
assert SentryAppComponent.objects.filter(
sentry_app_id=app.id, type="alert-rule-action"
).exists()
def test_creates_integration_feature(self):
app = self.creator.call()
assert IntegrationFeature.objects.filter(sentry_app=app).exists()
@patch("sentry.mediators.sentry_apps.creator.Creator.log")
@patch("sentry.models.integrationfeature.IntegrationFeature.objects.create")
def test_raises_error_creating_integration_feature(self, mock_create, mock_log):
mock_create.side_effect = IntegrityError()
self.creator.call()
mock_log.assert_called_with(sentry_app="nulldb", error_message="")
def test_creates_audit_log_entry(self):
request = self.make_request(user=self.user, method="GET")
Creator.run(
name="nulldb",
user=self.user,
author="Sentry",
organization=self.org,
scopes=("project:read",),
webhook_url="http://example.com",
schema={"elements": [self.create_issue_link_schema()]},
request=request,
is_internal=False,
)
assert AuditLogEntry.objects.filter(event=AuditLogEntryEvent.SENTRY_APP_ADD).exists()
def test_blank_schema(self):
self.creator.schema = ""
assert self.creator.call()
def test_none_schema(self):
self.creator.schema = None
assert self.creator.call()
def test_schema_with_no_elements(self):
self.creator.schema = {"elements": []}
assert self.creator.call()
@patch("sentry.analytics.record")
def test_records_analytics(self, record):
sentry_app = Creator.run(
name="nulldb",
user=self.user,
author="Sentry",
organization=self.org,
scopes=("project:read",),
webhook_url="http://example.com",
schema={"elements": [self.create_issue_link_schema()]},
request=self.make_request(user=self.user, method="GET"),
is_internal=False,
)
record.assert_called_with(
"sentry_app.created",
user_id=self.user.id,
organization_id=self.org.id,
sentry_app=sentry_app.slug,
)
def test_allows_name_that_exists_as_username_already(self):
self.create_user(username="nulldb")
assert self.creator.call()
| 32.013793
| 98
| 0.640888
| 523
| 4,642
| 5.460803
| 0.210325
| 0.069328
| 0.063025
| 0.033613
| 0.428221
| 0.326681
| 0.27486
| 0.261204
| 0.261204
| 0.233543
| 0
| 0
| 0.246446
| 4,642
| 144
| 99
| 32.236111
| 0.816467
| 0
| 0
| 0.330435
| 0
| 0
| 0.091771
| 0.029513
| 0
| 0
| 0
| 0
| 0.13913
| 1
| 0.130435
| false
| 0
| 0.052174
| 0
| 0.191304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcbdf778d11c4a8378ce0f01967703c04ca3e0b9
| 17,869
|
py
|
Python
|
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | 1
|
2019-10-21T08:25:46.000Z
|
2019-10-21T08:25:46.000Z
|
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
# This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:48:39
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymdo = Parameter(name = 'ymdo',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{ymdo}',
lhablock = 'YUKAWA',
lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
nature = 'external',
type = 'real',
value = 0.00255,
texname = '\\text{ymup}',
lhablock = 'YUKAWA',
lhacode = [ 2 ])
yms = Parameter(name = 'yms',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{yms}',
lhablock = 'YUKAWA',
lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{ymc}',
lhablock = 'YUKAWA',
lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
yme = Parameter(name = 'yme',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{yme}',
lhablock = 'YUKAWA',
lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{ymm}',
lhablock = 'YUKAWA',
lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
kq = Parameter(name = 'kq',
nature = 'external',
type = 'real',
value = 0.001,
texname = 'k_q',
lhablock = 'FRBlock',
lhacode = [ 1 ])
lamf = Parameter(name = 'lamf',
nature = 'external',
type = 'real',
value = 0.1,
texname = 'l_{\\text{fi}}',
lhablock = 'FRBlock',
lhacode = [ 2 ])
yf1x1 = Parameter(name = 'yf1x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x1}',
lhablock = 'FRBlock6',
lhacode = [ 1, 1 ])
yf1x2 = Parameter(name = 'yf1x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x2}',
lhablock = 'FRBlock6',
lhacode = [ 1, 2 ])
yf1x3 = Parameter(name = 'yf1x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x3}',
lhablock = 'FRBlock6',
lhacode = [ 1, 3 ])
yf2x1 = Parameter(name = 'yf2x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x1}',
lhablock = 'FRBlock6',
lhacode = [ 2, 1 ])
yf2x2 = Parameter(name = 'yf2x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x2}',
lhablock = 'FRBlock6',
lhacode = [ 2, 2 ])
yf2x3 = Parameter(name = 'yf2x3',
nature = 'external',
type = 'complex',
value = 1.e-6,
texname = '\\text{yf2x3}',
lhablock = 'FRBlock6',
lhacode = [ 2, 3 ])
yf3x1 = Parameter(name = 'yf3x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x1}',
lhablock = 'FRBlock6',
lhacode = [ 3, 1 ])
yf3x2 = Parameter(name = 'yf3x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x2}',
lhablock = 'FRBlock6',
lhacode = [ 3, 2 ])
yf3x3 = Parameter(name = 'yf3x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x3}',
lhablock = 'FRBlock6',
lhacode = [ 3, 3 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
Me = Parameter(name = 'Me',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{Me}',
lhablock = 'MASS',
lhacode = [ 11 ])
MMU = Parameter(name = 'MMU',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{MMU}',
lhablock = 'MASS',
lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MU = Parameter(name = 'MU',
nature = 'external',
type = 'real',
value = 0.00255,
texname = 'M',
lhablock = 'MASS',
lhacode = [ 2 ])
MC = Parameter(name = 'MC',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{MC}',
lhablock = 'MASS',
lhacode = [ 4 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MD = Parameter(name = 'MD',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{MD}',
lhablock = 'MASS',
lhacode = [ 1 ])
MS = Parameter(name = 'MS',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{MS}',
lhablock = 'MASS',
lhacode = [ 3 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MP = Parameter(name = 'MP',
nature = 'external',
type = 'real',
value = 120,
texname = '\\text{MP}',
lhablock = 'MASS',
lhacode = [ 9000005 ])
Mfi = Parameter(name = 'Mfi',
nature = 'external',
type = 'real',
value = 10,
texname = '\\text{Mfi}',
lhablock = 'MASS',
lhacode = [ 9000006 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00589569,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
WH1 = Parameter(name = 'WH1',
nature = 'external',
type = 'real',
value = 0.00575308848,
texname = '\\text{WH1}',
lhablock = 'DECAY',
lhacode = [ 9000005 ])
Wfi = Parameter(name = 'Wfi',
nature = 'external',
type = 'real',
value = 6.03044e-9,
texname = '\\text{Wfi}',
lhablock = 'DECAY',
lhacode = [ 9000006 ])
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM1x3 = Parameter(name = 'CKM1x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM1x3}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
CKM2x3 = Parameter(name = 'CKM2x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM2x3}')
CKM3x1 = Parameter(name = 'CKM3x1',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x1}')
CKM3x2 = Parameter(name = 'CKM3x2',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x2}')
CKM3x3 = Parameter(name = 'CKM3x3',
nature = 'internal',
type = 'complex',
value = '1',
texname = '\\text{CKM3x3}')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
mfi = Parameter(name = 'mfi',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(100 - (kq*vev**2)/2.)',
texname = 'M_{\\text{fi}}')
AH = Parameter(name = 'AH',
nature = 'internal',
type = 'real',
value = '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)',
texname = 'A_H')
GH = Parameter(name = 'GH',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)',
texname = 'G_H')
Gphi = Parameter(name = 'Gphi',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)',
texname = 'G_h')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/vev',
texname = '\\text{yb}')
yc = Parameter(name = 'yc',
nature = 'internal',
type = 'real',
value = '(ymc*cmath.sqrt(2))/vev',
texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
nature = 'internal',
type = 'real',
value = '(ymdo*cmath.sqrt(2))/vev',
texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
nature = 'internal',
type = 'real',
value = '(yme*cmath.sqrt(2))/vev',
texname = '\\text{ye}')
ym = Parameter(name = 'ym',
nature = 'internal',
type = 'real',
value = '(ymm*cmath.sqrt(2))/vev',
texname = '\\text{ym}')
ys = Parameter(name = 'ys',
nature = 'internal',
type = 'real',
value = '(yms*cmath.sqrt(2))/vev',
texname = '\\text{ys}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
nature = 'internal',
type = 'real',
value = '(ymup*cmath.sqrt(2))/vev',
texname = '\\text{yup}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
| 31.459507
| 288
| 0.38245
| 1,497
| 17,869
| 4.553106
| 0.151637
| 0.148768
| 0.114437
| 0.109742
| 0.456866
| 0.361796
| 0.268192
| 0.235915
| 0.118545
| 0.031103
| 0
| 0.062539
| 0.466674
| 17,869
| 567
| 289
| 31.514991
| 0.652676
| 0.012983
| 0
| 0.550209
| 0
| 0.008368
| 0.188883
| 0.038911
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004184
| 0
| 0.004184
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcbfd5dadc46bd5eab08a4b4f4c45a601d0075b6
| 16,826
|
py
|
Python
|
octoprint_octopod/__init__.py
|
mnebelung/OctoPrint-OctoPod
|
3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a
|
[
"Apache-2.0"
] | 52
|
2019-05-28T03:41:20.000Z
|
2022-01-29T00:32:57.000Z
|
octoprint_octopod/__init__.py
|
mnebelung/OctoPrint-OctoPod
|
3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a
|
[
"Apache-2.0"
] | 111
|
2019-05-28T14:50:01.000Z
|
2022-03-21T22:12:05.000Z
|
octoprint_octopod/__init__.py
|
mnebelung/OctoPrint-OctoPod
|
3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a
|
[
"Apache-2.0"
] | 11
|
2019-07-20T15:36:21.000Z
|
2021-12-30T16:53:56.000Z
|
# coding=utf-8
from __future__ import absolute_import
import datetime
import logging
import sys
import flask
import octoprint.plugin
from octoprint.events import eventManager, Events
from octoprint.server import user_permission
from octoprint.util import RepeatedTimer
from .bed_notifications import BedNotifications
from .custom_notifications import CustomNotifications
from .ifttt_notifications import IFTTTAlerts
from .job_notifications import JobNotifications
from .layer_notifications import LayerNotifications
from .libs.sbc import SBCFactory, SBC, RPi
from .mmu import MMUAssistance
from .palette2 import Palette2Notifications
from .paused_for_user import PausedForUser
from .soc_temp_notifications import SocTempNotifications
from .thermal_protection_notifications import ThermalProtectionNotifications
from .tools_notifications import ToolsNotifications
# Plugin that stores APNS tokens reported from iOS devices to know which iOS devices to alert
# when print is done or other relevant events
debug_soc_temp = False
class OctopodPlugin(octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.StartupPlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.ProgressPlugin):
def __init__(self):
super(OctopodPlugin, self).__init__()
self._logger = logging.getLogger("octoprint.plugins.octopod")
self._checkTempTimer = None
self._ifttt_alerts = IFTTTAlerts(self._logger)
self._job_notifications = JobNotifications(self._logger, self._ifttt_alerts)
self._tool_notifications = ToolsNotifications(self._logger, self._ifttt_alerts)
self._bed_notifications = BedNotifications(self._logger, self._ifttt_alerts)
self._mmu_assitance = MMUAssistance(self._logger, self._ifttt_alerts)
self._paused_for_user = PausedForUser(self._logger, self._ifttt_alerts)
self._palette2 = Palette2Notifications(self._logger, self._ifttt_alerts)
self._layerNotifications = LayerNotifications(self._logger, self._ifttt_alerts)
self._check_soc_temp_timer = None
self._soc_timer_interval = 5.0 if debug_soc_temp else 30.0
self._soc_temp_notifications = SocTempNotifications(self._logger, self._ifttt_alerts, self._soc_timer_interval,
debug_soc_temp)
self._custom_notifications = CustomNotifications(self._logger)
self._thermal_protection_notifications = ThermalProtectionNotifications(self._logger, self._ifttt_alerts)
# StartupPlugin mixin
def on_after_startup(self):
self._logger.info("OctoPod loaded!")
# Set logging level to what we have in the settings
if self._settings.get_boolean(["debug_logging"]):
self._logger.setLevel(logging.DEBUG)
else:
self._logger.setLevel(logging.INFO)
# Register to listen for messages from other plugins
self._plugin_manager.register_message_receiver(self.on_plugin_message)
# Start timer that will check bed temperature and send notifications if needed
self._restart_timer()
# if running on linux then check soc temperature
if sys.platform.startswith("linux") or debug_soc_temp:
sbc = RPi(self._logger) if debug_soc_temp else SBCFactory().factory(self._logger)
if sbc.is_supported:
self._soc_temp_notifications.sbc = sbc
sbc.debugMode = debug_soc_temp
self._soc_temp_notifications.send_plugin_message = self.send_plugin_message
self.start_soc_timer(self._soc_timer_interval)
# SettingsPlugin mixin
def get_settings_defaults(self):
return dict(
debug_logging=False,
server_url='http://octopodprint.com/',
camera_snapshot_url='http://localhost:8080/?action=snapshot',
tokens=[],
sound_notification='default',
temp_interval=5,
tool0_low=0,
tool0_target_temp=False,
bed_low=30,
bed_target_temp_hold=10,
mmu_interval=5,
pause_interval=5,
palette2_printing_error_codes=[103, 104, 111, 121],
progress_type='50', # 0=disabled, 25=every 25%, 50=every 50%, 100=only when finished
ifttt_key='',
ifttt_name='',
soc_temp_high=75,
thermal_runway_threshold=10,
thermal_threshold_minutes_frequency=10,
thermal_cooldown_seconds_threshold=14,
thermal_warmup_bed_seconds_threshold=19,
thermal_warmup_hotend_seconds_threshold=39,
thermal_warmup_chamber_seconds_threshold=19,
thermal_below_target_threshold=5,
webcam_flipH=False,
webcam_flipV=False,
webcam_rotate90=False,
notify_first_X_layers=1,
print_complete_delay_seconds=0
)
def on_settings_save(self, data):
old_debug_logging = self._settings.get_boolean(["debug_logging"])
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
new_debug_logging = self._settings.get_boolean(["debug_logging"])
if old_debug_logging != new_debug_logging:
if new_debug_logging:
self._logger.setLevel(logging.DEBUG)
else:
self._logger.setLevel(logging.INFO)
def get_settings_version(self):
return 13
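
	# OctoPrint invokes on_settings_migrate(target, current) below whenever the settings
	# version stored in config.yaml differs from the value returned above; "current" is
	# None when no version was ever stored, which is why every guard also checks for None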
def on_settings_migrate(self, target, current):
if current is None or current == 1:
			# Add the 2 new settings introduced in this settings version
self._settings.set(['temp_interval'], self.get_settings_defaults()["temp_interval"])
self._settings.set(['bed_low'], self.get_settings_defaults()["bed_low"])
if current is None or current <= 2:
self._settings.set(['bed_target_temp_hold'], self.get_settings_defaults()["bed_target_temp_hold"])
if current is None or current <= 3:
self._settings.set(['mmu_interval'], self.get_settings_defaults()["mmu_interval"])
if current is None or current <= 4:
self._settings.set(['pause_interval'], self.get_settings_defaults()["pause_interval"])
if current is None or current <= 5:
self._settings.set(['tool0_low'], self.get_settings_defaults()["tool0_low"])
if current is None or current <= 6:
self._settings.set(['palette2_printing_error_codes'],
self.get_settings_defaults()["palette2_printing_error_codes"])
if current is None or current <= 7:
self._settings.set(['progress_type'], self.get_settings_defaults()["progress_type"])
if current is None or current <= 8:
self._settings.set(['ifttt_key'], self.get_settings_defaults()["ifttt_key"])
self._settings.set(['ifttt_name'], self.get_settings_defaults()["ifttt_name"])
if current is None or current <= 9:
self._settings.set(['soc_temp_high'], self.get_settings_defaults()["soc_temp_high"])
self._settings.set(['webcam_flipH'], self._settings.global_get(["webcam", "flipH"]))
self._settings.set(['webcam_flipV'], self._settings.global_get(["webcam", "flipV"]))
self._settings.set(['webcam_rotate90'], self._settings.global_get(["webcam", "rotate90"]))
if current is None or current <= 10:
self._settings.set(['tool0_target_temp'], self.get_settings_defaults()["tool0_target_temp"])
if current is None or current <= 11:
self._settings.set(['thermal_runway_threshold'], self.get_settings_defaults()["thermal_runway_threshold"])
self._settings.set(['thermal_threshold_minutes_frequency'], self.get_settings_defaults()["thermal_threshold_minutes_frequency"])
self._settings.set(['sound_notification'], self.get_settings_defaults()["sound_notification"])
if current is None or current <= 12:
self._settings.set(['thermal_cooldown_seconds_threshold'], self.get_settings_defaults()["thermal_cooldown_seconds_threshold"])
self._settings.set(['thermal_below_target_threshold'], self.get_settings_defaults()["thermal_below_target_threshold"])
self._settings.set(['thermal_warmup_bed_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_bed_seconds_threshold"])
self._settings.set(['thermal_warmup_hotend_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_hotend_seconds_threshold"])
self._settings.set(['thermal_warmup_chamber_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_chamber_seconds_threshold"])
if current is None or current <= 13:
self._settings.set(['notify_first_X_layers'], self.get_settings_defaults()["notify_first_X_layers"])

	# AssetPlugin mixin
def get_assets(self):
# Define your plugin's asset files to automatically include in the
# core UI here.
return dict(
js=["js/octopod.js"],
css=["css/octopod.css"],
)
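
	# OctoPrint serves these bundles under /plugin/<identifier>/static/, so
	# (assuming the identifier is "octopod") the script above is reachable at
	# /plugin/octopod/static/js/octopod.js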

	# ProgressPlugin
# progress-hook
def on_print_progress(self, storage, path, progress):
# progress 0 - 100
self._job_notifications.on_print_progress(self._settings, progress)

	# EventHandlerPlugin mixin
def on_event(self, event, payload):
if event == Events.PRINTER_STATE_CHANGED:
self._job_notifications.send__print_job_notification(self._settings, self._printer, payload)
elif event == "DisplayLayerProgress_layerChanged":
# Event sent from DisplayLayerProgress plugin when there was a detected layer changed
self._layerNotifications.layer_changed(self._settings, payload["currentLayer"])
		elif event in (Events.PRINT_STARTED, Events.PRINT_DONE, Events.PRINT_CANCELLED, Events.PRINT_FAILED):
			# Reset the layers for which we need to send a notification; each new print job gets its own list
			self._layerNotifications.reset_layers()
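
	# For PRINTER_STATE_CHANGED the payload carries at least "state_id" and
	# "state_string"; the "test" branch of on_api_command below simulates
	# exactly such a payload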

	# SimpleApiPlugin mixin
def update_token(self, old_token, new_token, device_name, printer_id, printer_name, language_code):
self._logger.debug("Received tokens for %s." % device_name)
existing_tokens = self._settings.get(["tokens"])
# Safety check in case a user manually modified config.yaml and left invalid JSON
if existing_tokens is None:
existing_tokens = []
found = False
updated = False
for token in existing_tokens:
# Check if existing token has been updated
if token["apnsToken"] == old_token and token["printerID"] == printer_id:
if old_token != new_token:
self._logger.debug("Updating token for %s." % device_name)
# Token that exists needs to be updated with new token
token["apnsToken"] = new_token
token["date"] = datetime.datetime.now().strftime("%x %X")
updated = True
found = True
elif token["apnsToken"] == new_token and token["printerID"] == printer_id:
found = True
if found:
if printer_name is not None and ("printerName" not in token or token["printerName"] != printer_name):
# Printer name in OctoPod has been updated
token["printerName"] = printer_name
token["date"] = datetime.datetime.now().strftime("%x %X")
updated = True
if language_code is not None and (
"languageCode" not in token or token["languageCode"] != language_code):
# Language being used by OctoPod has been updated
token["languageCode"] = language_code
token["date"] = datetime.datetime.now().strftime("%x %X")
updated = True
break
if not found:
self._logger.debug("Adding token for %s." % device_name)
# Token was not found so we need to add it
existing_tokens.append(
{'apnsToken': new_token, 'deviceName': device_name, 'date': datetime.datetime.now().strftime("%x %X"),
'printerID': printer_id, 'printerName': printer_name, 'languageCode': language_code})
updated = True
if updated:
# Save new settings
self._settings.set(["tokens"], existing_tokens)
self._settings.save()
eventManager().fire(Events.SETTINGS_UPDATED)
self._logger.debug("Tokens saved")
def get_api_commands(self):
return dict(updateToken=["oldToken", "newToken", "deviceName", "printerID"], test=[],
snooze=["eventCode", "minutes"], addLayer=["layer"], removeLayer=["layer"], getLayers=[],
getSoCTemps=[])
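
	# Clients reach these commands through OctoPrint's SimpleApiPlugin endpoint
	# (POST /api/plugin/<identifier>) using the regular API-key authentication.
	# Hypothetical client-side sketch, assuming the identifier is "octopod":
	#
	#   import requests
	#   requests.post("http://octopi.local/api/plugin/octopod",
	#                 headers={"X-Api-Key": "YOUR_API_KEY"},
	#                 json={"command": "snooze", "eventCode": "mmu-event", "minutes": 30})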
def on_api_command(self, command, data):
if not user_permission.can():
return flask.make_response("Insufficient rights", 403)
if command == 'updateToken':
# Convert from ASCII to UTF-8 since some chars will fail otherwise (e.g. apostrophe) - Only for Python 2
if sys.version_info[0] == 2:
data["deviceName"] = data["deviceName"].encode("utf-8")
printer_name = data["printerName"] if 'printerName' in data else None
language_code = data["languageCode"] if 'languageCode' in data else None
self.update_token("{oldToken}".format(**data), "{newToken}".format(**data), "{deviceName}".format(**data),
"{printerID}".format(**data), printer_name, language_code)
elif command == 'test':
payload = dict(
state_id="OPERATIONAL",
state_string="Operational"
)
code = self._job_notifications.send__print_job_notification(self._settings, self._printer, payload,
data["server_url"], data["camera_snapshot_url"],
data["camera_flip_h"], data["camera_flip_v"],
data["camera_rotate90"],
True)
return flask.jsonify(dict(code=code))
elif command == 'snooze':
if data["eventCode"] == 'mmu-event':
				self._mmu_assistance.snooze(data["minutes"])
else:
return flask.make_response("Snooze for unknown event", 400)
elif command == 'addLayer':
self._layerNotifications.add_layer(data["layer"])
elif command == 'removeLayer':
self._layerNotifications.remove_layer(data["layer"])
elif command == 'getLayers':
return flask.jsonify(dict(layers=self._layerNotifications.get_layers()))
elif command == 'getSoCTemps':
return flask.jsonify(self._soc_temp_notifications.get_soc_temps())
else:
return flask.make_response("Unknown command", 400)

	# TemplatePlugin mixin
def get_template_configs(self):
return [
dict(type="settings", name="OctoPod Notifications", custom_bindings=True)
]
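
	# OctoPrint's naming convention maps this settings dialog to
	# templates/<identifier>_settings.jinja2 (presumably octopod_settings.jinja2);
	# custom_bindings=True means the plugin's own view model from js/octopod.js
	# performs the Knockout binding instead of the default one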

	# Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
octopod=dict(
displayName="OctoPod Plugin",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="gdombiak",
repo="OctoPrint-OctoPod",
current=self._plugin_version,
# update method: pip
pip="https://github.com/gdombiak/OctoPrint-OctoPod/archive/{target_version}.zip"
)
)
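
	# The Software Update plugin replaces {target_version} in the pip URL above
	# with the tag of the GitHub release it is updating to, so tagged releases
	# double as the pip install artifacts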

	# Plugin messages
def on_plugin_message(self, plugin, data, permissions=None):
self._palette2.check_plugin_message(self._settings, plugin, data)
def send_plugin_message(self, data):
self._plugin_manager.send_plugin_message(self._identifier, data)
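
	# On the browser side these messages are delivered to the plugin's view model
	# through OctoPrint's standard onDataUpdaterPluginMessage(plugin, data)
	# callback (presumably handled in js/octopod.js)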

	# Timer functions
def _restart_timer(self):
# stop the timer
if self._checkTempTimer:
self._logger.debug(u"Stopping Timer...")
self._checkTempTimer.cancel()
self._checkTempTimer = None
# start a new timer
interval = self._settings.get_int(['temp_interval'])
if interval:
self._logger.debug(u"Starting Timer...")
			self._checkTempTimer = RepeatedTimer(interval, self.run_timer_job, run_first=True)
self._checkTempTimer.start()
def run_timer_job(self):
self._bed_notifications.check_temps(self._settings, self._printer)
self._tool_notifications.check_temps(self._settings, self._printer)
self._thermal_protection_notifications.check_temps(self._settings, self._printer)
def start_soc_timer(self, interval):
self._logger.debug(u"Monitoring SoC temp with Timer")
self._check_soc_temp_timer = RepeatedTimer(interval, self.update_soc_temp, run_first=True)
self._check_soc_temp_timer.start()
def update_soc_temp(self):
self._soc_temp_notifications.check_soc_temp(self._settings)

	# GCODE hook
	def process_gcode(self, comm, line, *args, **kwargs):
		# Called for every line received from the printer via the
		# octoprint.comm.protocol.gcode.received hook registered below;
		# the returned (possibly rewritten) line is what OctoPrint keeps processing
		line = self._paused_for_user.process_gcode(self._settings, self._printer, line)
		return self._mmu_assistance.process_gcode(self._settings, line)

	# Helper functions
def push_notification(self, message, image=None):
"""
Send arbitrary push notification to OctoPod app running on iPhone (includes Apple Watch and iPad)
via the OctoPod APNS service.
:param message: (String) Message to include in the notification
:param image: Optional. (PIL Image) Image to include in the notification
:return: True if the notification was successfully sent
"""
return self._custom_notifications.send_notification(self._settings, message, image)
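
	# Hypothetical example of another plugin consuming this helper, which is registered
	# below under the "apns_notification" key via __plugin_helpers__:
	#
	#   helpers = self._plugin_manager.get_helpers("octopod", "apns_notification")
	#   if helpers and "apns_notification" in helpers:
	#       helpers["apns_notification"]("Hello from another plugin!")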


# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "OctoPod Plugin"
__plugin_pythoncompat__ = ">=2.7,<4"


def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = OctopodPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information,
"octoprint.comm.protocol.gcode.received": __plugin_implementation__.process_gcode
}
global __plugin_helpers__
__plugin_helpers__ = {
"apns_notification": __plugin_implementation__.push_notification
}