Dataset schema (one table row per source file; fields listed as name: type):

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_* and max_forks_*: same shape as the max_stars_* fields (path/name/head_hexsha/licenses, plus max_issues_count / max_forks_count: int64 and the corresponding issue/fork event min/max datetimes: string)
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_*_quality_signal columns: num_words int64; num_chars, mean_word_length, frac_words_unique, frac_chars_top_{2,3,4}grams, frac_chars_dupe_{5..10}grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert all float64
qsc_codepython_*_quality_signal columns: cate_ast, frac_lines_func_ratio float64; cate_var_zero bool; frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print float64
raw qsc_code_* / qsc_codepython_* counterparts (same names without the _quality_signal suffix): int64, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data (null)
effective: string | hits: int64
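Read programmatically, the schema above is straightforward to filter on. A minimal sketch, assuming one shard of the dataset is available locally as Parquet (the file name data.parquet is hypothetical) and that pandas with a Parquet engine is installed:

# Minimal sketch: load one shard of this dataset and keep files that pass a few
# of the qsc_* quality signals. "data.parquet" is a hypothetical local shard name.
import pandas as pd

df = pd.read_parquet("data.parquet")

mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"].between(0.25, 0.90))
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)  # low near-duplicate content
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)            # parses as valid Python
)
kept = df[mask]
print(f"kept {len(kept)} of {len(df)} files")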
---
hexsha: 7bbdf574388c84658ffc5b1e989b4bad6ddb075e | size: 9,045 | ext: py | lang: Python
max_stars: path befh/exchanges/okex_spot.py | repo philsong/BitcoinExchangeFH @ 3c45d4be2ea2a258f132d982f62f69d649e0b083 | licenses ["Apache-2.0"] | count 32 | events 2017-12-15T07:30:11.000Z → 2020-07-16T10:15:18.000Z
max_issues: path befh/exchanges/okex_spot.py | repo bijiasuo/BitcoinExchangeFH @ 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | licenses ["Apache-2.0"] | count null | events null
max_forks: path befh/exchanges/okex_spot.py | repo bijiasuo/BitcoinExchangeFH @ 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | licenses ["Apache-2.0"] | count 20 | events 2017-11-09T15:28:39.000Z → 2019-12-10T01:02:57.000Z
content:
from befh.ws_api_socket import WebSocketApiClient
from befh.market_data import L2Depth, Trade
from befh.exchanges.gateway import ExchangeGateway
from befh.instrument import Instrument
from befh.util import Logger
from befh.clients.sql_template import SqlClientTemplate
import time
import threading
import json
from functools import partial
from datetime import datetime
class ExchGwApiOkexSpotWs(WebSocketApiClient):
"""
Exchange socket
"""
def __init__(self):
"""
Constructor
"""
WebSocketApiClient.__init__(self, 'ExchGwOkexSpot')
@classmethod
def get_timestamp_offset(cls):
return 1000
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'timestamp'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_link(cls):
return 'wss://real.okex.com:10441/websocket'
@classmethod
def get_order_book_subscription_string(cls, instmt):
return json.dumps({"event":"addChannel", "channel": instmt.get_order_book_channel_id()})
@classmethod
def get_trades_subscription_string(cls, instmt):
return json.dumps({"event":"addChannel", "channel": instmt.get_trades_channel_id()})
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
# l2_depth = instmt.get_l2_depth()
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_order_book_timestamp_field_name() in keys and \
cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
timestamp = float(raw[cls.get_order_book_timestamp_field_name()])/cls.get_timestamp_offset()
l2_depth.date_time = datetime.utcfromtimestamp(timestamp).strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
max_bid_len = min(len(bids), 5)
for i in range(0, max_bid_len):
l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
max_ask_len = min(len(asks), 5)
for i in range(0, max_ask_len):
l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
else:
            raise Exception('Raw data does not contain order book keys for instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
trade_id = raw[0]
trade_price = float(raw[1])
trade_volume = float(raw[2])
date_time = raw[3]
trade_side = raw[4]
# trade.date_time = date_time
trade.trade_id = str(trade_id)
trade.trade_price = trade_price
trade.trade_volume = trade_volume
trade.trade_side = Trade.parse_side(trade_side)
return trade
class ExchGwOkexSpot(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
        :param db_clients: List of database clients
"""
ExchangeGateway.__init__(self, ExchGwApiOkexSpotWs(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Okex'
def on_open_handler(self, instmt, ws):
"""
Socket on open handler
:param instmt: Instrument
:param ws: Web socket
"""
Logger.info(self.__class__.__name__, "Instrument %s is subscribed in channel %s" % \
(instmt.get_instmt_code(), instmt.get_exchange_name()))
if not instmt.get_subscribed():
instmt_code_split = instmt.get_instmt_code().split('_')
if len(instmt_code_split) == 2:
                # Instrument code with an underscore (e.g. 'bch_btc')
instmt.set_order_book_channel_id("ok_sub_spot_%s_%s_depth_5" % \
(instmt_code_split[0].lower(),
instmt_code_split[1].lower()))
instmt.set_trades_channel_id("ok_sub_spot_%s_%s_deals" % \
(instmt_code_split[0].lower(),
instmt_code_split[1].lower()))
else:
                # Instrument code without an underscore
instmt.set_order_book_channel_id("ok_sub_spot_%s_depth_5" % instmt.get_instmt_code().lower())
instmt.set_trades_channel_id("ok_sub_spot_%s_deals" % instmt.get_instmt_code().lower())
ws.send(self.api_socket.get_order_book_subscription_string(instmt))
# ws.send(self.api_socket.get_trades_subscription_string(instmt))
instmt.set_subscribed(True)
def on_close_handler(self, instmt, ws):
"""
Socket on close handler
:param instmt: Instrument
:param ws: Web socket
"""
Logger.info(self.__class__.__name__, "Instrument %s is unsubscribed in channel %s" % \
(instmt.get_instmt_code(), instmt.get_exchange_name()))
instmt.set_subscribed(False)
def on_message_handler(self, instmt, messages):
"""
Incoming message handler
:param instmt: Instrument
        :param messages: List of messages
"""
for message in messages:
keys = message.keys()
# print(keys)
if 'channel' in keys:
if 'data' in keys:
if message['channel'] == instmt.get_order_book_channel_id():
data = message['data']
l2_depth = self.api_socket.parse_l2_depth(instmt, data)
if l2_depth is not None:
# Insert only if the first 5 levels are different
# if l2_depth is not None and instmt.get_l2_depth().is_diff(instmt.get_prev_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
elif message['channel'] == instmt.get_trades_channel_id():
for trade_raw in message['data']:
trade = self.api_socket.parse_trade(instmt, trade_raw)
if trade.trade_id != instmt.get_exch_trade_id():
instmt.incr_trade_id()
instmt.set_exch_trade_id(trade.trade_id)
self.insert_trade(instmt, trade)
elif 'success' in keys:
Logger.info(self.__class__.__name__, "Subscription to channel %s is %s" \
% (message['channel'], message['success']))
else:
Logger.info(self.__class__.__name__, ' - ' + json.dumps(message))
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_prev_l2_depth(L2Depth(20))
instmt.set_l2_depth(L2Depth(20))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
return [self.api_socket.connect(self.api_socket.get_link(),
on_message_handler=partial(self.on_message_handler, instmt),
on_open_handler=partial(self.on_open_handler, instmt),
on_close_handler=partial(self.on_close_handler, instmt))]
if __name__ == '__main__':
exchange_name = 'Okex'
instmt_name = 'BCHBTC'
instmt_code = 'BCH_BTC'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
Logger.init_log()
exch = ExchGwOkexSpot([db_client])
td = exch.start(instmt)
avg_line_length: 38.326271 | max_line_length: 116 | alphanum_fraction: 0.570591
qsc_code quality signals: num_words 1,044 | num_chars 9,045 | mean_word_length 4.627395 | frac_words_unique 0.165709 | frac_chars_top_2grams 0.033326 | frac_chars_top_3grams 0.028152 | frac_chars_top_4grams 0.02691 | frac_chars_dupe_5grams 0.336576 | frac_chars_dupe_6grams 0.274477 | frac_chars_dupe_7grams 0.217346 | frac_chars_dupe_8grams 0.170358 | frac_chars_dupe_9grams 0.153798 | frac_chars_dupe_10grams 0.136411 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.01172 | frac_chars_whitespace 0.330238 | size_file_byte 9,045 | num_lines 235 | num_chars_line_max 117 | num_chars_line_mean 38.489362 | frac_chars_alphabet 0.785738 | frac_chars_comments 0.100387 | cate_xml_start 0 | frac_lines_dupe_lines 0.131034 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.062363 | frac_chars_long_word_length 0.016202 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.110345 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.075862 | frac_lines_simplefunc 0.048276 | score_lines_no_logic 0.275862 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bbe5cef3d1aeca66fb6ca826edab503eb8c860b | size: 587 | ext: py | lang: Python
max_stars: path hardhat/recipes/python/twisted.py | repo stangelandcl/hardhat @ 1ad0c5dec16728c0243023acb9594f435ef18f9c | licenses ["MIT"] | count null | events null
max_issues: same path/repo/sha | licenses ["MIT"] | count null | events null
max_forks: same path/repo/sha | licenses ["MIT"] | count null | events null
content:
from .base import PipBaseRecipe
class TwistedRecipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(TwistedRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'a4cc164a781859c74de47f17f0e85f4b' \
'ce8a3321a9d0892c015c8f80c4158ad9'
self.pythons = ['python3']
self.pydepends = ['Automat',
'constantly',
'hyperlink',
'incremental',
'zope.interface']
self.name = 'twisted'
self.version = '18.4.0'
avg_line_length: 32.611111 | max_line_length: 60 | alphanum_fraction: 0.524702
qsc_code quality signals: num_words 41 | num_chars 587 | mean_word_length 7.317073 | frac_words_unique 0.731707 | frac_chars_top_2grams 0.066667 | frac_chars_top_3grams 0 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.13172 | frac_chars_whitespace 0.366269 | size_file_byte 587 | num_lines 17 | num_chars_line_max 61 | num_chars_line_mean 34.529412 | frac_chars_alphabet 0.674731 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.229983 | frac_chars_long_word_length 0.109029 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.071429 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.071429 | frac_lines_simplefunc 0 | score_lines_no_logic 0.214286 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bbf00877f721b0c24c4e63d13a17b9fddb98274 | size: 250 | ext: py | lang: Python
max_stars: path EXC/CW1/task3/combiner.py | repo easyCZ/UoE-Projects @ 7651c8caf329c4f7b4562eba441bfc24124cfcfd | licenses ["BSD-2-Clause"] | count null | events null
max_issues: same path/repo/sha | licenses ["BSD-2-Clause"] | count 1 | events 2022-02-23T07:34:53.000Z → 2022-02-23T07:34:53.000Z
max_forks: same path/repo/sha | licenses ["BSD-2-Clause"] | count null | events null
content:
#!/usr/bin/python
# combiner.py
import sys
word_count = 0
line_count = 0
for line in sys.stdin:
words, lines = line.strip().split('\t')
word_count += int(words)
line_count += int(lines)
print("{0}\t{1}".format(word_count, line_count))
avg_line_length: 17.857143 | max_line_length: 48 | alphanum_fraction: 0.66
qsc_code quality signals: num_words 41 | num_chars 250 | mean_word_length 3.878049 | frac_words_unique 0.560976 | frac_chars_top_2grams 0.169811 | frac_chars_top_3grams 0 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.019231 | frac_chars_whitespace 0.168 | size_file_byte 250 | num_lines 14 | num_chars_line_max 48 | num_chars_line_mean 17.857143 | frac_chars_alphabet 0.745192 | frac_chars_comments 0.112 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.045249 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.125 | frac_lines_simplefunc 0 | score_lines_no_logic 0.125 | frac_lines_print 0.125
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bbf1685508e5466a589c9ca9ef370e0a3b9611c | size: 1,376 | ext: py | lang: Python
max_stars: path tests/exploratory/user_data/radish/steps.py | repo tuxrosi/radish @ b21fa751f8dfc4309451476151c810b44975babb | licenses ["MIT"] | count null | events null
max_issues: same path/repo/sha | licenses ["MIT"] | count null | events null
max_forks: same path/repo/sha | licenses ["MIT"] | count null | events null
content:
import re
from radish.stepregistry import step
from radish import when, then
from radish.terrain import world
@step(re.compile("I have the number in user data as (.+)"))
def have_number(step, input_variable):
if world.config.user_data:
if input_variable in world.config.user_data:
step.context.numbers.append(int(world.config.user_data[input_variable]))
else:
msg = "Variable [{0}] is not in the user data (-u/--user-data) specified on the command-line."
assert False, msg.format(input_variable)
else:
assert (
False
), "There is no user data (-u/--user-data) specified on the command-line."
@when("I sum them")
def sum_numbers(step):
step.context.result = sum(step.context.numbers)
@then(re.compile("I expect the result to be the value in user data as (.+)"))
def expect_result(step, result_variable):
if world.config.user_data:
if result_variable in world.config.user_data:
assert step.context.result == int(world.config.user_data[result_variable])
else:
msg = "Variable [{0}] is not in the user data (-u/--user-data) specified on the command-line."
            assert False, msg.format(result_variable)
else:
assert (
False
), "There is no user data (-u/--user-data) specified on the command-line."
avg_line_length: 35.282051 | max_line_length: 106 | alphanum_fraction: 0.652616
qsc_code quality signals: num_words 198 | num_chars 1,376 | mean_word_length 4.449495 | frac_words_unique 0.252525 | frac_chars_top_2grams 0.145289 | frac_chars_top_3grams 0.102157 | frac_chars_top_4grams 0.129398 | frac_chars_dupe_5grams 0.61748 | frac_chars_dupe_6grams 0.533485 | frac_chars_dupe_7grams 0.46765 | frac_chars_dupe_8grams 0.397276 | frac_chars_dupe_9grams 0.397276 | frac_chars_dupe_10grams 0.397276 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.001899 | frac_chars_whitespace 0.234738 | size_file_byte 1,376 | num_lines 38 | num_chars_line_max 107 | num_chars_line_mean 36.210526 | frac_chars_alphabet 0.834758 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0.516129 | cate_autogen 0 | frac_lines_long_string 0.064516 | frac_chars_string_length 0.300872 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.16129
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.096774 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.129032 | frac_lines_simplefunc 0 | score_lines_no_logic 0.225806 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bbf1d84d1d1e722a857754d78ceb86118a7eadb | size: 3,462 | ext: py | lang: Python
max_stars: path django/core/views.py | repo andreyvpng/askme @ 65139c347a6b80f0a660ca24d6dd864e4531903a | licenses ["Apache-2.0"] | count 2 | events 2018-10-29T09:37:47.000Z → 2019-11-28T14:11:12.000Z
max_issues: same path/repo/sha | licenses ["Apache-2.0"] | count null | events null
max_forks: same path/repo/sha | licenses ["Apache-2.0"] | count 2 | events 2018-09-18T14:09:46.000Z → 2019-11-28T14:11:14.000Z
content:
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.urls import reverse_lazy
from django.urls.base import reverse
from django.views.generic import CreateView, DeleteView, DetailView, View
from .forms import AnswerForm, QuestionForm
from .models import Answer, Like, Question
User = get_user_model()
class AnswerDetailView(DetailView):
queryset = Answer.objects.all_with_question()
class AnswerCreateView(LoginRequiredMixin, CreateView):
model = Answer
form_class = AnswerForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.question = self.get_question()
if self.object.question.asked_to != self.request.user:
return HttpResponseBadRequest()
self.object.save()
return super().form_valid(form)
def get_question(self):
return Question.objects.get(id=self.kwargs['pk'])
class AnswerDeleteView(LoginRequiredMixin, DeleteView):
model = Answer
success_url = reverse_lazy('user:my-profile')
def dispatch(self, *args, **kwargs):
answer = self.get_object()
if answer.question.asked_to != self.request.user:
raise PermissionDenied
return super().dispatch(*args, **kwargs)
class PrivateQuestionDetailView(DetailView):
model = Question
def dispatch(self, *args, **kwargs):
question = self.get_object()
if question.asked_to != self.request.user:
raise PermissionDenied
try:
            # If the question already has an answer, redirect to its page
            return HttpResponseRedirect(question.answer.get_absolute_url())
except ObjectDoesNotExist:
pass
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
answer_form = AnswerForm()
ctx.update({'answer_form': answer_form})
return ctx
class QuestionCreateView(LoginRequiredMixin, CreateView):
model = Question
form_class = QuestionForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.asked_by = self.request.user
self.object.asked_to = self.get_user()
self.object.save()
return super().form_valid(form)
def get_success_url(self):
return reverse('user:profile', kwargs={
'pk': self.get_user().id
})
def get_user(self):
return User.objects.get(id=self.kwargs['pk'])
class QuestionDeleteView(LoginRequiredMixin, DeleteView):
model = Question
success_url = reverse_lazy('user:inbox')
def dispatch(self, *args, **kwargs):
question = self.get_object()
if question.asked_to != self.request.user:
raise PermissionDenied
return super().dispatch(*args, **kwargs)
class LikeView(LoginRequiredMixin, View):
def post(self, request, pk):
answer = Answer.objects.get(id=pk)
like = Like.objects.filter(answer=answer,
liked_by=request.user)
if like:
like.delete()
else:
like = Like.objects.create(answer=answer,
liked_by=request.user)
like.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
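The views above rely on named routes ('user:profile', 'user:inbox', 'user:my-profile') and are themselves targets of reverse lookups. A hypothetical urls.py sketch showing one way they could be wired up; the route paths and names here are assumptions, not the project's actual configuration:

# Hypothetical urls.py wiring for the views above. The pk kwargs match what each
# view reads (question id, user id, answer id); the paths themselves are assumed.
from django.urls import path

from . import views

app_name = "core"

urlpatterns = [
    path("answer/<int:pk>/", views.AnswerDetailView.as_view(), name="answer-detail"),
    path("question/<int:pk>/answer/", views.AnswerCreateView.as_view(), name="answer-create"),
    path("answer/<int:pk>/delete/", views.AnswerDeleteView.as_view(), name="answer-delete"),
    path("question/<int:pk>/", views.PrivateQuestionDetailView.as_view(), name="question-detail"),
    path("user/<int:pk>/ask/", views.QuestionCreateView.as_view(), name="question-create"),
    path("question/<int:pk>/delete/", views.QuestionDeleteView.as_view(), name="question-delete"),
    path("answer/<int:pk>/like/", views.LikeView.as_view(), name="like"),
]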
avg_line_length: 27.696 | max_line_length: 77 | alphanum_fraction: 0.662334
qsc_code quality signals: num_words 383 | num_chars 3,462 | mean_word_length 5.869452 | frac_words_unique 0.232376 | frac_chars_top_2grams 0.035587 | frac_chars_top_3grams 0.024466 | frac_chars_top_4grams 0.033808 | frac_chars_dupe_5grams 0.344751 | frac_chars_dupe_6grams 0.298488 | frac_chars_dupe_7grams 0.258452 | frac_chars_dupe_8grams 0.232651 | frac_chars_dupe_9grams 0.232651 | frac_chars_dupe_10grams 0.232651 | frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.237146 | size_file_byte 3,462 | num_lines 124 | num_chars_line_max 78 | num_chars_line_mean 27.919355 | frac_chars_alphabet 0.851193 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0.337349 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.019064 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.120482 | cate_var_zero false | frac_lines_pass 0.012048 | frac_lines_import 0.108434 | frac_lines_simplefunc 0.036145 | score_lines_no_logic 0.578313 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc111fc110f0ab3862581da0b6b979e7a706d1e | size: 3,234 | ext: py | lang: Python
max_stars: path drowsiness_detector.py | repo zhww-107/drowsiness_detector @ 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | licenses ["BSD-2-Clause"] | count 1 | events 2020-05-12T12:31:51.000Z → 2020-05-12T12:31:51.000Z
max_issues: same path/repo/sha | licenses ["BSD-2-Clause"] | count null | events null
max_forks: same path/repo/sha | licenses ["BSD-2-Clause"] | count null | events null
content:
from imutils import face_utils
from scipy.spatial import distance
import cv2
import dlib
import imutils
import pygame
import time
# Initializing the alert sound
pygame.mixer.init()
alert_sound = pygame.mixer.Sound("alert_sound.wav")
default_volume = 0.2
# Eye-Aspect-Ratio data
EAR_threshhold = 0.17 # One valid frame is counted when EAR is lower than this value
frame_count = 0 # Number of frames when EAR is lower than EAR_threshhold
EAR_total_frame = 25 # Having frame_count larger than this value is considered drowsiness
# Play the alarm in a given volume
def alert(volume):
alert_sound.set_volume(volume)
alert_sound.play()
# Given an eye landmark, compute its eye_aspect_ratio
def eye_aspect_ratio(eye):
v1 = distance.euclidean(eye[1], eye[5])
v2 = distance.euclidean(eye[2], eye[4])
h1 = distance.euclidean(eye[0], eye[3])
return (v1 + v2) / (2 * h1)
# Initialize the face detector and Facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Access the camera
cap = cv2.VideoCapture(0)
# Main loop for drowsiness detection
while True:
    # Read the camera input, resize it, and convert it to a grayscale frame
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in grayscale frame
bounds = detector(raw,0)
for bound in bounds:
# Predict facial landmarks for each detected face
shape = predictor(raw,bound)
        # Convert the facial landmarks into a 1-D numpy array of (x, y) coordinates
shape = face_utils.shape_to_np(shape)
# Left and right eyes' indexes for facial landmarks
left_eye = shape[42:48]
right_eye = shape[36:42]
# The main EAR is the average of left and right eye's EAR
left_EAR = eye_aspect_ratio(left_eye)
right_EAR = eye_aspect_ratio(right_eye)
EAR = (left_EAR + right_EAR) / 2
# Draw the facial landmarks for left eye
for (x, y) in left_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Draw the facial landmarks for right eye
for (x, y) in right_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Alarm when drowsiness is detected
if EAR < EAR_threshhold:
frame_count += 1
# Volume increases gradually
if frame_count >= EAR_total_frame:
alert(0.2 + (frame_count - 25) * 0.2)
time.sleep(3)
else:
frame_count = 0
    # Display status information
cv2.putText(frame, "Frame: {:.0f}".format(frame_count), (30, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Eye-Aspect-Ratio: {:.2f}".format(EAR), (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Press Q to exit.", (410, 320),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Display the frame
cv2.imshow("Drowsiness_Detector", frame)
# Provide a way to exit the program -- pressing "Q"
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
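The eye_aspect_ratio function above implements EAR = (v1 + v2) / (2 * h1), where v1 and v2 are the two vertical landmark distances and h1 the horizontal one. A standalone numeric check with made-up open-eye coordinates (not real detector output):

# Standalone check of the EAR formula used above, with invented landmarks p1..p6.
from scipy.spatial import distance

eye = [(0, 0), (2, 2), (4, 2), (6, 0), (4, -2), (2, -2)]  # roughly an open eye
v1 = distance.euclidean(eye[1], eye[5])  # |p2 - p6| = 4.0
v2 = distance.euclidean(eye[2], eye[4])  # |p3 - p5| = 4.0
h1 = distance.euclidean(eye[0], eye[3])  # |p1 - p4| = 6.0
print((v1 + v2) / (2 * h1))  # 0.666...; a closing eye pushes this toward 0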
avg_line_length: 31.096154 | max_line_length: 89 | alphanum_fraction: 0.649969
qsc_code quality signals: num_words 478 | num_chars 3,234 | mean_word_length 4.276151 | frac_words_unique 0.34728 | frac_chars_top_2grams 0.034247 | frac_chars_top_3grams 0.041096 | frac_chars_top_4grams 0.030822 | frac_chars_dupe_5grams 0.13454 | frac_chars_dupe_6grams 0.082681 | frac_chars_dupe_7grams 0.082681 | frac_chars_dupe_8grams 0.082681 | frac_chars_dupe_9grams 0.082681 | frac_chars_dupe_10grams 0.082681 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.048991 | frac_chars_whitespace 0.248918 | size_file_byte 3,234 | num_lines 104 | num_chars_line_max 90 | num_chars_line_mean 31.096154 | frac_chars_alphabet 0.792507 | frac_chars_comments 0.300557 | cate_xml_start 0 | frac_lines_dupe_lines 0.118644 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.055828 | frac_chars_long_word_length 0.016525 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0.001787 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.033898 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.118644 | frac_lines_simplefunc 0 | score_lines_no_logic 0.169492 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc160c90d8d420f5bacbdb3fbe421c84e36aaf4 | size: 11,809 | ext: py | lang: Python
max_stars: path trunk-tap.py | repo schreiberstein/trunk-tap.py @ aacf32816e2a558e31ebc431edf84e23ef22146d | licenses ["MIT"] | count 15 | events 2017-10-22T15:08:58.000Z → 2022-01-03T22:21:12.000Z
max_issues: path trunk-tap.py | repo ideechaniz/trunk-tap.py @ aacf32816e2a558e31ebc431edf84e23ef22146d | licenses ["MIT"] | count 2 | events 2018-04-04T18:52:54.000Z → 2019-02-20T10:16:13.000Z
max_forks: path trunk-tap.py | repo ideechaniz/trunk-tap.py @ aacf32816e2a558e31ebc431edf84e23ef22146d | licenses ["MIT"] | count 6 | events 2017-10-23T03:03:16.000Z → 2021-07-03T16:28:29.000Z
content:
#!/usr/bin/env python3
# < trunk-tap.py >
# Version 1.0 < 20171022 >
# Copyright 2017: Alexander Schreiber < schreiberstein[at]gmail.com >
# https://github.com/schreiberstein/trunk-tap.py
# MIT License:
# ============
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# See: https://opensource.org/licenses/MIT
# Introduction:
# =============
# trunk-tap.py is a Linux command line utility that connects a set of 802.1Q VLANs to a TINC VPN/OpenVPN TAP-interface and is designed to be invoked by ifup/ifdown scripts after starting or stopping a VPN connection.
# Dependencies (on Debian): python3, iproute2, bridge-utils, vlan (including kernel module '8021q' in /etc/modules)
# It reads the filenames from the content of a folder containing files corresponding to the VLAN ID (e.g. '100', '105', ...), then creates VLAN interfaces on a local Ethernet adapter used as "trunk port" (e.g. 'eth1.100', 'eth1.105', ...).
# The script then proceeds to generate bridge interfaces for every VLAN ID. (e.g. "trunk0.100", "trunk0.105", ...) and attaches the respective Ethernet VLAN interfaces to the bridge. (e.g. 'trunk0.105 <-> eth1.105', ...)
# After that, the local infrastructure is ready to be attached to the VPN layer 2 tunnel.
# This is achieved by enabling the TAP interface ("up"), creating VLAN interfaces on the TAP adapter (e.g. 'tap0.100', 'tap0.105', ...) and attaching them to the respective bridge.
# Illustration:
# =============
# (TINC VPN / OpenVPN)
# -------- SITE 1 ------- -------- SITE 2 -------
# eth1.100 <-> trunk0.100 <--\ ################ /--> trunk0.100 <-> eth1.100
# eth1.105 <-> trunk0.105 <--->> ---TAP-TUNNEL--- <<---> trunk0.105 <-> eth1.105
# eth1.110 <-> trunk0.110 <--/ ################ \--> trunk0.110 <-> eth1.110
# Hint: Interface names (ethernet adapter, bridge name, ...) do not necessarily have to be identical among sites.
# --------------------------------------------------------------------------------------------------------------- #
# Code:
# =====
# Import required Python3 modules
import os, sys, subprocess
from pathlib import Path
# Create VLAN-interfaces on trunk interface (e.g. 'eth1.100', 'eth1.105', ...)
def trunk_vlan_add():
# Initialize our trunk interface, if it is not up yet
p = subprocess.Popen("ip link set dev " + trunk_interface + " up", shell=True)
p.communicate()
# Create VLAN interfaces on trunk_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add link " + trunk_interface + " name " + trunk_interface + "." + filename + " type vlan id " + filename +" ; " + "ip link set " + trunk_interface + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to remove VLAN interfaces from trunk interface
def trunk_vlan_del():
# Remove VLAN interfaces on trunk_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + trunk_interface + "." + filename + " down" + " ; " + "ip link delete " + trunk_interface + "." + filename, shell=True)
p.communicate()
continue
return
# Function to create main bridge (no VLAN ID - May be used to attach a VLAN/network to provide network to devices without VLAN support (VLAN0 - untagged))
def bridge_add():
p = subprocess.Popen("ip link add name " + bridge_name + " type bridge" + " ; " + "ip link set " + bridge_name + " up" + " ; " + "ip link set " + trunk_interface + " master " + bridge_name, shell=True)
p.communicate()
return
# Function to remove bridge
def bridge_del():
p = subprocess.Popen("ip link set " + bridge_name + " down" + " ; " + "ip link delete " + bridge_name + " type bridge", shell=True)
p.communicate()
return
# Creates bridges to be used for VLAN bridging (e.g. 'trunk0.100', 'trunk0.105', ..) - illustration: eth1.105 <-> Bridge: trunk0.105 <-> tap0.105
def bridge_vlan_add():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add name " + bridge_name + "." + filename + " type bridge" + " ; " + "ip link set " + bridge_name + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to remove VLAN interfaces from the bridge
def bridge_vlan_del():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + bridge_name + "." + filename + " down" + " ; " + "ip link delete " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
def bridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + trunk_interface + "." + filename + " master " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Create VLAN-interfaces on tap interface
def tap_vlan_add():
# Initialize the tap interface, if it is not up yet
p = subprocess.Popen("ip link set dev " + tap_interface + " up", shell=True)
p.communicate()
# Create VLAN interfaces on tap interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link add link " + tap_interface + " name " + tap_interface + "." + filename + " type vlan id " + filename + " ; " + "ip link set dev " + tap_interface + "." + filename + " up", shell=True)
p.communicate()
continue
return
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
def tap_bridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + tap_interface + "." + filename + " master " + bridge_name + "." + filename, shell=True)
p.communicate()
continue
return
# Function to enable ("up") the tap interface
def tap_if_up():
    p = subprocess.Popen("ip link set dev " + tap_interface + " up", shell=True)
    p.communicate()
return
# Function to disable ("down") the tap interface
def tap_if_down():
p = subprocess.Popen("ip link set dev " + tap_interface + " down", shell=True)
    p.communicate()
return
# Function to remove VLAN interfaces from tap interface
def tap_vlan_del():
# Remove VLAN interfaces on tinc_interface
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set dev " + tap_interface + "." + filename + " down" + " ; " + "ip link delete " + tap_interface + "." + filename, shell=True)
p.communicate()
continue
return
# Function to remove members attached by the tap_bridge() function
def tap_unbridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + tap_interface + "." + filename + " nomaster", shell=True)
p.communicate()
continue
return
# Function to remove members attached by the bridge() function
def unbridge():
for filename in os.listdir(vlan_dir):
p = subprocess.Popen("ip link set " + trunk_interface + "." + filename + " nomaster", shell=True)
p.communicate()
continue
return
# ------------------------
# Note: Order of execution
# ------------------------
# Start:
# ------
# trunk_vlan_add()
# bridge_add()
# bridge_vlan_add()
# bridge()
# tap_if_up()
# tap_vlan_add()
# tap_bridge()
# Stop:
# -----
# tap_unbridge()
# tap_vlan_del()
# tap_if_down()
# unbridge()
# bridge_vlan_del()
# bridge_del()
# trunk_vlan_del()
# Start function - Used to execute all other functions
def start(no_tap):
trunk_vlan_add()
bridge_add()
bridge_vlan_add()
bridge()
# Don't do anything with the TAP interface if --no_tap was specified
if not no_tap:
tap_if_up()
tap_vlan_add()
tap_bridge()
return
# Stop function - reverses the actions performed by start()
def stop(no_tap):
# Don't do anything with the TAP interface if --no_tap was specified
if not no_tap:
tap_unbridge()
tap_vlan_del()
tap_if_down()
unbridge()
bridge_vlan_del()
bridge_del()
trunk_vlan_del()
return
# # # # # # # # #
# Main function #
# # # # # # # # #
def main():
# If no arguments are specified, quit.
if len(sys.argv) == 1:
print("Error: No arguments specified. Enter ./trunktap.py --help for more information.")
quit()
# If arguments are given, parse them and run script.
import argparse
parser = argparse.ArgumentParser()
# Add arguments
parser.add_argument("-start", dest="is_start", action="store_true", help="Creates all interfaces and establishes VLAN bridges")
parser.add_argument("-stop", dest="is_stop", action="store_true", help="Reverses -start: Removes the previously created interfaces")
parser.add_argument("-i", "--interface", dest="trunk_interface", help="Specify the trunk interface on the host that will provide the VLANs to the network (e.g. eth1)")
parser.add_argument("-t", "--tap-interface", dest="tap_interface", help="Specify the TAP interface on the host that will be used by TINC/OpenVPN (e.g. $INTERFACE, tap0)")
parser.add_argument("-v", "--vlan-dir", dest="vlan_dir", help="The path to the folder that contains the files that represent the VLANs that will be created. - Default: ./vlans/ ", default="./vlans/")
parser.add_argument("-b", "--bridge", dest="bridge_name", help="Name of the bridge that will be created. (e.g. trunk0, br0)")
parser.add_argument("--no-tap", dest="no_tap", help="Only for special use: If used, the VLANs will be created locally (e.g. trunk0.105 <-> eth1.105), but the TAP interface won't be used.", default=False, action="store_true")
# Parse arguments
arguments = parser.parse_args()
# Create local variables because the functions use these
global trunk_interface, tap_interface, vlan_dir, bridge_name
trunk_interface = arguments.trunk_interface
tap_interface = arguments.tap_interface
vlan_dir = arguments.vlan_dir
bridge_name = arguments.bridge_name
# Make sure that either start or stop was specified (NOT XOR)
if not arguments.is_start ^ arguments.is_stop:
print("Error: You have to specify either -start or -stop. Only one option is valid.")
quit()
# Make sure that arguments are not empty
if not (trunk_interface and tap_interface and vlan_dir and bridge_name):
print("Error: You have to specify -i, -t, -b and -v.")
quit()
# Execute either function start() or stop() and pass the no_tap-variable
if arguments.is_start:
start(arguments.no_tap)
if arguments.is_stop:
stop(arguments.no_tap)
quit()
# Only run main if the script is explicitly executed (e.g. './trunktap.py')
if __name__ == "__main__":
main()
avg_line_length: 41.146341 | max_line_length: 260 | alphanum_fraction: 0.655348
qsc_code quality signals: num_words 1,606 | num_chars 11,809 | mean_word_length 4.719801 | frac_words_unique 0.205479 | frac_chars_top_2grams 0.044327 | frac_chars_top_3grams 0.020185 | frac_chars_top_4grams 0.037995 | frac_chars_dupe_5grams 0.424274 | frac_chars_dupe_6grams 0.41161 | frac_chars_dupe_7grams 0.347625 | frac_chars_dupe_8grams 0.335752 | frac_chars_dupe_9grams 0.322823 | frac_chars_dupe_10grams 0.282454 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.016827 | frac_chars_whitespace 0.209925 | size_file_byte 11,809 | num_lines 286 | num_chars_line_max 261 | num_chars_line_mean 41.29021 | frac_chars_alphabet 0.795606 | frac_chars_comments 0.442459 | cate_xml_start 0 | frac_lines_dupe_lines 0.447761 | cate_autogen 0 | frac_lines_long_string 0.029851 | frac_chars_string_length 0.242862 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.126866 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.022388 | frac_lines_simplefunc 0 | score_lines_no_logic 0.268657 | frac_lines_print 0.022388
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc353399a2502106befa0365666e5d586522d04 | size: 4,404 | ext: py | lang: Python
max_stars: path tests/common/mock_cgroup_commands.py | repo rbgithuub/WALinuxAgent @ c0462f33bb5e3a33430fe3d172676d85cefa6227 | licenses ["Apache-2.0"] | count null | events null
max_issues: same path/repo/sha | licenses ["Apache-2.0"] | count null | events null
max_forks: same path/repo/sha | licenses ["Apache-2.0"] | count null | events null
content:
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import contextlib
import os
import re
import subprocess
from azurelinuxagent.common.utils import fileutil
from tests.tools import patch, data_dir
#
# Default values for the mocked commands.
#
# The output comes from an Ubuntu 18 system
#
_default_commands = [
(r"systemctl --version",
'''systemd 237
+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid
'''),
(r"mount -t cgroup",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
'''),
(r"mount -t cgroup2",
'''cgroup on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime)
'''),
(r"systemctl show walinuxagent\.service --property CPUAccounting",
'''CPUAccounting=no
'''),
(r"systemctl show walinuxagent\.service --property MemoryAccounting",
'''MemoryAccounting=no
'''),
(r"systemd-run --unit=([^\s]+) --scope ([^\s]+)",
'''
Running scope as unit: TEST_UNIT.scope
Thu 28 May 2020 07:25:55 AM PDT
'''),
]
_default_files = (
(r"/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'proc_self_cgroup')),
(r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'proc_pid_cgroup')),
(r"/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'sys_fs_cgroup_unified_cgroup.controllers')),
)
@contextlib.contextmanager
def mock_cgroup_commands():
original_popen = subprocess.Popen
original_read_file = fileutil.read_file
original_path_exists = os.path.exists
def mock_popen(command, *args, **kwargs):
if isinstance(command, list):
command_string = " ".join(command)
else:
command_string = command
for cmd in _default_commands:
match = re.match(cmd[0], command_string)
if match is not None:
command = ["echo", cmd[1]]
return original_popen(command, *args, **kwargs)
def mock_read_file(filepath, **kwargs):
for file in _default_files:
match = re.match(file[0], filepath)
if match is not None:
filepath = file[1]
return original_read_file(filepath, **kwargs)
def mock_path_exists(path):
for file in _default_files:
match = re.match(file[0], path)
if match is not None:
return True
return original_path_exists(path)
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) as patcher:
with patch("azurelinuxagent.common.cgroupapi.os.path.exists", side_effect=mock_path_exists):
with patch("azurelinuxagent.common.cgroupapi.fileutil.read_file", side_effect=mock_read_file):
yield patcher
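A hedged usage sketch for the context manager above: anything inside the with block that goes through azurelinuxagent.common.cgroupapi sees the canned Ubuntu 18 command output and files. The import path mirrors the file's location in the repo and is an assumption:

# Hedged usage sketch for mock_cgroup_commands; module path assumed from the repo layout.
import unittest

from tests.common.mock_cgroup_commands import mock_cgroup_commands


class CGroupApiTestCase(unittest.TestCase):
    def test_with_canned_cgroup_data(self):
        with mock_cgroup_commands():
            # Code under test that goes through azurelinuxagent.common.cgroupapi
            # (its Popen, os.path.exists, and fileutil.read_file are patched)
            # now sees the canned Ubuntu 18 outputs defined above.
            ...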
avg_line_length: 38.631579 | max_line_length: 178 | alphanum_fraction: 0.711172
qsc_code quality signals: num_words 621 | num_chars 4,404 | mean_word_length 4.94847 | frac_words_unique 0.330113 | frac_chars_top_2grams 0.024406 | frac_chars_top_3grams 0.053693 | frac_chars_top_4grams 0.054995 | frac_chars_dupe_5grams 0.391474 | frac_chars_dupe_6grams 0.248617 | frac_chars_dupe_7grams 0.191344 | frac_chars_dupe_8grams 0.04686 | frac_chars_dupe_9grams 0.024732 | frac_chars_dupe_10grams 0.024732 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.011187 | frac_chars_whitespace 0.167802 | size_file_byte 4,404 | num_lines 113 | num_chars_line_max 179 | num_chars_line_mean 38.973451 | frac_chars_alphabet 0.827285 | frac_chars_comments 0.153951 | cate_xml_start 0 | frac_lines_dupe_lines 0.196429 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.243243 | frac_chars_long_word_length 0.121622 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.071429 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.107143 | frac_lines_simplefunc 0 | score_lines_no_logic 0.25 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc78e4dfebfc4162a535f0855d380aa68aa6df8 | size: 1,474 | ext: py | lang: Python
max_stars: path main.py | repo saiamphora/XOR-NEATpy @ 091b6d6fc3b662491c8216227f5305841521e0ed | licenses ["Unlicense"] | count 1 | events 2021-11-29T03:30:49.000Z → 2021-11-29T03:30:49.000Z
max_issues: same path/repo/sha | licenses ["Unlicense"] | count 1 | events 2021-11-29T15:28:09.000Z → 2021-11-29T15:28:09.000Z
max_forks: same path/repo/sha | licenses ["Unlicense"] | count null | events null
content:
from __future__ import print_function
import os
import neat
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [(0.0,),(1.0,),(1.0,),(0.0,)]
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = 4.0
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = net.activate(xi)
genome.fitness -= (output[0] - xo[0]) ** 4
def run(config_file):
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(5))
winner = p.run(eval_genomes, 500)
print('\nBest genome:\n{!s}'.format(winner))
print('\nOutput:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = winner_net.activate(xi)
print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')
p.run(eval_genomes, 10)
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path)
avg_line_length: 33.5 | max_line_length: 82 | alphanum_fraction: 0.651967
qsc_code quality signals: num_words 206 | num_chars 1,474 | mean_word_length 4.514563 | frac_words_unique 0.334951 | frac_chars_top_2grams 0.021505 | frac_chars_top_3grams 0.019355 | frac_chars_top_4grams 0.012903 | frac_chars_dupe_5grams 0.189247 | frac_chars_dupe_6grams 0.115054 | frac_chars_dupe_7grams 0.115054 | frac_chars_dupe_8grams 0.109677 | frac_chars_dupe_9grams 0.092473 | frac_chars_dupe_10grams 0.092473 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.031463 | frac_chars_whitespace 0.202171 | size_file_byte 1,474 | num_lines 44 | num_chars_line_max 83 | num_chars_line_mean 33.5 | frac_chars_alphabet 0.759354 | frac_chars_comments 0.027137 | cate_xml_start 0 | frac_lines_dupe_lines 0.060606 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.073971 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.060606 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.090909 | frac_lines_simplefunc 0 | score_lines_no_logic 0.151515 | frac_lines_print 0.121212
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc9519279bbaea50bce0ecf16967333a0bd62b5 | size: 319 | ext: py | lang: Python
max_stars: path Autre/Internet.py | repo Yaya-Cout/Python @ 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | licenses ["Unlicense"] | count 5 | events 2020-12-05T14:00:39.000Z → 2021-12-02T11:44:54.000Z
max_issues: same path/repo/sha | licenses ["Unlicense"] | count 11 | events 2021-03-15T17:51:43.000Z → 2021-11-24T13:24:39.000Z
max_forks: same path/repo/sha | licenses ["Unlicense"] | count 1 | events 2021-01-02T14:15:10.000Z → 2021-01-02T14:15:10.000Z
content:
def main():
import webbrowser
recherche = 0
while True:
if recherche >= 2:
print("Vous avez fait " + str(recherche) + " recherches.")
recherche += 1
        adresse = input("Which address do you want to open? ")
webbrowser.open(adresse)
if __name__ == "__main__":
main()
avg_line_length: 19.9375 | max_line_length: 70 | alphanum_fraction: 0.567398
qsc_code quality signals: num_words 34 | num_chars 319 | mean_word_length 5.088235 | frac_words_unique 0.735294 | frac_chars_top_2grams 0 | frac_chars_top_3grams 0 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.013761 | frac_chars_whitespace 0.316614 | size_file_byte 319 | num_lines 15 | num_chars_line_max 71 | num_chars_line_mean 21.266667 | frac_chars_alphabet 0.779817 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.194357 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.090909 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.090909 | frac_lines_simplefunc 0 | score_lines_no_logic 0.181818 | frac_lines_print 0.090909
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bc96e1706c4c4494a902bdb9aa51a33d9269620 | size: 6,502 | ext: py | lang: Python
max_stars: path older/rc-qradar-search/query_runner/components/ariel_query.py | repo nickpartner-goahead/resilient-community-apps @ 097c0dbefddbd221b31149d82af9809420498134 | licenses ["MIT"] | count 65 | events 2017-12-04T13:58:32.000Z → 2022-03-24T18:33:17.000Z
max_issues: same path/repo/sha | licenses ["MIT"] | count 48 | events 2018-03-02T19:17:14.000Z → 2022-03-09T22:00:38.000Z
max_forks: same path/repo/sha | licenses ["MIT"] | count 95 | events 2018-01-11T16:23:39.000Z → 2022-03-21T11:34:29.000Z
content:
"""Action Module circuits component to update incidents from QRadar Ariel queries"""
import logging
from datetime import datetime
import time
import copy
import json
from string import Template
from pkg_resources import Requirement, resource_filename
import resilient_circuits.template_functions as template_functions
from query_runner.lib.query_action import QueryRunner
from query_runner.lib.qradar_rest_client import QRadarClient
from query_runner.lib.misc import SearchTimeout, SearchFailure
try:
basestring
except NameError:
basestring = str
LOG = logging.getLogger(__name__)
CONFIG_DATA_SECTION = 'ariel'
def config_section_data():
"""sample config data for use in app.config"""
section_config_fn = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/app.config.qradar")
query_dir = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/queries_ariel")
with open(section_config_fn, 'r') as section_config_file:
section_config = Template(section_config_file.read())
return section_config.safe_substitute(directory=query_dir)
class AQLIncidentUpdate(QueryRunner):
""" Acknowledges and fires off new query requests """
def __init__(self, opts):
query_options = opts.get(CONFIG_DATA_SECTION, {})
jinja_filters = template_functions.JINJA_FILTERS
jinja_filters["datetime"] = self._datetime_filter
template_functions.ENV.filters.update(jinja_filters)
super(AQLIncidentUpdate, self).__init__(opts, query_options, run_search)
def _datetime_filter(self, val):
""" JINJA filter to convert ms to YYYY-MM-DD HH:mm:ss """
dt = datetime.fromtimestamp(val/1000.0)
return dt.strftime("%Y-%m-%d %H:%M:%S")
#############################
# Functions for running Query
#############################
def _wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval):
""" Poll QRadar until search execution finishes """
start_time = time.time()
search_status = qradar_client.get_search_status(search_id)
if not search_status:
# Sometimes it takes a little while to be able to query a search id
time.sleep(4)
search_status = qradar_client.get_search_status(search_id)
while search_status.get("status", "") in ("WAIT", "EXECUTE", "SORTING"):
if timeout != 0:
if time.time() - start_time > timeout:
raise SearchTimeout(search_id, search_status.get("status", ""))
time.sleep(polling_interval)
search_status = qradar_client.get_search_status(search_id)
if search_status.get("status", "") != "COMPLETED":
LOG.error(search_status)
raise SearchFailure(search_id, search_status.get("status", ""))
# end _wait_for_query_to_complete
def _get_query_results(search_id, qradar_client, item_range):
""" Get results from a complete QRadar query """
if item_range:
headers = {"Range": item_range}
else:
headers = None
url = "ariel/searches/{0}/results".format(search_id, headers=headers)
response = qradar_client.get(url)
LOG.debug(response)
# Replace "NULL" with ""
response = remove_nulls(response)
return response
# end _get_query_results
def remove_nulls(d):
""" recursively replace 'NULL' with '' in dictionary """
if isinstance(d, basestring):
if d == u'NULL':
return u''
else:
return d
new = {}
LOG.debug("d={d} ".format(d=d))
LOG.debug("type of d is {t}".format(t=type(d)))
for k, v in d.items():
if isinstance(v, dict):
v = remove_nulls(v)
elif isinstance(v, list):
v = [remove_nulls(v1) for v1 in v]
elif isinstance(v, basestring) and v == u'NULL':
v = u''
new[k] = v
LOG.info("Returning: {n}".format(n=new))
return new
def run_search(options, query_definition, event_message):
""" Run Ariel search and return result """
# Read the options and construct a QRadar client
qradar_url = options.get("qradar_url", "")
qradar_token = options.get("qradar_service_token", "")
timeout = int(options.get("query_timeout", 600))
polling_interval = int(options.get("polling_interval", 5))
if not all((qradar_url, qradar_token, timeout, polling_interval)):
LOG.error("Configuration file missing required values!")
raise Exception("Missing Configuration Values")
verify = options.get("qradar_verify", "")
if verify[:1].lower() in ("0", "f", "n"):
verify = False
else:
verify = True
qradar_client = QRadarClient(qradar_url, qradar_token, verify=verify)
error = None
response = None
try:
params = {'query_expression': query_definition.query}
url = "ariel/searches"
response = qradar_client.post(url, params=params)
LOG.debug(response)
search_id = response.get('search_id', '')
if not search_id:
error = "Query Failed: " + response.get("message", "No Error Message Found")
else:
LOG.info("Queued Search %s", search_id)
_wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval)
# Query Execution Finished, Get Results
response = _get_query_results(search_id, qradar_client, query_definition.range)
except Exception as exc:
if not query_definition.onerror:
raise
LOG.error(exc)
error = u"{}".format(exc)
if error:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
mapdata.update({"error": error})
error_template = json.dumps({"events": [query_definition.onerror]}, indent=2)
error_rendered = template_functions.render_json(error_template, mapdata)
response = error_rendered
if not response or len(response["events"]) == 0:
LOG.warn("No data returned from query")
if query_definition.default:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
default_template = json.dumps({"events": [query_definition.default]}, indent=2)
default_rendered = template_functions.render_json(default_template, mapdata)
response = default_rendered
return response
# end run_search
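A quick illustration of remove_nulls from the file above; the import path mirrors the file's repo location and is an assumption:

# Illustration of remove_nulls: u'NULL' strings are blanked recursively,
# other values pass through untouched. Import path is assumed.
from query_runner.components.ariel_query import remove_nulls

doc = {"src": u"NULL", "events": [{"name": u"NULL", "count": 3}]}
print(remove_nulls(doc))
# {'src': u'', 'events': [{'name': u'', 'count': 3}]}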
avg_line_length: 36.324022 | max_line_length: 113 | alphanum_fraction: 0.669948
qsc_code quality signals: num_words 806 | num_chars 6,502 | mean_word_length 5.183623 | frac_words_unique 0.266749 | frac_chars_top_2grams 0.028722 | frac_chars_top_3grams 0.013404 | frac_chars_top_4grams 0.019148 | frac_chars_dupe_5grams 0.218047 | frac_chars_dupe_6grams 0.189325 | frac_chars_dupe_7grams 0.157252 | frac_chars_dupe_8grams 0.140498 | frac_chars_dupe_9grams 0.140498 | frac_chars_dupe_10grams 0.102441 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.003721 | frac_chars_whitespace 0.214703 | size_file_byte 6,502 | num_lines 178 | num_chars_line_max 114 | num_chars_line_mean 36.52809 | frac_chars_alphabet 0.814532 | frac_chars_comments 0.101969 | cate_xml_start 0 | frac_lines_dupe_lines 0.148438 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.097957 | frac_chars_long_word_length 0.016064 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.054688 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.085938 | frac_lines_simplefunc 0 | score_lines_no_logic 0.203125 | frac_lines_print 0
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bcaa605df103e994b12588df4d84741fe74b87f | size: 2,371 | ext: py | lang: Python
max_stars: path first/sendmail-practice.py | repo bujige/Python-practice @ c1eb76b0caaada628f23a477303f07d6be3f707c | licenses ["Apache-2.0"] | count null | events null
max_issues: same path/repo/sha | licenses ["Apache-2.0"] | count null | events null
max_forks: same path/repo/sha | licenses ["Apache-2.0"] | count null | events null
content:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from email import encoders
from email.header import Header
from email.mime.multipart import MIMEBase, MIMEMultipart
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
# Format an email address
def _format_addr(s):
    # parseaddr: parse the email address out of a string
    name, addr = parseaddr(s)
    # name may contain Chinese, so it must be encoded via a Header object
    # formataddr: the inverse of parseaddr
    return formataddr((Header(name, 'utf-8').encode(), addr))
# Login account and password
from_addr = input('From:')
password = input('Password:')
# Destination address
to_addr = input('To:')
# Destination server
smtp_server = input('SMTP server:')
# Build the mail
# Plain-text body
msg = MIMEText('Hello,send by Python...', 'plain', 'utf-8')
# HTML mail
msg = MIMEText('<html><body><h1>Hello</h1>' +
               '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
               '</body></html>', 'html', 'utf-8')
# Sender (the Chinese display name exercises the utf-8 Header encoding)
msg['From'] = _format_addr('Python爱好者<%s>' % from_addr)
# Recipient
msg['To'] = _format_addr('管理员<%s>' % to_addr)
# Subject
msg['Subject'] = Header('来自SMTP的问候...', 'utf-8').encode()
# Mail object with attachments (replaces the MIMEText objects built above)
msg = MIMEMultipart()
msg = MIMEMultipart('alternative')
msg['From'] = _format_addr('Python爱好者<%s>' % from_addr)
msg['To'] = _format_addr('管理员<%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候。。。', 'utf-8').encode()
# The mail body is a MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
msg.attach(MIMEText('<html><body><h1>Hello</h1>' +
                    '<p><img src="cid:0"></p>' +
                    '</body></html>', 'html', 'utf-8'))
with open('/Users/doc88/Desktop/banner.png', 'rb') as f:
    # Set up the attachment MIME type and read an image from disk
    mime = MIMEBase('image', 'jpeg', filename='banner.png')
    # Add the required headers:
    mime.add_header('Content-Disposition', 'attachment', filename='banner.png')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment content:
    mime.set_payload(f.read())
    # Encode with Base64:
    encoders.encode_base64(mime)
    # Add it to the MIMEMultipart:
    msg.attach(mime)
try:
    # Send the mail
    # Create the server connection
    server = smtplib.SMTP_SSL(smtp_server, 465)
    # Print all interaction with the SMTP server
    server.set_debuglevel(1)
    # Log in to the server
    server.login(from_addr, password)
    # Send: sender account, recipient accounts, content
    server.sendmail(from_addr, [to_addr], msg.as_string())
    # Quit the server
    server.quit()
    print('Success!')
except smtplib.SMTPException as e:
    print('Fail,%s' % e)
avg_line_length: 28.22619 | max_line_length: 79 | alphanum_fraction: 0.634753
qsc_code quality signals: num_words 304 | num_chars 2,371 | mean_word_length 4.855263 | frac_words_unique 0.414474 | frac_chars_top_2grams 0.02168 | frac_chars_top_3grams 0.020325 | frac_chars_top_4grams 0.02439 | frac_chars_dupe_5grams 0.186992 | frac_chars_dupe_6grams 0.186992 | frac_chars_dupe_7grams 0.163957 | frac_chars_dupe_8grams 0.081301 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.012794 | frac_chars_whitespace 0.175875 | size_file_byte 2,371 | num_lines 84 | num_chars_line_max 80 | num_chars_line_mean 28.22619 | frac_chars_alphabet 0.742579 | frac_chars_comments 0.137916 | cate_xml_start 0 | frac_lines_dupe_lines 0.086957 | cate_autogen 0 | frac_lines_long_string 0.021739 | frac_chars_string_length 0.258929 | frac_chars_long_word_length 0.041171 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1 | frac_lines_func_ratio 0.021739 | cate_var_zero false | frac_lines_pass 0.043478 | frac_lines_import 0.130435 | frac_lines_simplefunc 0 | score_lines_no_logic 0.173913 | frac_lines_print 0.043478
raw qsc_* columns (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data = null
effective: 1 | hits: 0
---
hexsha: 7bcea7388e12344b8c218c07128ff9fb1cd5ed79 | size: 1,519 | ext: py | lang: Python
max_stars: path yat-master/pymodule/common_sql/plain_parser/reader.py | repo opengauss-mirror/Yat @ aef107a8304b94e5d99b4f1f36eb46755eb8919e | licenses ["MulanPSL-1.0"] | count null | events null
max_issues: same path/repo/sha | licenses ["MulanPSL-1.0"] | count null | events null
max_forks: same path/repo/sha | licenses ["MulanPSL-1.0"] | count null | events null
content:
#!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
class PlainReader:
def __init__(self, content):
if isinstance(content, (str, )):
self.content = content.splitlines(keepends=False)
else:
self.content = content
self.content_iter = iter(self.content)
self._cache = None
def next_line(self):
if self._cache is None:
return next(self.content_iter)
else:
swap = self._cache
self._cache = None
return swap
def top_line(self):
if self._cache is None:
self._cache = next(self.content_iter)
return self._cache
def skip_line(self):
if self._cache is None:
next(self.content_iter)
else:
self._cache = None
def has_next(self):
try:
if self._cache is None:
self._cache = next(self.content_iter)
return True
except StopIteration:
return False
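# A minimal usage sketch (illustrative only, not part of the original module):
#   reader = PlainReader("a\nb\nc")
#   reader.top_line()   # -> 'a'   peek: caches the line without consuming it
#   reader.next_line()  # -> 'a'   returns the cached line and clears the cache
#   reader.skip_line()  #          discards 'b'
#   reader.has_next()   # -> True  'c' remains (and is now cached)
#   reader.next_line()  # -> 'c'
#   reader.has_next()   # -> False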
| 27.125
| 84
| 0.623436
| 202
| 1,519
| 4.569307
| 0.485149
| 0.107259
| 0.081257
| 0.056338
| 0.223185
| 0.173348
| 0.173348
| 0.110509
| 0.110509
| 0.110509
| 0
| 0.009434
| 0.302172
| 1,519
| 55
| 85
| 27.618182
| 0.861321
| 0.35813
| 0
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8730231294cec0e238e9725d099edb7ac1ec02d
| 7,359
|
py
|
Python
|
compecon/basisSpline.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | null | null | null |
compecon/basisSpline.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | null | null | null |
compecon/basisSpline.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | 1
|
2021-06-01T03:47:35.000Z
|
2021-06-01T03:47:35.000Z
|
import numpy as np
from scipy.sparse import csc_matrix, diags, tril
from .basis import Basis
__author__ = 'Randall'
# TODO: complete this class
# todo: compare performance of csr_matrix and csc_matrix to deal with sparse interpolation operators
# fixme: interpolation is ~25x slower than in Matlab with 2 dimensions!! ~2x slower with only one
class BasisSpline(Basis):
def __init__(self, *args, k=3, **kwargs):
nargs = len(args)
if nargs == 1:
if isinstance(args[0], tuple):
breaks = [np.sort(br) for br in args[0]]
n = np.array([br.size + k - 1 for br in breaks])
a = np.array([br[0] for br in breaks])
b = np.array([br[-1] for br in breaks])
kwargs['nodetype'] = 'user'
else:
raise ValueError("If only 1 positional argument is provided, it must be a tuple of 'd' array-like, " +
"each of them containing the breaks for one dimension.")
elif nargs == 3:
n, a, b = np.broadcast_arrays(*np.atleast_1d(*args))
breaks = [np.linspace(aa, bb, nn + 1 - k) for aa, bb, nn in zip(a, b, n)]
kwargs['nodetype'] = 'canonical'
else:
txt = 'Either 1 or 3 positional arguments must be provided\n'
txt += '\t1 argument -> break points\n'
txt += '\t3 argument -> n, a, b'
raise ValueError(txt)
''' Check inputs '''
assert ((k > 0) and type(k) is int), 'k must be a positive integer'
assert np.all(n > k), 'number of nodes must exceed order of spline'
assert np.all([(br.size > 1) for br in breaks]), 'breakpoint sequence must contain at least two elements'
''' Make instance '''
kwargs['basistype'] = 'spline'
super().__init__(n, a, b, **kwargs)
self.k = k
self.breaks = breaks
self._set_nodes()
def _set_nodes(self):
"""
Sets the basis nodes
:return: None
"""
n = self.n
k = self.k
self._nodes = list()
for i in range(self.d):
x = np.cumsum(self._augbreaks(i, k))
x = (x[k : n[i] + k] - x[:n[i]]) / k
x[0] = self.a[i]
x[-1] = self.b[i]
self._nodes.append(x)
self._expand_nodes()
def _augbreaks(self, i, m,):
aa = np.repeat(self.a[i], m)
bb = np.repeat(self.b[i], m)
return np.concatenate((aa, self.breaks[i], bb))
def _update_diff_operators(self, i, order):
"""
Updates the list _D of differentiation operators
:param order: order of required derivative
:return: None
"""
keys = set(self._diff_operators[i].keys())
if (order in keys) or (order == 0):
return # Use previously stored values if available
n = self.n[i]
a = self.a[i]
b = self.b[i]
k = self.k
assert order <= k, 'order must be less or equal to k'
kk = k - 1 - min(order, 0)
augbreaks = self._augbreaks(i, kk)
if order > 0:
def sptemp(j):
temp = np.atleast_2d((k + 1 - j) / (augbreaks[k:(n + k - j)] - augbreaks[(j - 1):(n - 1)]))
return diags((-temp, temp), [0, 1], (n - j, n + 1 - j))
missing_keys = set(range(1, order + 1)) - keys
if 1 in missing_keys:
self._diff_operators[i][1] = sptemp(1)
missing_keys -= {1}
missing_keys = list(missing_keys)
missing_keys.sort(reverse=True)
while missing_keys:
j = missing_keys.pop()
self._diff_operators[i][j] = np.dot(sptemp(j), self._diff_operators[i][j - 1])
else:
def sptemp(j):
temp = (augbreaks[(kk + 1):(kk + n - j)] -
augbreaks[(kk - k + j + 1):(kk + n - k)]) / (k - j)
return tril(np.tile(temp, (n - j, 1)), -1)
missing_keys = set(range(order, 0)) - keys
if -1 in missing_keys:
self._diff_operators[i][-1] = sptemp(-1)
missing_keys -= {-1}
missing_keys = list(missing_keys)
missing_keys.sort(reverse=False)
while missing_keys:
j = missing_keys.pop()
self._diff_operators[i][j] = sptemp(j) * self._diff_operators[i][j + 1]
"""
Interpolation methods
"""
def _phi1d(self, i, x=None, order=0):
"""
Computes interpolation matrices for given data x and order of differentiation 'order' (integration if negative)
:param x: evaluation points (defaults to nodes)
:param order: a list of orders for differentiation (+) / integration (-)
:return a: dictionary with interpolation matrices, keys given by unique elements of order.
Example: Create a basis with 5 nodes, get the interpolation matrix evaluated at 20 points::
n, a, b = 5, 0, 4
x = numpy.linspace(a,b, 20)
Phi = BasisSpline(n, a, b)
Phi.Phi(x)
Phi(x)
Calling an instance directly (as in the last line) is equivalent to calling the interpolation method.
"""
n = self.n[i]
k = self.k
if order is None:
order = 0
order = np.atleast_1d(order).flatten()
assert np.max(order) < k, 'Derivatives defined for order less than k'
nn = n + np.maximum(0, -np.min(order)) # todo review why nn is not used, weird
# Check for x argument
xIsProvided = (x is not None)
x = x.flatten() if xIsProvided else self._nodes[i]
nx = x.size
minorder = np.min(order)
kaug = k - minorder
augbreaks = self._augbreaks(i, kaug)
ind = self._lookup(augbreaks, x)
# Recursively determine the values of a k-order basis matrix.
# This is placed in an (m x k+1-order) matrix
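        # (The double loop below implements what appears to be the Cox-de Boor
        # recurrence: each order-j value is a convex combination of two
        # order-(j-1) values, weighted by distance to the surrounding breaks.)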
bas = np.zeros((kaug + 1, nx))
bas[0] = 1
Phidict = dict()
for j in range(1, kaug + 1):
for jj in range(j, 0, -1):
b0 = augbreaks[ind + jj - j]
b1 = augbreaks[ind + jj]
temp = bas[jj - 1] / (b1 - b0)
bas[jj] = (x - b0) * temp + bas[jj]
bas[jj - 1] = (b1 - x) * temp
# bas now contains the order-j spline basis
ii = np.where((k - j) == order)[0]
if ii.size > 0:
ii = ii[0]
oi = order[ii]
# Put values in appropriate columns of a sparse matrix
r = np.tile(np.arange(nx), k - oi + 1)
c = np.atleast_2d(np.arange(oi - k, 1)).T + np.atleast_2d(ind)
c = (c - (oi - minorder)).flatten()
data = bas[:k - oi + 1].flatten()
Phidict[oi] = csc_matrix((data, (r, c)), (nx, n-oi))
if oi:
# If needed compute derivative or anti-derivative
Phidict[oi] = Phidict[oi] * self._diff(i, oi)
# todo: review, i think this will return only unique values
Phi = np.array([Phidict[k] for k in order])
return Phi
| 36.430693
| 119
| 0.512298
| 989
| 7,359
| 3.73913
| 0.244692
| 0.047593
| 0.03218
| 0.034072
| 0.11033
| 0.098972
| 0.098972
| 0.098972
| 0.08437
| 0.08437
| 0
| 0.018904
| 0.367441
| 7,359
| 201
| 120
| 36.61194
| 0.77551
| 0.192825
| 0
| 0.128
| 0
| 0.008
| 0.08632
| 0
| 0
| 0
| 0
| 0.00995
| 0.04
| 1
| 0.056
| false
| 0
| 0.024
| 0
| 0.128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c873b44db1fbe52cb97100b99eb41550c409cc9f
| 2,279
|
py
|
Python
|
vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 4
|
2019-01-11T03:41:28.000Z
|
2019-09-12T06:57:17.000Z
|
vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | null | null | null |
vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 2
|
2019-01-10T05:00:18.000Z
|
2020-02-15T16:32:56.000Z
|
import os
import os.path
import sys
# Modified version from Python-3.3. 'env' environ dict override has been added.
def which(cmd, mode=os.F_OK | os.X_OK, env=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `env` defaults to os.environ,
if not supplied.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# Short circuit. If we're given a full path which matches the mode
# and it exists, we're done here.
if _access_check(cmd, mode):
return cmd
if env is None:
env = os.environ
path = env.get("PATH", os.defpath).split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
        if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
default_pathext = \
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC'
pathext = env.get("PATHEXT", default_pathext).split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
# If it does match, only test that one, otherwise we have to try
# others.
files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
dir = os.path.normcase(dir)
        if dir not in seen:
seen.add(dir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
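# A minimal usage sketch (illustrative only, not part of the backport):
#   which("python")                       # e.g. '/usr/bin/python', or None
#   which("python", env={"PATH": "/opt/py/bin" + os.pathsep + "/usr/bin"})
#   which("notes.txt", mode=os.F_OK)      # existence check only, no X_OK test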
| 36.174603
| 79
| 0.617376
| 348
| 2,279
| 4.008621
| 0.41092
| 0.021505
| 0.007168
| 0.010036
| 0.018638
| 0.018638
| 0.018638
| 0
| 0
| 0
| 0
| 0.003086
| 0.289162
| 2,279
| 62
| 80
| 36.758065
| 0.858025
| 0.432207
| 0
| 0
| 0
| 0.03125
| 0.055024
| 0.042265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0.03125
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c87b5c6d8dff26ac4e6274273976c58563c8553b
| 13,380
|
py
|
Python
|
clustering/runner.py
|
kburnik/naps-clustering
|
8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19
|
[
"MIT"
] | null | null | null |
clustering/runner.py
|
kburnik/naps-clustering
|
8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19
|
[
"MIT"
] | null | null | null |
clustering/runner.py
|
kburnik/naps-clustering
|
8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19
|
[
"MIT"
] | null | null | null |
"""Class with high-level methods for processing NAPS and NAPS BE datasets."""
from config import DATA_NAPS_BE_ALL
from lib import partition_naps
from lib import plot
from lib import plot_clusters
from lib import plot_clusters_with_probability
from lib import plot_setup
from lib import read_naps
from lib import read_naps_be
from lib import reindex_partitions
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy
import sklearn
class Runner:
"""Provides methods for processing NAPS with the clustering algorithm."""
def __init__(self, input_data, config):
self.input_data = input_data
self.config = config
def compute_raw_partitions(self):
"""Compute the k-means and returns the cluster index for each sample."""
kmeans = partition_naps(
samples=self.input_data.samples,
n_clusters=self.config.n_clusters)
return kmeans.labels_
def compute_stable_partitions(self):
"""Same as compute_raw_partition, but with stable index coloring."""
return reindex_partitions(
samples=self.input_data.samples,
indices=self.compute_raw_partitions())
def compute_average_partitions(self):
"""
Repeats the stable colored k-means and computes the average membership
    of each input sample. For each sample, returns its membership frequency
    for each cluster, as an array of size n_clusters (Monte-Carlo simulation).
"""
cluster_hist = np.zeros(
(self.input_data.size, self.config.n_clusters))
for k in range(self.config.n_iterations):
indices = self.compute_stable_partitions()
for i, cluster in enumerate(indices):
cluster_hist[i][cluster] += 1
return np.divide(cluster_hist, self.config.n_iterations)
def compute_stable_argmax_partitions(self):
"""Computes the stable partitions using the Monte-Carlo simulation, and
selects the most frequent cluster based on the probability (argmax)."""
indices_prob = self.compute_average_partitions()
self._display_undecided_index_count(indices_prob)
return np.argmax(indices_prob, axis=1)
def compute_naps_results(self, num_samples=5, prefix_dir='naps-clustering'):
"""Saves the clustering results and plots the NAPS clusters."""
with self.config.fork() as config:
p = config.n_iterations
for k in range(*config.n_clusters_range):
config.n_clusters = k
# Partition with caching.
indices = np.array(self.cached(
func=self.compute_stable_argmax_partitions,
prefix_dir=prefix_dir,
name='naps-clustering-k=%d-p=%d' % (k, p)))
# Split the input data.
partitioned_data = self.partition_input_data(indices)
# Save the separated datasets.
partitions_filename = self.join_path(
prefix_dir,
'naps-clustering-partitioned-full-k=%d-p=%d.csv' % (k, p))
with open(partitions_filename, "w") as f:
for cluster, data in partitioned_data.items():
f.write(data.serialize() + "\n")
# Save the chosen samples.
samples_filename = self.join_path(
prefix_dir,
'naps-clustering-partitioned-samples-k=%d-p=%d.csv' % (k, p))
with open(samples_filename, "w") as f:
for cluster, data in partitioned_data.items():
chunk = data.reduce_to_samples(num_samples)
f.write(chunk.serialize(";", use_quotes=False) + "\n")
self.plot(
indices=indices,
filename=self.join_path(
prefix_dir,
'naps-clustering-k=%d-p=%d.png' % (k, p)),
output_action='save')
def compute_naps_be_results(
self,
x_axis,
y_axis,
num_samples=5,
prefix_dir='naps-be-clustering'):
"""Saves the clustering results and plots the NAPS BE clusters."""
p = self.config.n_iterations
k = self.config.n_clusters
# Partition with caching.
indices = np.array(self.cached(
func=self.compute_stable_argmax_partitions,
prefix_dir=prefix_dir,
name='naps-be-clustering-%s-%s-k=%d-p=%d' % (x_axis, y_axis, k, p)))
# Split the input data.
partitioned_data = self.partition_input_data(indices)
# Save the separated datasets.
partitions_filename = self.join_path(
prefix_dir,
'naps-be-clustering-partitioned-full-%s-%s-k=%d-p=%d.csv' % (
x_axis, y_axis, k, p))
with open(partitions_filename, "w") as f:
for cluster, data in partitioned_data.items():
f.write(data.serialize() + "\n")
# Save the chosen samples.
samples_filename = self.join_path(
prefix_dir,
'naps-be-clustering-partitioned-samples-%s-%s-k=%d-p=%d.csv' % (
x_axis, y_axis, k, p))
with open(samples_filename, "w") as f:
for cluster, data in partitioned_data.items():
chunk = data.reduce_to_samples(num_samples)
f.write(chunk.serialize(";", use_quotes=False) + "\n")
self.plot(
indices=indices,
filename=self.join_path(
prefix_dir,
'naps-be-clustering-%s-%s-k=%d-p=%d.png' % (x_axis, y_axis, k, p)),
output_action='save')
def compute_stability_error_of_iterations(self):
"""Computes the stability error curve as a function of number of
iterations."""
with self.config.fork() as config:
return [
self._compute_stability_error_point(config.n_iterations)
for config.n_iterations in
range(*config.n_iterations_range)
]
def compute_stability_error_of_partition_count(self):
"""Computes the stability error curve as a function of number of
clusters."""
with self.config.fork() as config:
return [
self._compute_stability_error_point(config.n_clusters)
for config.n_clusters in
range(*config.n_clusters_range)
]
def partition_input_data(self, indices):
"""Splits the input data to partitions as defined by the indices."""
return self.input_data.split_on_key(lambda i, row: indices[i])
def plot(self, indices, output_action='save', filename=None):
"""Plots the clusters."""
if filename is None:
# TODO: Add date?
filename = self.join_path('out-single-run.png')
plot_clusters(
indices=indices,
input_data=self.input_data,
n_clusters=self.config.n_clusters,
output_action=output_action,
filename=filename)
def plot_repeated(
self,
partition_factory,
n_plots=10,
name='out',
prefix_dir='.'):
"""
Runs the partition_factory requested number of times, plots and saves the
images.
"""
for i in range(n_plots):
self.plot(
indices=partition_factory(),
output_action='save',
filename=self.join_path(prefix_dir, '%s-%02d.png' % (name, i)))
def plot_fuzzy(self, prefix_dir='.', name='out-fuzzy-simple'):
"""Plots the undecidable points."""
indices_prob = np.array(self.cached(
func=self.compute_average_partitions,
name=name,
prefix_dir=prefix_dir))
plot_clusters_with_probability(
indices_prob=indices_prob,
input_data=self.input_data,
plot_fuzzy_simple=True,
output_action='save',
filename=self.join_path(prefix_dir, '%s.png' % name))
def plot_cluster_number_evaluation_curve(
self,
evaluate,
title,
name,
score_label,
prefix_dir='.'):
"""Plots the evaluation curve as a function of number of clusters K."""
samples = self.input_data.samples
k_range = range(*self.config.n_clusters_range)
score = [evaluate(samples, k) for k in k_range]
self.save_csv(
data=zip(k_range, score),
columns=['partition count', score_label],
prefix_dir=prefix_dir,
name=name)
plt.figure(num=None, figsize=(16, 9), dpi=300)
plt.title(title)
plt.xlabel('partition count')
plt.ylabel(score_label)
plt.xticks(np.arange(*self.config.n_clusters_range, 2.0))
plt.plot(k_range, score)
plt.grid()
plt.savefig(self.join_path(prefix_dir, '%s.png' % name))
return plt
def plot_stability_error_curve(
self,
results,
title,
name,
xlabel,
ylabel,
xticks=200,
yticks=5,
figsize=(16, 6),
dpi=300,
prefix_dir='.'):
plt.figure(num=None, figsize=figsize, dpi=dpi)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks(np.arange(0, 1 + max([x for x, y in results]), xticks))
plt.yticks(np.arange(0, 1 + max([y for x, y in results]), yticks))
plt.plot(*zip(*results))
plt.grid()
plt.savefig(
self.join_path(prefix_dir, '%s.png' % name),
bbox_inches='tight')
return plt
def plot_multiple_cluster_number_evaluation_curves(
self,
input_data_list,
evaluate,
n_clusters_range,
title,
name,
score_label,
prefix_dir='.'):
"""Plots the evaluation curve for a given range of K."""
fig, ax = plot_setup()
plt.title(title)
plt.xlabel('partition count')
plt.ylabel(score_label)
plt.xticks(np.arange(*n_clusters_range, 2.0))
color = plt.cm.rainbow(np.linspace(0, 1, len(input_data_list)))
k_range = range(*n_clusters_range)
score_vectors = []
for i, input_data in enumerate(input_data_list):
score = [evaluate(input_data.samples, k) for k in k_range]
ax.plot(k_range, score, color=color[i], label=input_data.label_name)
score_vectors.append(score)
score_average = np.average(score_vectors, axis=0)
ax.plot(k_range, score_average, color=(0, 0, 0, 1), label="Average")
plt.grid()
plt.legend()
plt.savefig(self.join_path(prefix_dir, '%s.png' % name))
def _compute_stability_error_point(self, variable):
"""Computes one error point though the given number of evaluation
simulations."""
cluster_hist = np.zeros(
(self.input_data.size, self.config.n_clusters))
for i in range(self.config.n_evaluations):
indices = self.compute_stable_argmax_partitions()
for j, cluster in enumerate(indices):
cluster_hist[j][cluster] += 1
total_error = self._compute_total_histogram_error(
cluster_hist, self.config.n_evaluations)
error_point = (variable, total_error)
print(error_point)
return error_point
def cached(self, func, name, prefix_dir='.'):
"""Runs the provided method using a caching mechanism."""
filename = self.join_path(prefix_dir, '%s.cached-result.json' % name)
if os.path.exists(filename):
with open(filename, 'r') as f:
results = json.load(f)
else:
results = func()
with open(filename, 'w') as f:
                try:
                    results = results.tolist()
                except AttributeError:
                    # plain lists/dicts have no tolist(); store them as-is
                    pass
json.dump(results, f)
return results
def save_csv(
self,
data,
columns,
name,
delimiter=';',
prefix_dir='.',
extension='.csv'):
"""Saves data into a CSV file."""
        filename = self.join_path(prefix_dir, name + extension)
def encode(item):
return str(item)
with open(filename, 'w') as f:
f.write(delimiter.join(['"%s"' % column for column in columns]) + '\n')
for row in data:
f.write(delimiter.join([encode(item) for item in row]) + '\n')
def join_path(self, *args):
"""Joins a path for an output file and creates directories if they don't
exist."""
filename = os.path.join(self.config.out_dir, *args)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname, 0o755)
print("I/O path:", os.path.abspath(filename))
return filename
def _compute_total_histogram_error(self, hist, n_evaluations):
"""Computes the total error from the histogram of point cluster
membership."""
hist[hist == n_evaluations] = 0
sums_per_row = (hist != 0).sum(1)
return sums_per_row.sum() - np.count_nonzero(sums_per_row)
def _display_undecided_index_count(self, indices_prob):
"""Counts and prints out how many points have appeared at the edges of
clusters (the undecidability region)."""
print("Undecided count:", len(list(filter(
lambda row: np.max(row) == 0.5, indices_prob))))
@staticmethod
def compute_silhouette_score(samples, n_clusters):
"""Computes the silhouette score for a provided clustering result."""
kmeans = partition_naps(samples, n_clusters)
return sklearn.metrics.silhouette_score(
samples,
kmeans.labels_,
metric='euclidean')
@staticmethod
def stream_naps_be(
config,
x_dimensions, y_dimensions,
x_dimension_names, y_dimension_names):
"""Generates datasets for chosen pairs of dimensions."""
for i in range(len(x_dimensions)):
for j in range(len(y_dimensions)):
if len(x_dimensions) == len(y_dimensions) and j <= i:
continue
x_axis, y_axis = x_dimensions[i], y_dimensions[j]
x_name, y_name = x_dimension_names[i], y_dimension_names[j]
input_data = read_naps_be(
DATA_NAPS_BE_ALL,
label_field="label",
x_axis=x_axis,
y_axis=y_axis,
label_name="Label",
x_name=x_name,
y_name=y_name)
yield Runner(input_data=input_data, config=config), x_axis, y_axis
| 34.307692
| 78
| 0.65568
| 1,827
| 13,380
| 4.600985
| 0.158183
| 0.03212
| 0.019986
| 0.027837
| 0.412562
| 0.33833
| 0.298239
| 0.275399
| 0.269807
| 0.24542
| 0
| 0.004765
| 0.23139
| 13,380
| 389
| 79
| 34.395887
| 0.812622
| 0.145366
| 0
| 0.323529
| 0
| 0.006536
| 0.054953
| 0.031567
| 0
| 0
| 0
| 0.002571
| 0
| 1
| 0.081699
| false
| 0.003268
| 0.04902
| 0.003268
| 0.183007
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c87d1cba2782a99d03e9fe56c04a83d537ce2a1a
| 2,936
|
py
|
Python
|
Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | 4
|
2020-08-11T20:45:15.000Z
|
2021-03-12T00:33:34.000Z
|
Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
"""
1618. Maximum Font to Fit a Sentence in a Screen
Medium
You are given a string text. We want to display text on a screen of width w and height h. You can choose any font size from array fonts, which contains the available font sizes in ascending order.
You can use the FontInfo interface to get the width and height of any character at any available font size.
The FontInfo interface is defined as such:
interface FontInfo {
// Returns the width of character ch on the screen using font size fontSize.
// O(1) per call
public int getWidth(int fontSize, char ch);
// Returns the height of any character on the screen using font size fontSize.
// O(1) per call
public int getHeight(int fontSize);
}
The calculated width of text for some fontSize is the sum of every getWidth(fontSize, text[i]) call for each 0 <= i < text.length (0-indexed). The calculated height of text for some fontSize is getHeight(fontSize). Note that text is displayed on a single line.
It is guaranteed that FontInfo will return the same value if you call getHeight or getWidth with the same parameters.
It is also guaranteed that for any font size fontSize and any character ch:
getHeight(fontSize) <= getHeight(fontSize+1)
getWidth(fontSize, ch) <= getWidth(fontSize+1, ch)
Return the maximum font size you can use to display text on the screen. If text cannot fit on the display with any font size, return -1.
Example 1:
Input: text = "helloworld", w = 80, h = 20, fonts = [6,8,10,12,14,16,18,24,36]
Output: 6
Example 2:
Input: text = "leetcode", w = 1000, h = 50, fonts = [1,2,4]
Output: 4
Example 3:
Input: text = "easyquestion", w = 100, h = 100, fonts = [10,15,20,25]
Output: -1
Constraints:
1 <= text.length <= 50000
text contains only lowercase English letters.
1 <= w <= 10^7
1 <= h <= 10^4
1 <= fonts.length <= 10^5
1 <= fonts[i] <= 10^5
fonts is sorted in ascending order and does not contain duplicates.
"""
# """
# This is FontInfo's API interface.
# You should not implement it, or speculate about its implementation
# """
#class FontInfo(object):
# Return the width of char ch when fontSize is used.
# def getWidth(self, fontSize, ch):
# """
# :type fontSize: int
# :type ch: char
# :rtype int
# """
#
# def getHeight(self, fontSize):
# """
# :type fontSize: int
# :rtype int
# """
from typing import List  # needed to run standalone; the LeetCode judge pre-imports it

class Solution:
    def maxFont(self, text: str, w: int, h: int, fonts: List[int], fontInfo: 'FontInfo') -> int:
def check(fs):
if fontInfo.getHeight(fs) > h:
return False
if sum(fontInfo.getWidth(fs, c) for c in text) > w:
return False
return True
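        # Binary search on the answer over indices of fonts; l = -1 means "no font fits".
        # The midpoint is biased upward (m = r - (r - l) // 2) because the loop keeps
        # l = m on success; a downward-biased midpoint could loop forever once r == l + 1.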
l, r = -1, len(fonts) - 1
while l < r:
m = r - (r - l) // 2
if check(fonts[m]):
l = m
else:
r = m - 1
return fonts[l] if l > -1 else -1
| 32.622222
| 260
| 0.642711
| 456
| 2,936
| 4.138158
| 0.350877
| 0.029677
| 0.017488
| 0.015898
| 0.077371
| 0.077371
| 0.052994
| 0.052994
| 0.052994
| 0.052994
| 0
| 0.040534
| 0.260559
| 2,936
| 90
| 261
| 32.622222
| 0.82865
| 0.793597
| 0
| 0.125
| 0
| 0
| 0.013746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c880853878e1cff80cb76bcab65d294bfff7d0f4
| 6,407
|
py
|
Python
|
climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-14T06:37:42.000Z
|
2022-01-14T06:37:42.000Z
|
climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from climateeconomics.core.core_dice.tempchange_model import TempChange
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
class TempChangeDiscipline(SoSDiscipline):
" Temperature evolution"
# ontology information
_ontology_data = {
'label': 'Temperature Change DICE Model',
'type': 'Research',
'source': 'SoSTrades Project',
'validated': '',
'validated_by': 'SoSTrades Project',
'last_modification_date': '',
'category': '',
'definition': '',
'icon': 'fas fa-thermometer-three-quarters fa-fw',
'version': '',
}
DESC_IN = {
'year_start': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'year_end': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'time_step': {'type': 'int', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'init_temp_ocean': {'type': 'float', 'default': 0.00687},
'init_temp_atmo': {'type': 'float', 'default': 0.85},
'eq_temp_impact': {'type': 'float', 'default': 3.1},
'init_forcing_nonco': {'type': 'float', 'default': 0.5},
'hundred_forcing_nonco': {'type': 'float', 'default': 1 },
'climate_upper': {'type': 'float', 'default': 0.1005},
'transfer_upper': {'type': 'float', 'default': 0.088},
'transfer_lower': {'type': 'float', 'default': 0.025},
'forcing_eq_co2': {'type': 'float', 'default': 3.6813},
'lo_tocean': {'type': 'float', 'default': -1},
'up_tatmo': {'type': 'float', 'default': 12},
'up_tocean': {'type': 'float', 'default' : 20},
'carboncycle_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}}
DESC_OUT = {
'temperature_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}}
_maturity = 'Research'
def run(self):
''' model execution '''
# get inputs
in_dict = self.get_sosdisc_inputs()
# carboncycle_df = in_dict.pop('carboncycle_df')
# model execution
model = TempChange()
temperature_df = model.compute(in_dict)
# store output data
out_dict = {"temperature_df": temperature_df}
self.store_sos_outputs_values(out_dict)
def get_chart_filter_list(self):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
chart_filters = []
chart_list = ['temperature evolution']
# First filter to deal with the view : program or actor
chart_filters.append(ChartFilter(
'Charts', chart_list, chart_list, 'charts'))
return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
instanciated_charts = []
# Overload default value with chart filter
if chart_filters is not None:
for chart_filter in chart_filters:
if chart_filter.filter_key == 'charts':
chart_list = chart_filter.selected_values
if 'temperature evolution' in chart_list:
to_plot = ['temp_atmo', 'temp_ocean']
temperature_df = self.get_sosdisc_outputs('temperature_df')
temperature_df = resize_df(temperature_df)
legend = {'temp_atmo': 'atmosphere temperature',
'temp_ocean': 'ocean temperature'}
years = list(temperature_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
max_value = 0
min_value = 0
for key in to_plot:
max_value = max(temperature_df[key].values.max(), max_value)
min_value = min(temperature_df[key].values.min(), min_value)
chart_name = 'temperature evolution over the years'
new_chart = TwoAxesInstanciatedChart('years', 'temperature evolution (degrees Celsius above preindustrial)',
[year_start - 5, year_end + 5], [
min_value * 0.9, max_value * 1.1],
chart_name)
for key in to_plot:
visible_line = True
ordonate_data = list(temperature_df[key])
new_series = InstanciatedSeries(
years, ordonate_data, legend[key], 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
return instanciated_charts
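# resize_df below trims trailing rows whose first column is zero (e.g. padded
# simulation years); it returns the dataframe unchanged when nothing is trimmed.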
def resize_df(df):
index = df.index
i = len(index) - 1
key = df.keys()
to_check = df.loc[index[i], key[0]]
while to_check == 0:
i = i - 1
to_check = df.loc[index[i], key[0]]
size_diff = len(index) - i
new_df = pd.DataFrame()
if size_diff == 0:
new_df = df
else:
for element in key:
new_df[element] = df[element][0:i + 1]
new_df.index = index[0: i + 1]
return new_df
def resize_array(array):
i = len(array) - 1
to_check = array[i]
while to_check == 0:
i = i - 1
        to_check = array[i]
size_diff = len(array) - i
new_array = array[0:i]
return new_array
def resize_index(index, array):
l = len(array)
new_index = index[0:l]
return new_index
| 33.025773
| 129
| 0.605119
| 774
| 6,407
| 4.808786
| 0.307494
| 0.029017
| 0.051585
| 0.027405
| 0.192638
| 0.158248
| 0.158248
| 0.12762
| 0.068243
| 0.055884
| 0
| 0.015368
| 0.278914
| 6,407
| 193
| 130
| 33.196891
| 0.79026
| 0.167005
| 0
| 0.070175
| 0
| 0
| 0.211059
| 0.013496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.04386
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8813251417f083ef4764a6d0d80104c34d5a26a
| 56,368
|
py
|
Python
|
pymkm/pymkm_app.py
|
Guibod/pymkm
|
58ac805c8072979f3059c7faafc264386ae98141
|
[
"MIT"
] | null | null | null |
pymkm/pymkm_app.py
|
Guibod/pymkm
|
58ac805c8072979f3059c7faafc264386ae98141
|
[
"MIT"
] | null | null | null |
pymkm/pymkm_app.py
|
Guibod/pymkm
|
58ac805c8072979f3059c7faafc264386ae98141
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
The PyMKM example app.
"""
__author__ = "Andreas Ehrlund"
__version__ = "2.0.4"
__license__ = "MIT"
import os
import csv
import json
import shelve
import logging
import logging.handlers
import pprint
import uuid
import sys
from datetime import datetime
import micromenu
import progressbar
import requests
import tabulate as tb
from pkg_resources import parse_version
from .pymkm_helper import PyMkmHelper
from .pymkmapi import PyMkmApi, CardmarketError
class PyMkmApp:
logger = None
def __init__(self, config=None):
self.logger = logging.getLogger(__name__)
# self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fh = logging.handlers.RotatingFileHandler(
f"log_pymkm.log", maxBytes=500000, backupCount=2
)
fh.setLevel(logging.WARNING)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setLevel(logging.ERROR) # This gets outputted to stdout
sh.setFormatter(formatter)
self.logger.addHandler(sh)
if config is None:
self.logger.debug(">> Loading config file")
try:
self.config = json.load(open("config.json"))
# Sync missing attributes to active config
template_config = json.load(open("config_template.json"))
template_config.update(self.config)
self.config = template_config
except FileNotFoundError:
self.logger.error(
"You must copy config_template.json to config.json and populate the fields."
)
sys.exit(0)
# if no UUID is present, generate one and add it to the file
if "uuid" not in self.config:
self.config["uuid"] = str(uuid.uuid4())
with open("config.json", "w") as json_config_file:
json.dump(self.config, json_config_file, indent=2)
else:
self.config = config
self.DEV_MODE = False
try:
self.DEV_MODE = self.config["dev_mode"]
except Exception as err:
pass
fh.setLevel(self.config["log_level"])
self.logger.setLevel(self.config["log_level"])
self.api = PyMkmApi(config=self.config)
def report(self, command):
uuid = self.config["uuid"]
# if self.config["reporting"] and not self.DEV_MODE:
# try:
# r = requests.post(
# "https://andli-stats-server.herokuapp.com/pymkm",
# json={"command": command, "uuid": uuid, "version": __version__},
# )
# except Exception as err:
# self.logger.error("Connection error to stats server.")
# pass
pass
def check_latest_version(self):
latest_version = None
try:
r = requests.get("https://api.github.com/repos/andli/pymkm/releases/latest")
latest_version = r.json()["tag_name"]
except Exception as err:
self.logger.error("Connection error with github.com")
        if latest_version and parse_version(__version__) < parse_version(latest_version):
return f"Go to Github and download version {latest_version}! It's better!"
else:
return None
def start(self, args=None):
if not len(sys.argv) > 1: # if args have been passed
while True:
top_message = self.check_latest_version()
if hasattr(self, "DEV_MODE") and self.DEV_MODE:
top_message = "dev mode"
menu = micromenu.Menu(
f"PyMKM {__version__}",
top_message,
f"API calls used today: {self.api.requests_count}/{self.api.requests_max}",
cycle=False,
)
menu.add_function_item(
"Update stock prices",
self.update_stock_prices_to_trend,
{"api": self.api, "cli_called": False},
)
menu.add_function_item(
"Update price for a product",
self.update_product_to_trend,
{"api": self.api},
)
menu.add_function_item(
"List competition for a product",
self.list_competition_for_product,
{"api": self.api},
)
menu.add_function_item(
"Find deals from a user",
self.find_deals_from_user,
{"api": self.api},
)
menu.add_function_item(
f"Show top {self.config['show_top_x_expensive_items']} expensive items in stock",
self.show_top_expensive_articles_in_stock,
{
"num_articles": self.config["show_top_x_expensive_items"],
"api": self.api,
},
)
menu.add_function_item(
"Wantslists cleanup suggestions",
self.clean_purchased_from_wantslists,
{"api": self.api},
)
menu.add_function_item(
"Show account info", self.show_account_info, {"api": self.api}
)
menu.add_function_item(
"Clear entire stock (WARNING)",
self.clear_entire_stock,
{"api": self.api},
)
menu.add_function_item(
f"Import stock from {self.config['csv_import_filename']}",
self.import_from_csv,
{"api": self.api},
)
menu.add_function_item(
f"Track price data to {self.config['csv_prices_filename']}",
self.track_prices_to_csv,
{"api": self.api},
)
if self.DEV_MODE:
menu.add_function_item(
f"⚠ Check product id", self.check_product_id, {"api": self.api},
)
menu.add_function_item(
f"⚠ Add fake stock", self.add_fake_stock, {"api": self.api},
)
if self.api.requests_count < self.api.requests_max:
break_signal = menu.show()
else:
menu.print_menu()
self.logger.error("Out of quota, exiting app.")
sys.exit(0)
if break_signal:
break
else:
# command line interface
if args.price_check_wantslist:
self.track_prices_to_csv(
self.api, args.price_check_wantslist, args.cached
)
if args.update_stock:
self.update_stock_prices_to_trend(
self.api, args.update_stock, args.cached, args.partial
)
def check_product_id(self, api):
""" Dev function check on a product id. """
pid = int(PyMkmHelper.prompt_string("pid"))
product_json = api.get_product(pid)
del product_json["product"]["reprint"]
del product_json["product"]["links"]
pp = pprint.PrettyPrinter()
pp.pprint(product_json)
def add_fake_stock(self, api):
""" Dev function to add fake stock. """
range_start = int(PyMkmHelper.prompt_string("Range pid start"))
range_end = int(PyMkmHelper.prompt_string("Range pid end"))
if PyMkmHelper.prompt_bool("Sure?"):
print("Adding fake stock...")
product_list = []
for product_no in range(range_start, range_end):
product_list.append(
{
"idProduct": product_no,
"idLanguage": 1,
"count": 1,
"price": 1,
"comments": "TEST ARTICLE DO NOT BUY",
"condition": "PO",
"isFoil": "false",
}
)
api.add_stock(product_list)
def clean_json_for_upload(self, not_uploadable_json):
for entry in not_uploadable_json:
del entry["price_diff"]
del entry["old_price"]
del entry["name"]
return not_uploadable_json
def update_stock_prices_to_trend(self, api, cli_called, cached=None, partial=0):
""" This function updates all prices in the user's stock to TREND. """
self.report("update stock price to trend")
stock_list = self.get_stock_as_array(self.api, cli_called, cached)
already_checked_articles = PyMkmHelper.read_from_cache(
self.config["local_cache_filename"], "partial_updated"
)
if already_checked_articles:
print(
f"{len(already_checked_articles)} articles found in previous updates, ignoring those."
)
partial_stock_update_size = 0
if partial > 0:
partial_stock_update_size = partial
elif not cli_called:
partial_status_string = ""
if already_checked_articles:
partial_status_string = (
f"({len(already_checked_articles)}/{len(stock_list)} done)"
)
partial_stock_update_size = PyMkmHelper.prompt_string(
f"Partial update? {partial_status_string} \n"
+ "If so, enter number of cards (or press Enter to update all remaining stock)"
)
if partial_stock_update_size != "":
partial_stock_update_size = int(partial_stock_update_size)
else:
partial_stock_update_size = 0
if cli_called or self.config["never_undercut_local_market"]:
undercut_local_market = False
else:
undercut_local_market = PyMkmHelper.prompt_bool(
"Try to undercut local market? (slower, more requests)"
)
uploadable_json, checked_articles = self.calculate_new_prices_for_stock(
stock_list,
undercut_local_market,
partial_stock_update_size,
already_checked_articles,
api=self.api,
)
cache_size = 0
if checked_articles:
cache_size = PyMkmHelper.append_to_cache(
self.config["local_cache_filename"],
"partial_updated",
checked_articles,
)
if cache_size == len(stock_list):
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "partial_updated"
)
print(
f"Entire stock updated in partial updates. Partial update data cleared."
)
if len(uploadable_json) > 0:
self.display_price_changes_table(uploadable_json)
if cli_called or PyMkmHelper.prompt_bool(
"Do you want to update these prices?"
):
print("Updating prices...")
api.set_stock(uploadable_json)
print("Prices updated.")
else:
print("Prices not updated.")
else:
print("No prices to update.")
self.logger.debug("-> update_stock_prices_to_trend: Done")
def __filter(self, article_list):
sticky_price_char = self.config["sticky_price_char"]
# if we find the sticky price marker, filter out articles
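        # e.g. if sticky_price_char is "!", an article whose comment starts with
        # "!keep at 2.50" is treated as hand-priced and excluded from updates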
def filtered(stock_item):
if stock_item.get("comments"):
return stock_item.get("comments").startswith(sticky_price_char)
else:
return False
filtered_articles = [x for x in article_list if not filtered(x)]
return filtered_articles
def update_product_to_trend(self, api):
""" This function updates one product in the user's stock to TREND. """
self.report("update product price to trend")
search_string = PyMkmHelper.prompt_string("Search product name")
try:
articles = api.find_stock_article(search_string, 1)
except Exception as err:
print(err)
filtered_articles = self.__filter(articles)
### --- refactor?
if not filtered_articles:
print(f"{len(articles)} articles found, no editable prices.")
else:
if len(filtered_articles) > 1:
article = self.select_from_list_of_articles(filtered_articles)
else:
article = filtered_articles[0]
found_string = f"Found: {article['product']['enName']}"
if article["product"].get("expansion"):
found_string += f"[{article['product'].get('expansion')}] "
if article["isFoil"]:
found_string += f"[foil: {article['isFoil']}] "
if article["comments"]:
found_string += f"[comment: {article['comments']}] "
else:
found_string += "."
print(found_string)
undercut_local_market = PyMkmHelper.prompt_bool(
"Try to undercut local market? (slower, more requests)"
)
product = self.api.get_product(article["idProduct"])
r = self.update_price_for_article(
article, product, undercut_local_market, api=self.api
)
if r:
self.draw_price_changes_table([r])
print(
"\nTotal price difference: {}.".format(
str(
round(
sum(item["price_diff"] * item["count"] for item in [r]),
2,
)
)
)
)
if PyMkmHelper.prompt_bool("Do you want to update these prices?"):
# Update articles on MKM
print("Updating prices...")
api.set_stock(self.clean_json_for_upload([r]))
print("Price updated.")
else:
print("Prices not updated.")
else:
print("No prices to update.")
self.logger.debug("-> update_product_to_trend: Done")
def list_competition_for_product(self, api):
self.report("list competition for product")
print("Note: does not support playsets, booster displays etc (yet).")
search_string = PyMkmHelper.prompt_string("Search product name")
is_foil = PyMkmHelper.prompt_bool("Foil?")
try:
result = api.find_product(
search_string,
**{
# 'exact ': 'true',
"idGame": 1,
"idLanguage": 1,
# TODO: Add language support
},
)
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
else:
if result:
products = result
stock_list_products = [
x["idProduct"] for x in self.get_stock_as_array(api=self.api)
]
products = [
x for x in products if x["idProduct"] in stock_list_products
]
if len(products) == 0:
print("No matching cards in stock.")
else:
if len(products) > 1:
product = self.select_from_list_of_products(
[i for i in products if i["categoryName"] == "Magic Single"]
)
elif len(products) == 1:
product = products[0]
self.show_competition_for_product(
product["idProduct"], product["enName"], is_foil, api=self.api
)
else:
print("No results found.")
self.logger.debug("-> list_competition_for_product: Done")
def find_deals_from_user(self, api):
self.report("find deals from user")
search_string = PyMkmHelper.prompt_string("Enter username")
try:
result = api.find_user_articles(search_string)
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
else:
filtered_articles = [x for x in result if x.get("price") > 1]
# language from configured filter
language_filter_string = self.config["search_filters"]["language"]
if language_filter_string:
language_filter_code = api.get_language_code_from_string(
language_filter_string
)
if language_filter_code:
filtered_articles = [
x
for x in filtered_articles
if x.get("language").get("idLanguage") == language_filter_code
]
sorted_articles = sorted(
filtered_articles, key=lambda x: x["price"], reverse=True
)
print(
f"User '{search_string}' has {len(sorted_articles)} articles that meet the criteria."
)
num_searches = int(
PyMkmHelper.prompt_string(
f"Searching top X expensive cards for deals, choose X (1-{len(sorted_articles)})"
)
)
if 1 <= num_searches <= len(sorted_articles):
table_data = []
products_to_get = []
index = 0
bar = progressbar.ProgressBar(max_value=num_searches)
bar.update(index)
products_to_get = [
x["idProduct"] for x in sorted_articles[:num_searches]
]
products = api.get_items_async("products", products_to_get)
for article in sorted_articles[:num_searches]:
try:
p = next(
x
for x in products
if x["product"]["idProduct"] == article["idProduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
continue
name = p["product"]["enName"]
expansion = p["product"].get("expansion")
price = float(article["price"])
if expansion:
expansion_name = expansion.get("enName")
else:
expansion_name = "N/A"
if article.get("isFoil"):
market_price = p["product"]["priceGuide"]["TRENDFOIL"]
else:
market_price = p["product"]["priceGuide"]["TREND"]
if market_price > 0:
price_diff = price - market_price
percent_deal = round(-100 * (price_diff / market_price))
if price_diff < -1 or percent_deal >= 10:
table_data.append(
[
name,
expansion_name,
article.get("condition"),
article.get("language").get("languageName"),
"\u2713" if article.get("isFoil") else "",
"\u2713" if article.get("isPlayset") else "",
price,
market_price,
price_diff,
percent_deal,
]
)
index += 1
bar.update(index)
bar.finish()
if table_data:
print("Found some interesting prices:")
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[9], reverse=True),
headers=[
"Name",
"Expansion",
"Condition",
"Language",
"Foil",
"Playset",
"Price",
"Market price",
"Market diff",
"Deal %",
],
tablefmt="simple",
)
)
else:
print("Found no deals. :(")
else:
print("Invalid number.")
self.logger.debug("-> find_deals_from_user: Done")
def show_top_expensive_articles_in_stock(self, num_articles, api):
self.report("show top expensive in stock")
stock_list = self.get_stock_as_array(api=self.api)
table_data = []
total_price = 0
for article in stock_list:
name = article["product"]["enName"]
expansion = article.get("product").get("expansion")
foil = article.get("isFoil")
playset = article.get("isPlayset")
condition = article.get("condition")
language_code = article.get("language")
language_name = language_code.get("languageName")
price = article.get("price")
table_data.append(
[
name,
expansion,
"\u2713" if foil else "",
"\u2713" if playset else "",
language_name,
condition,
price,
]
)
total_price += price
if len(table_data) > 0:
print(
f"Top {str(num_articles)} most expensive articles in stock (total {len(stock_list)} items):\n"
)
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[6], reverse=True)[:num_articles],
headers=[
"Name",
"Expansion",
"Foil",
"Playset",
"Language",
"Condition",
"Price",
],
tablefmt="simple",
)
)
print("\nTotal stock value: {}".format(str(total_price)))
return None
def track_prices_to_csv(self, api, wantslist_name=None, cached=False):
self.report("track prices")
wantslists, wantslists_lists = self.get_wantslists_data(api, cached)
if wantslist_name is None:
selected_list = self.select_from_list_of_wantslists(wantslists)
selected_list_id = selected_list["idWantslist"]
else:
selected_list_id = next(
x["idWantslist"] for x in wantslists if x["name"] == wantslist_name
)
# TODO: fails for metaproduct
products_to_get = [
x["idProduct"]
for x in wantslists_lists[selected_list_id]
if x["type"] == "product"
]
for x in wantslists_lists[selected_list_id]:
if x["type"] == "metaproduct":
self.logger.warning(
f"Wantslist contains metaproduct ({x['metaproduct']['enName']}) which cannot be used to get prices."
)
updated_products = []
try:
updated_products = api.get_items_async("products", products_to_get)
except Exception as err:
pass
# Write to CSV:
if len(updated_products) > 0:
# if blank, then header: datetime, productid, priceguide labels
example_priceguide = updated_products[0]["product"]["priceGuide"]
priceguide_header_items = [k for k in example_priceguide.keys()]
header_list = [
"datetime",
"product id",
"name",
"expansion",
]
header_list.extend(priceguide_header_items)
data_array = []
for product in updated_products:
price_data_exploded = [
k for k in product["product"]["priceGuide"].values()
]
data_row = [
datetime.now().isoformat(" "),
product["product"]["idProduct"],
product["product"]["enName"],
product["product"]["expansion"]["enName"],
]
data_row.extend(price_data_exploded)
data_array.append(data_row)
self.write_to_csv(header_list, data_array)
def write_to_csv(self, header_list, data_array):
if len(data_array) > 0:
try:
with open(
self.config["csv_prices_filename"],
"a",
newline="",
encoding="utf-8",
) as csv_a, open(self.config["csv_prices_filename"], "r",) as csv_r:
csv_reader = csv.reader(csv_r)
row_count = sum(1 for row in csv_reader)
csv_writer = csv.writer(csv_a, delimiter=";")
if row_count == 0:
csv_writer.writerow(header_list)
csv_writer.writerows(data_array)
self.logger.debug(
f"write_to_csv:: {len(data_array)} lines written to {self.config['csv_prices_filename']}."
)
print(
f"Wrote {len(data_array)} price updates to {self.config['csv_prices_filename']}."
)
except Exception as err:
print(err.value)
def clean_purchased_from_wantslists(self, api):
self.report("clean wantslists")
print("This will show items in your wantslists you have already received.")
wantslists, wantslists_lists = self.get_wantslists_data(api)
try:
print("Gettings received orders from Cardmarket...")
received_orders = api.get_orders("buyer", "received", start=1)
except Exception as err:
print(err)
if wantslists_lists and received_orders:
purchased_product_ids = []
purchased_products = []
for (
order
) in received_orders: # TODO: foil in purchase removes non-foil in wants
purchased_product_ids.extend(
[i["idProduct"] for i in order.get("article")]
)
purchased_products.extend(
{
"id": i["idProduct"],
"foil": i.get("isFoil"),
"count": i["count"],
"date": order["state"]["dateReceived"],
}
for i in order.get("article")
)
purchased_products = sorted(
purchased_products, key=lambda t: t["date"], reverse=True
)
total_number_of_items = sum([len(x) for x in wantslists_lists.values()])
index = 0
print("Matching received purchases with wantslists...")
bar = progressbar.ProgressBar(max_value=total_number_of_items)
matches = []
for key, articles in wantslists_lists.items():
metaproducts_article_list = [
x for x in articles if x.get("type") == "metaproduct"
]
metaproducts_to_get = [
x["idMetaproduct"] for x in metaproducts_article_list
]
metaproduct_list = api.get_items_async(
"metaproducts", metaproducts_to_get
)
for article in articles:
a_type = article.get("type")
                    a_foil = article.get("isFoil") is True
product_matches = []
if a_type == "metaproduct":
try:
metaproduct = next(
x
for x in metaproduct_list
if x["metaproduct"]["idMetaproduct"]
== article["idMetaproduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
continue
metaproduct_product_ids = [
i["idProduct"] for i in metaproduct["product"]
]
product_matches = [
i
for i in purchased_products
if i["id"] in metaproduct_product_ids
and i["foil"] == a_foil
]
else:
a_product_id = article.get("idProduct")
product_matches = [
i
for i in purchased_products
if i["id"] == a_product_id and i["foil"] == a_foil
]
if product_matches:
match = {
"wantlist_id": key,
"wantlist_name": wantslists[key],
"date": product_matches[0]["date"],
"is_foil": a_foil,
"count": sum([x.get("count") for x in product_matches]),
}
if a_type == "product":
match.update(
{
"product_id": a_product_id,
"product_name": article.get("product").get(
"enName"
),
"expansion_name": article.get("product").get(
"expansionName"
),
}
)
elif a_type == "metaproduct":
match.update(
{
"metaproduct_id": article.get("idMetaproduct"),
"product_name": article.get("metaproduct").get(
"enName"
),
"expansion_name": article.get("metaproduct").get(
"expansionName"
),
}
)
matches.append(match)
index += 1
bar.update(index)
bar.finish()
if matches:
print(
tb.tabulate(
[
[
item["wantlist_name"],
item["count"],
"\u2713" if item["is_foil"] else "",
item["product_name"],
item["expansion_name"],
item["date"],
]
for item in matches
],
headers=[
"Wantlist",
"# bought",
"Foil",
"Name",
"Expansion",
"Date (last) received",
],
tablefmt="simple",
)
)
else:
print("No cleanup needed.")
else:
print("No wantslists or received orders.")
def show_account_info(self, api):
self.report("show account info")
pp = pprint.PrettyPrinter()
pp.pprint(api.get_account())
self.logger.debug("-> show_account_info: Done")
def clear_entire_stock(self, api):
self.report("clear entire stock")
stock_list = self.get_stock_as_array(api=self.api)
if PyMkmHelper.prompt_bool(
"Do you REALLY want to clear your entire stock ({} items)?".format(
len(stock_list)
)
):
# for article in stock_list:
# article['count'] = 0
delete_list = [
{"count": x["count"], "idArticle": x["idArticle"]} for x in stock_list
]
print("Clearing stock...")
api.delete_stock(delete_list)
self.logger.debug("-> clear_entire_stock: done")
print("Stock cleared.")
PyMkmHelper.clear_cache(self.config["local_cache_filename"], "stock")
else:
print("Aborted.")
def import_from_csv(self, api):
self.report("import from csv")
print(
"Note the required format: Card, Set name, Quantity, Foil, Language (with header row)."
)
problem_cards = []
with open(self.config["csv_import_filename"], newline="") as csvfile:
csv_reader = csvfile.readlines()
index = 0
card_rows = (sum(1 for row in csv_reader)) - 1
bar = progressbar.ProgressBar(max_value=card_rows)
self.logger.debug(f"-> import_from_csv: {card_rows} cards in csv file.")
csvfile.seek(0)
for row in csv_reader:
row = row.rstrip()
row_array = row.split(",")
if index > 0:
row_array = [x.strip('"') for x in row_array]
try:
(name, set_name, count, foil, language, *other) = row_array
except Exception as err:
problem_cards.append(row_array)
else:
foil = True if foil.lower() == "foil" else False
if not self.match_card_and_add_stock(
api, name, set_name, count, foil, language, *other
):
problem_cards.append(row_array)
bar.update(index)
index += 1
bar.finish()
if len(problem_cards) > 0:
try:
with open(
"failed_imports.csv", "w", newline="", encoding="utf-8"
) as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerows(problem_cards)
self.logger.debug(
f"import_from_csv:: {len(problem_cards)} failed imports."
)
print(
f"Wrote {len(problem_cards)} failed imports to failed_imports.csv"
)
print("Report failures as an issue in the pymkm GitHub repo, please!")
except Exception as err:
                print(err)
else:
print("All cards added successfully")
# End of menu item functions ============================================
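# --- Illustrative sketch (not part of pymkm) --------------------------------
# Producing a CSV file in the shape import_from_csv expects: a header row,
# then Card, Set name, Quantity, Foil, Language. The file name is an
# arbitrary example.
import csv

rows = [
    ["Card", "Set name", "Quantity", "Foil", "Language"],
    ["Llanowar Elves", "Dominaria", "4", "", "English"],
    ["Lightning Bolt", "Magic 2010", "1", "Foil", "German"],
]
with open("example_import.csv", "w", newline="", encoding="utf-8") as f:
    csv.writer(f).writerows(rows)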
    def get_wantslists_data(self, api, cached=False):
        # Check for cached wantslists
        local_wantslists_cache = PyMkmHelper.read_from_cache(
            self.config["local_cache_filename"], "wantslists"
        )
        local_wantslists_lists_cache = PyMkmHelper.read_from_cache(
            self.config["local_cache_filename"], "wantslists_lists"
        )
        if local_wantslists_cache:
            if cached or PyMkmHelper.prompt_bool(
                f"Cached wantslists ({len(local_wantslists_cache)} items) found, use it? (if not, it will be cleared)"
            ):
                return local_wantslists_cache, local_wantslists_lists_cache
            else:
                PyMkmHelper.clear_cache(
                    self.config["local_cache_filename"], "wantslists"
                )
                PyMkmHelper.clear_cache(
                    self.config["local_cache_filename"], "wantslists_lists"
                )
                return self.get_wantslists_data(api)
else: # no local cache
wantslists = []
wantslists_lists = {}
try:
print("Gettings wantslists from Cardmarket...")
wantslists = api.get_wantslists()
wantslists_lists = {
item["idWantslist"]: api.get_wantslist_items(item["idWantslist"])[
"item"
]
for item in wantslists
}
except Exception as err:
print(err)
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"], "wantslists", wantslists
)
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"],
"wantslists_lists",
wantslists_lists,
)
return wantslists, wantslists_lists
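# --- Illustrative sketch (not part of pymkm) --------------------------------
# The read-through cache pattern used above: try the cache, fall back to a
# fetch, then store the result. The dict-backed store here is a stand-in for
# PyMkmHelper's cache helpers, whose exact behavior this sketch assumes.
_cache = {}

def read_through(key, fetch):
    if key in _cache:
        return _cache[key]  # cache hit
    value = fetch()         # cache miss: do the expensive call once
    _cache[key] = value
    return value

print(read_through("wantslists", lambda: ["list A", "list B"]))
print(read_through("wantslists", lambda: ["never called"]))  # served from cache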
def match_card_and_add_stock(
self, api, name, set_name, count, foil, language, *other
):
if all(v != "" for v in [name, set_name, count]):
            try:
                possible_products = api.find_product(name, idGame="1")  # ["product"]
            except CardmarketError as err:
                self.logger.error(err.mkm_msg())
                print(err.mkm_msg())
                return False
            except Exception:
                return False
else:
if len(possible_products) == 0:
# no viable match
return False
else:
product_match = [
x
for x in possible_products
if x["categoryName"] == "Magic Single"
and self.card_equals(
x["enName"], x["expansionName"], name, set_name
)
]
if len(product_match) == 1:
language_id = (
1 if language == "" else api.languages.index(language) + 1
)
product = api.get_product(product_match[0]["idProduct"])
price = self.get_price_for_product(
product,
product_match[0]["rarity"],
self.config["csv_import_condition"],
foil,
False,
language_id=language_id,
api=self.api,
)
card = {
"idProduct": product_match[0]["idProduct"],
"idLanguage": language_id,
"count": count,
"price": str(price),
"condition": self.config["csv_import_condition"],
"isFoil": ("true" if foil else "false"),
}
api.add_stock([card])
return True
else:
# no single matching card
return False
else:
# incomplete data from card scanner
return False
def card_equals(self, db_cardname, db_setname, local_cardname, local_setname):
# TODO: add some sort of string distance like Levenshtein
filtered_db_cardname = db_cardname.replace(",", "")
filtered_db_cardname = filtered_db_cardname.replace("Æ", "Ae")
if db_setname != local_setname:
return False
else:
# filter for flip card / split card names
if filtered_db_cardname == local_cardname or (
"/" in filtered_db_cardname
and filtered_db_cardname.startswith(local_cardname)
):
return True
else:
return False
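# --- Illustrative sketch (not part of pymkm) --------------------------------
# One possible answer to the TODO above: a tolerance-based comparison with
# difflib from the standard library instead of strict string equality. This
# is a suggestion, not the matching pymkm actually performs.
from difflib import SequenceMatcher

def names_roughly_equal(a, b, threshold=0.9):
    a, b = a.lower().replace(",", ""), b.lower().replace(",", "")
    return SequenceMatcher(None, a, b).ratio() >= threshold

print(names_roughly_equal("Ajani Caller of the Pride", "Ajani, Caller of the Pride"))  # True
print(names_roughly_equal("Lightning Bolt", "Lightning Helix"))  # False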
def select_from_list_of_wantslists(self, wantslists):
index = 1
for wantlist in wantslists:
print(f"{index}: {wantlist['name']} ({wantlist['game']['abbreviation']})")
index += 1
choice = int(input("Choose wantslist: "))
return wantslists[choice - 1]
def select_from_list_of_products(self, products):
index = 1
for product in products:
print(
"{}: {} [{}] {}".format(
index,
product["enName"],
product["expansionName"],
product["rarity"],
)
)
index += 1
choice = ""
        while not isinstance(choice, int) or not (1 <= choice <= len(products)):
try:
choice = int(input("Choose card: "))
except ValueError as err:
print("Not a number.")
return products[choice - 1]
def select_from_list_of_articles(self, articles):
index = 1
for article in articles:
product = article["product"]
print(
f'{index}: {product["enName"]}[{product["expansion"]}], foil: {article["isFoil"]}, comment: {article["comments"]}'
)
index += 1
choice = int(input("Choose card: "))
return articles[choice - 1]
def show_competition_for_product(self, product_id, product_name, is_foil, api):
print("Selected product: {}".format(product_name))
table_data_local, table_data = self.get_competition(api, product_id, is_foil)
if table_data_local:
self.print_product_top_list("Local competition:", table_data_local, 4, 20)
if table_data:
self.print_product_top_list("Top 20 cheapest:", table_data, 4, 20)
else:
print("No prices found.")
def get_competition(self, api, product_id, is_foil):
# TODO: Add support for playsets
# TODO: Add support for card condition
self.account = api.get_account()["account"]
country_code = self.account["country"]
config = self.config
is_altered = config["search_filters"]["isAltered"]
is_signed = config["search_filters"]["isSigned"]
min_condition = config["search_filters"]["minCondition"]
user_type = config["search_filters"]["userType"]
id_language = config["search_filters"]["idLanguage"]
articles = api.get_articles(
product_id,
**{
"isFoil": str(is_foil).lower(),
"isAltered": is_altered,
"isSigned": is_signed,
"minCondition": min_condition,
"country": country_code,
"userType": user_type,
"idLanguage": id_language,
},
)
table_data = []
table_data_local = []
for article in articles:
username = article["seller"]["username"]
if article["seller"]["username"] == self.account["username"]:
username = "-> " + username
item = [
username,
article["seller"]["address"]["country"],
article["condition"],
article["language"]["languageName"],
article["count"],
article["price"],
]
if article["seller"]["address"]["country"] == country_code:
table_data_local.append(item)
table_data.append(item)
return table_data_local, table_data
def print_product_top_list(self, title_string, table_data, sort_column, rows):
print(70 * "-")
print("{} \n".format(title_string))
print(
tb.tabulate(
sorted(table_data, key=lambda x: x[sort_column], reverse=False)[:rows],
headers=[
"Username",
"Country",
"Condition",
"Language",
"Count",
"Price",
],
tablefmt="simple",
)
)
print(70 * "-")
print(
"Total average price: {}, Total median price: {}, Total # of articles: {}\n".format(
str(PyMkmHelper.calculate_average(table_data, 4, 5)),
str(PyMkmHelper.calculate_median(table_data, 4, 5)),
str(len(table_data)),
)
)
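# --- Illustrative sketch (not part of pymkm) --------------------------------
# What the summary line above most plausibly computes, given the column
# indexes 4 (count) and 5 (price): a count-weighted mean, and the median of
# the prices repeated by count. This is an assumption about PyMkmHelper's
# calculate_average/calculate_median, shown on toy rows.
import statistics

rows = [
    ["seller_a", "DE", "NM", "English", 3, 1.00],
    ["seller_b", "DE", "EX", "English", 1, 2.00],
]
counts = [r[4] for r in rows]
prices = [r[5] for r in rows]
weighted_avg = sum(c * p for c, p in zip(counts, prices)) / sum(counts)
expanded = [p for c, p in zip(counts, prices) for _ in range(c)]
print(round(weighted_avg, 2), statistics.median(expanded))  # 1.25 1.0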
def calculate_new_prices_for_stock(
self,
stock_list,
undercut_local_market,
partial_stock_update_size,
already_checked_articles,
api,
):
filtered_stock_list = self.__filter(stock_list)
sticky_count = len(stock_list) - len(filtered_stock_list)
# articles_in_shoppingcarts = api.get_articles_in_shoppingcarts()
if already_checked_articles:
filtered_stock_list = [
x
for x in filtered_stock_list
if x["idArticle"] not in already_checked_articles
]
if len(filtered_stock_list) == 0:
PyMkmHelper.clear_cache(
self.config["local_cache_filename"], "partial_updated"
)
            print(
                "Entire stock updated in partial updates. Partial update data cleared."
            )
return [], []
if partial_stock_update_size:
filtered_stock_list = filtered_stock_list[:partial_stock_update_size]
result_json = []
checked_articles = []
total_price = 0
index = 0
bar = progressbar.ProgressBar(max_value=len(filtered_stock_list))
bar.update(index)
products_to_get = [x["idProduct"] for x in filtered_stock_list]
product_list = api.get_items_async("products", products_to_get)
product_list = [x for x in product_list if x]
for article in filtered_stock_list:
try:
product = next(
x
for x in product_list
if x["product"]["idProduct"] == article["idProduct"]
)
except StopIteration:
# Stock item not found in update batch, continuing
self.logger.error(
f"aid {article['idArticle']} pid {article['idProduct']} - {article['product']['enName']} {article['product']['expansion']} failed to find a product"
)
continue
checked_articles.append(article.get("idArticle"))
updated_article = self.update_price_for_article(
article, product, undercut_local_market, api=self.api
)
if updated_article:
result_json.append(updated_article)
total_price += updated_article.get("price")
else:
total_price += article.get("price")
index += 1
bar.update(index)
bar.finish()
print("Value in this update: {}".format(str(round(total_price, 2))))
if len(stock_list) != len(filtered_stock_list):
print(f"Note: {sticky_count} items filtered out because of sticky prices.")
return result_json, checked_articles
def update_price_for_article(
self, article, product, undercut_local_market=False, api=None
):
new_price = self.get_price_for_product(
product,
article["product"].get("rarity"),
article.get("condition"),
article.get("isFoil", False),
article.get("isPlayset", False),
language_id=article["language"]["idLanguage"],
undercut_local_market=undercut_local_market,
api=self.api,
)
if new_price:
price_diff = new_price - article["price"]
if price_diff != 0:
return {
"name": article["product"]["enName"],
"isFoil": article.get("isFoil", False),
"isPlayset": article.get("isPlayset", False),
"language": article["language"]["languageName"],
"condition": article["condition"],
"old_price": article["price"],
"price": new_price,
"price_diff": price_diff,
"idArticle": article["idArticle"],
"count": article["count"],
}
def get_rounding_limit_for_rarity(self, rarity, product_id):
rounding_limit = float(self.config["price_limit_by_rarity"]["default"])
try:
rounding_limit = float(self.config["price_limit_by_rarity"][rarity.lower()])
except KeyError as err:
print(
f"ERROR: Unknown rarity '{rarity}' (pid: {product_id}). Using default rounding."
)
return rounding_limit
def get_discount_for_condition(self, condition):
try:
discount = float(self.config["discount_by_condition"][condition])
except KeyError as err:
print(f"ERROR: Unknown condition '{condition}'.")
raise err
else:
return discount
def get_price_for_product(
self,
product,
rarity,
condition,
is_foil,
is_playset,
language_id=1,
undercut_local_market=False,
api=None,
):
rounding_limit = self.get_rounding_limit_for_rarity(
rarity, product["product"]["idProduct"]
)
if not is_foil:
trend_price = product["product"]["priceGuide"]["TREND"]
else:
trend_price = product["product"]["priceGuide"]["TRENDFOIL"]
# Set competitive price for region
if undercut_local_market:
table_data_local, table_data = self.get_competition(
api, product["product"]["idProduct"], is_foil
)
if len(table_data_local) > 0:
# Undercut if there is local competition
lowest_in_country = PyMkmHelper.get_lowest_price_from_table(
table_data_local, 4
)
new_price = max(
rounding_limit,
min(trend_price, lowest_in_country - rounding_limit),
)
else:
# No competition in our country, set price a bit higher.
new_price = trend_price * 1.2
else: # don't try to undercut local market
new_price = trend_price
if new_price is None:
raise ValueError("No price found!")
else:
if is_playset:
new_price = 4 * new_price
# Apply condition discount
if condition:
new_price = new_price * self.get_discount_for_condition(condition)
# Round
new_price = PyMkmHelper.round_up_to_multiple_of_lower_limit(
rounding_limit, new_price
)
return new_price
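# --- Illustrative sketch (not part of pymkm) --------------------------------
# A worked example of the pricing rules above on made-up numbers: undercut
# the cheapest local price by one rounding step (capped by the trend price,
# floored at the rounding limit), quadruple for playsets, apply the condition
# discount, then round up to a multiple of the limit.
import math

def sketch_price(trend, lowest_local, rounding_limit=0.25,
                 is_playset=False, condition_discount=1.0):
    price = max(rounding_limit, min(trend, lowest_local - rounding_limit))
    if is_playset:
        price *= 4
    price *= condition_discount
    return math.ceil(price / rounding_limit) * rounding_limit  # round *up*

print(sketch_price(trend=2.00, lowest_local=1.80))  # 1.75
print(sketch_price(trend=2.00, lowest_local=5.00, condition_discount=0.8))  # 1.75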
def display_price_changes_table(self, changes_json):
num_items = self.config["show_num_best_worst_items"]
print("\nBest diffs:\n")
sorted_best = sorted(changes_json, key=lambda x: x["price_diff"], reverse=True)[
:num_items
]
self.draw_price_changes_table(i for i in sorted_best if i["price_diff"] > 0)
print("\nWorst diffs:\n")
sorted_worst = sorted(changes_json, key=lambda x: x["price_diff"])[:num_items]
self.draw_price_changes_table(i for i in sorted_worst if i["price_diff"] < 0)
        print(
            "\nTotal price difference: {}.".format(
                str(
                    round(
                        # Sum over every change, not only the displayed best
                        # diffs; this resolves the TODO about a wrong summary.
                        sum(item["price_diff"] * item["count"] for item in changes_json),
                        2,
                    )
                )
            )
        )
def draw_price_changes_table(self, sorted_best):
print(
tb.tabulate(
[
[
item["count"],
item["name"],
"\u2713" if item["isFoil"] else "",
"\u2713" if item["isPlayset"] else "",
item["condition"],
item["language"],
item["old_price"],
item["price"],
item["price_diff"],
]
for item in sorted_best
],
headers=[
"Count",
"Name",
"Foil",
"Playset",
"Condition",
"Language",
"Old price",
"New price",
"Diff",
],
tablefmt="simple",
)
)
def get_stock_as_array(self, api, cli_called=False, cached=None):
# Check for cached stock
local_stock_cache = None
local_stock_cache = PyMkmHelper.read_from_cache(
self.config["local_cache_filename"], "stock"
)
if local_stock_cache:
if not cli_called:
if PyMkmHelper.prompt_bool(
f"Cached stock ({len(local_stock_cache)} items) found, use it? Note that prices may be outdated."
):
return local_stock_cache
else:
if cached:
return local_stock_cache
PyMkmHelper.clear_cache(self.config["local_cache_filename"], "stock")
print(
"Getting your stock from Cardmarket (the API can be slow for large stock)..."
)
try:
d = api.get_stock()
except CardmarketError as err:
self.logger.error(err.mkm_msg())
print(err.mkm_msg())
sys.exit(0)
# except Exception as err:
# msg = f"No response from API. Error: {err}"
# print(msg)
# self.logger.error(msg)
# sys.exit(0)
else:
keys = [
"idArticle",
"idProduct",
"product",
"count",
"comments",
"price",
"condition",
"isFoil",
"isPlayset",
"isSigned",
"language",
]
stock_list = [
{x: y for x, y in article.items() if x in keys} for article in d
]
print("Stock fetched.")
PyMkmHelper.store_to_cache(
self.config["local_cache_filename"], "stock", stock_list
)
return stock_list
| 38.319511
| 168
| 0.482064
| 5,261
| 56,368
| 4.942026
| 0.105303
| 0.018077
| 0.005077
| 0.010769
| 0.327077
| 0.239846
| 0.199577
| 0.163731
| 0.127269
| 0.098923
| 0
| 0.004573
| 0.425809
| 56,368
| 1,470
| 169
| 38.345578
| 0.798678
| 0.037539
| 0
| 0.287175
| 0
| 0.003934
| 0.154995
| 0.018795
| 0
| 0
| 0
| 0.00068
| 0
| 1
| 0.029111
| false
| 0.00236
| 0.02203
| 0
| 0.076318
| 0.063729
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8829aec3d5b9877236b2115916c5ca2a14ab73b
| 333
|
py
|
Python
|
Datasets/Terrain/us_ned_physio_diversity.py
|
monocilindro/qgis-earthengine-examples
|
82aea8926d34ed3f4ad4a4a345ddbd225819d28f
|
[
"MIT"
] | 646
|
2019-12-03T06:09:03.000Z
|
2022-03-28T03:37:08.000Z
|
Datasets/Terrain/us_ned_physio_diversity.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 10
|
2019-12-30T03:42:44.000Z
|
2021-05-22T07:34:07.000Z
|
Datasets/Terrain/us_ned_physio_diversity.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 219
|
2019-12-06T02:20:53.000Z
|
2022-03-30T15:14:27.000Z
|
import ee
from ee_plugin import Map
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
physiographicDiversity = dataset.select('b1')
physiographicDiversityVis = {
'min': 0.0,
'max': 1.0,
}
Map.setCenter(-94.625, 39.825, 7)
Map.addLayer(
physiographicDiversity, physiographicDiversityVis,
'Physiographic Diversity')
| 23.785714
| 54
| 0.738739
| 40
| 333
| 6.1
| 0.7
| 0.016393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062069
| 0.129129
| 333
| 13
| 55
| 25.615385
| 0.77931
| 0
| 0
| 0
| 0
| 0
| 0.186186
| 0.093093
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8845f1c14219b145ec8b7fa1bba57f5b2418dfb
| 497
|
py
|
Python
|
bin/base64util.py
|
SnowleopardXI/stash
|
a14f016e5b568095af8d1e78addedc562e3cde70
|
[
"MIT"
] | null | null | null |
bin/base64util.py
|
SnowleopardXI/stash
|
a14f016e5b568095af8d1e78addedc562e3cde70
|
[
"MIT"
] | null | null | null |
bin/base64util.py
|
SnowleopardXI/stash
|
a14f016e5b568095af8d1e78addedc562e3cde70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import base64
print('Choose your choice:')
n='''
1:Encode string to base64
2:Decode base64 to string
'''
c = int(input(n))  # read the menu choice (eval() on user input is unsafe and unnecessary)
if c == 1:  # encode branch
    print('Type string to be encoded:')
    inp = input()
    out = str(base64.encodebytes(inp.encode("utf-8")), "utf-8")
    print(out)  # str() strips the leading b'' from the encoded bytes
if c == 2:  # decode branch
    print('Type string to be decoded:')
    inp2 = bytes(input(), 'utf-8')
    dec = base64.decodebytes(inp2)
    print(dec.decode())
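# --- Illustrative roundtrip (not part of the script above) ------------------
# Composing the two branches shows that decoding the encoded output recovers
# the original string.
import base64

original = "hello, stash"
encoded = str(base64.encodebytes(original.encode("utf-8")), "utf-8")
decoded = base64.decodebytes(encoded.encode("utf-8")).decode()
assert decoded == original
print(encoded.strip(), "->", decoded)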
| 24.85
| 63
| 0.593561
| 73
| 497
| 4.041096
| 0.506849
| 0.054237
| 0.101695
| 0.115254
| 0.128814
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0.225352
| 497
| 19
| 64
| 26.157895
| 0.711688
| 0.092555
| 0
| 0
| 0
| 0
| 0.365471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c88551ac723dd08106aa9434592b74d5d60bf757
| 2,614
|
py
|
Python
|
linefinder/job_scripts/linefinder_sightlines.py
|
zhafen/linefinder
|
0f4f36a83246f1b833d0c281e635d86be3d1eb95
|
[
"MIT"
] | null | null | null |
linefinder/job_scripts/linefinder_sightlines.py
|
zhafen/linefinder
|
0f4f36a83246f1b833d0c281e635d86be3d1eb95
|
[
"MIT"
] | 12
|
2018-08-26T14:10:18.000Z
|
2021-04-15T21:48:58.000Z
|
linefinder/job_scripts/linefinder_sightlines.py
|
zhafen/linefinder
|
0f4f36a83246f1b833d0c281e635d86be3d1eb95
|
[
"MIT"
] | 1
|
2021-05-19T16:45:21.000Z
|
2021-05-19T16:45:21.000Z
|
import linefinder.linefinder as linefinder
import linefinder.config as linefinder_config
import linefinder.utils.file_management as file_management
########################################################################
sim_name = 'm12i'
'''The simulation to run tracking on.'''
tag = '{}_sightline'.format( sim_name )
'''Identifying tag used as part of the filenames.
E.g. the IDs file will have the format `ids_{}.hdf5.format( tag )`.
'''
# Tracking Parameters
tracker_kwargs = {
# What particle types to track. Typically just stars and gas.
'p_types': [ 0, 4,],
# What snapshots to compile the particle tracks for.
'snum_start': 1,
'snum_end': 600,
'snum_step': 1,
}
file_manager = file_management.FileManager()
sampler_kwargs = {
'ignore_duplicates': True,
'p_types': [ 0, 4 ],
'snapshot_kwargs': {
'sdir': file_manager.get_sim_dir( sim_name ),
'halo_data_dir': file_manager.get_halo_dir( sim_name ),
'main_halo_id': linefinder_config.MAIN_MT_HALO_ID[sim_name],
'ahf_index': 600,
'length_scale_used': 'R_vir',
}
}
visualization_kwargs = {
'install_firefly': True,
'export_to_firefly_kwargs': {
'firefly_dir': '/work/03057/zhafen/firefly_repos/sightline',
'classifications': [
'is_in_CGM',
'is_CGM_IGM_accretion',
'is_CGM_wind',
'is_CGM_satellite_wind',
'is_CGM_satellite_ISM',
],
'classification_ui_labels': [ 'All', 'IGMAcc', 'Wind', 'SatWind', 'Sat' ],
'tracked_properties': [
'logT',
'logZ',
'logDen',
'vr_div_v_cool',
'logvr_div_v_cool_offset',
],
'tracked_filter_flags': [ True, ] * 5,
'tracked_colormap_flags': [ True, ] * 5,
'snum': 465,
},
}
# This is the actual function that runs linefinder.
# In general you don't need to touch this function but if you want to,
# for example, turn off one of the steps because you're rerunning and you
# already did that step, you can do so below.
linefinder.run_linefinder_jug(
sim_name = sim_name,
tag = tag,
galdef = '_galdefv3',
# The galdef is a set of parameters used for the galaxy linking and
# classification steps. Don't touch this unless you know what you're doing.
tracker_kwargs = tracker_kwargs,
sampler_kwargs = sampler_kwargs,
visualization_kwargs = visualization_kwargs,
run_id_selecting = False,
run_id_sampling = False,
run_tracking = False,
run_galaxy_linking = False,
run_classifying = False,
)
| 30.045977
| 82
| 0.630451
| 326
| 2,614
| 4.773006
| 0.484663
| 0.031491
| 0.008997
| 0.010283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013046
| 0.237567
| 2,614
| 86
| 83
| 30.395349
| 0.767687
| 0.193191
| 0
| 0.033333
| 0
| 0
| 0.271704
| 0.083601
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8864bea2e2f25d967c38986aef9fb5517d5143b
| 285
|
py
|
Python
|
SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py
|
dingchaofan/AlgorithmSolution
|
46198e3f0dbda867e7b75f0d0e52be5f0181238a
|
[
"MIT"
] | 1
|
2020-06-23T02:18:39.000Z
|
2020-06-23T02:18:39.000Z
|
SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py
|
dingchaofan/AlgorithmSolution
|
46198e3f0dbda867e7b75f0d0e52be5f0181238a
|
[
"MIT"
] | null | null | null |
SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py
|
dingchaofan/AlgorithmSolution
|
46198e3f0dbda867e7b75f0d0e52be5f0181238a
|
[
"MIT"
] | 1
|
2021-01-11T12:07:03.000Z
|
2021-01-11T12:07:03.000Z
|
# 47. Compute 1+2+3+...+n
# Compute 1+2+3+...+n without using multiplication/division, the keywords
# for/while/if/else/switch/case, or the conditional expression (A?B:C).
# -*- coding:utf-8 -*-
class Solution:
    def Sum_Solution(self, n):
        # Short-circuit `and` stands in for the forbidden `if`: once n reaches
        # 0, the right-hand side is never evaluated and the recursion stops.
        return n and n + self.Sum_Solution(n - 1)
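# Quick check of the recursion against the closed form n*(n+1)/2 (the closed
# form uses multiplication, so it serves only as verification here).
s = Solution()
assert all(s.Sum_Solution(n) == n * (n + 1) // 2 for n in range(20))
print(s.Sum_Solution(100))  # 5050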
| 21.923077
| 73
| 0.540351
| 44
| 285
| 3.454545
| 0.659091
| 0.039474
| 0.052632
| 0.065789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048077
| 0.270175
| 285
| 13
| 74
| 21.923077
| 0.682692
| 0.438596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c889096998408750f88d5b4c179ee06539614ee4
| 48,562
|
py
|
Python
|
hawc_hal/HAL.py
|
torresramiro350/hawc_hal
|
048536df22bdfa3ace2925e60d802beb76775849
|
[
"BSD-3-Clause"
] | null | null | null |
hawc_hal/HAL.py
|
torresramiro350/hawc_hal
|
048536df22bdfa3ace2925e60d802beb76775849
|
[
"BSD-3-Clause"
] | null | null | null |
hawc_hal/HAL.py
|
torresramiro350/hawc_hal
|
048536df22bdfa3ace2925e60d802beb76775849
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division
from builtins import str
from builtins import range
from astropy.utils.misc import isiterable
from past.utils import old_div
import copy
import collections
import numpy as np
import healpy as hp
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import poisson
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve_fft as convolve
from astropy.coordinates import Angle
from threeML.plugin_prototype import PluginPrototype
from threeML.utils.statistics.gammaln import logfactorial
from threeML.parallel import parallel_client
from threeML.io.logging import setup_logger
log = setup_logger(__name__)
log.propagate = False
from tqdm.auto import tqdm
from astromodels import Parameter
from hawc_hal.maptree import map_tree_factory
from hawc_hal.maptree.map_tree import MapTree
from hawc_hal.maptree.data_analysis_bin import DataAnalysisBin
from hawc_hal.response import hawc_response_factory
from hawc_hal.convolved_source import ConvolvedPointSource, \
ConvolvedExtendedSource3D, ConvolvedExtendedSource2D, ConvolvedSourcesContainer
from hawc_hal.healpix_handling import FlatSkyToHealpixTransform
from hawc_hal.healpix_handling import SparseHealpix
from hawc_hal.healpix_handling import get_gnomonic_projection
from hawc_hal.psf_fast import PSFConvolutor
from hawc_hal.log_likelihood import log_likelihood
from hawc_hal.util import ra_to_longitude
class HAL(PluginPrototype):
"""
The HAWC Accelerated Likelihood plugin for 3ML.
:param name: name for the plugin
:param maptree: Map Tree (either ROOT or hdf5 format)
:param response: Response of HAWC (either ROOT or hd5 format)
:param roi: a ROI instance describing the Region Of Interest
:param flat_sky_pixels_size: size of the pixel for the flat sky projection (Hammer Aitoff)
"""
def __init__(self, name, maptree, response_file, roi, flat_sky_pixels_size=0.17):
# Store ROI
self._roi = roi
# Set up the flat-sky projection
        self.flat_sky_pixels_size = flat_sky_pixels_size
self._flat_sky_projection = self._roi.get_flat_sky_projection(self.flat_sky_pixels_size)
# Read map tree (data)
self._maptree = map_tree_factory(maptree, roi=self._roi)
# Read detector response_file
self._response = hawc_response_factory(response_file)
# Use a renormalization of the background as nuisance parameter
# NOTE: it is fixed to 1.0 unless the user explicitly sets it free (experimental)
self._nuisance_parameters = collections.OrderedDict()
#self._nuisance_parameters['%s_bkg_renorm' % name] = Parameter('%s_bkg_renorm' % name, 1.0,
self._nuisance_parameters[f'{name}_bkg_renorm'] = Parameter(f'{name}_bkg_renorm', 1.0,
min_value=0.5, max_value=1.5,
delta=0.01,
desc="Renormalization for background map",
free=False,
is_normalization=False)
# Instance parent class
super(HAL, self).__init__(name, self._nuisance_parameters)
self._likelihood_model = None
# These lists will contain the maps for the point sources
self._convolved_point_sources = ConvolvedSourcesContainer()
# and this one for extended sources
self._convolved_ext_sources = ConvolvedSourcesContainer()
# All energy/nHit bins are loaded in memory
self._all_planes = list(self._maptree.analysis_bins_labels)
# The active planes list always contains the list of *indexes* of the active planes
self._active_planes = None
# Set up the transformations from the flat-sky projection to Healpix, as well as the list of active pixels
# (one for each energy/nHit bin). We make a separate transformation because different energy bins might have
# different nsides
self._active_pixels = collections.OrderedDict()
self._flat_sky_to_healpix_transform = collections.OrderedDict()
for bin_id in self._maptree:
this_maptree = self._maptree[bin_id]
this_nside = this_maptree.nside
this_active_pixels = roi.active_pixels(this_nside)
this_flat_sky_to_hpx_transform = FlatSkyToHealpixTransform(self._flat_sky_projection.wcs,
'icrs',
this_nside,
this_active_pixels,
(self._flat_sky_projection.npix_width,
self._flat_sky_projection.npix_height),
order='bilinear')
self._active_pixels[bin_id] = this_active_pixels
self._flat_sky_to_healpix_transform[bin_id] = this_flat_sky_to_hpx_transform
# This will contain a list of PSF convolutors for extended sources, if there is any in the model
self._psf_convolutors = None
# Pre-compute the log-factorial factor in the likelihood, so we do not keep to computing it over and over
# again.
self._log_factorials = collections.OrderedDict()
# We also apply a bias so that the numerical value of the log-likelihood stays small. This helps when
# fitting with algorithms like MINUIT because the convergence criterium involves the difference between
# two likelihood values, which would be affected by numerical precision errors if the two values are
# too large
self._saturated_model_like_per_maptree = collections.OrderedDict()
# The actual computation is in a method so we can recall it on clone (see the get_simulated_dataset method)
self._compute_likelihood_biases()
# This will save a clone of self for simulations
self._clone = None
# Integration method for the PSF (see psf_integration_method)
self._psf_integration_method = "exact"
@property
def psf_integration_method(self):
"""
Get or set the method for the integration of the PSF.
* "exact" is more accurate but slow, if the position is free to vary it adds a lot of time to the fit. This is
the default, to be used when the position of point sources are fixed. The computation in that case happens only
once so the impact on the run time is negligible.
* "fast" is less accurate (up to an error of few percent in flux) but a lot faster. This should be used when
the position of the point source is free, because in that case the integration of the PSF happens every time
the position changes, so several times during the fit.
If you have a fit with a free position, use "fast". When the position is found, you can fix it, switch to
"exact" and redo the fit to obtain the most accurate measurement of the flux. For normal sources the difference
will be small, but for very bright sources it might be up to a few percent (most of the time < 1%). If you are
interested in the localization contour there is no need to rerun with "exact".
:param mode: either "exact" or "fast"
:return: None
"""
return self._psf_integration_method
@psf_integration_method.setter
def psf_integration_method(self, mode):
assert mode.lower() in ["exact", "fast"], (
"PSF integration method must be either 'exact' or 'fast'"
)
self._psf_integration_method = mode.lower()
def _setup_psf_convolutors(self):
central_response_bins = self._response.get_response_dec_bin(self._roi.ra_dec_center[1])
self._psf_convolutors = collections.OrderedDict()
for bin_id in central_response_bins:
#Only set up PSF convolutors for active bins.
if bin_id in self._active_planes:
self._psf_convolutors[bin_id] = PSFConvolutor(central_response_bins[bin_id].psf,
self._flat_sky_projection)
def _compute_likelihood_biases(self):
for bin_label in self._maptree:
data_analysis_bin = self._maptree[bin_label]
this_log_factorial = np.sum(logfactorial(data_analysis_bin.observation_map.as_partial().astype(int)))
self._log_factorials[bin_label] = this_log_factorial
# As bias we use the likelihood value for the saturated model
obs = data_analysis_bin.observation_map.as_partial()
bkg = data_analysis_bin.background_map.as_partial()
sat_model = np.clip(obs - bkg, 1e-50, None).astype(np.float64)
self._saturated_model_like_per_maptree[bin_label] = log_likelihood(obs, bkg, sat_model) - this_log_factorial
def get_saturated_model_likelihood(self):
"""
Returns the likelihood for the saturated model (i.e. a model exactly equal to observation - background).
:return:
"""
return sum(self._saturated_model_like_per_maptree.values())
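# --- Illustrative sketch (toy numbers, not part of hawc_hal) -----------------
# Why the saturated model serves as a bias: setting the expectation to
# max(obs - bkg, ~0) maximizes a Poisson likelihood, so subtracting its
# log-likelihood keeps the values handed to the minimizer small. The
# log-likelihood below is a plausible stand-in (log-factorial term dropped),
# an assumption about hawc_hal's log_likelihood.
import numpy as np

def poisson_loglike(obs, bkg, model):
    expectation = model + bkg
    return np.sum(obs * np.log(expectation) - expectation)

obs = np.array([12.0, 3.0, 7.0])
bkg = np.array([5.0, 4.0, 6.0])
sat_model = np.clip(obs - bkg, 1e-50, None)
print(poisson_loglike(obs, bkg, sat_model))  # the per-maptree bias value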
def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=None):
"""
Set the active analysis bins to use during the analysis. It can be used in two ways:
- Specifying a range: if the response and the maptree allows it, you can specify a minimum id and a maximum id
number. This only works if the analysis bins are numerical, like in the normal fHit analysis. For example:
> set_active_measurement(bin_id_min=1, bin_id_max=9)
- Specifying a list of bins as strings. This is more powerful, as allows to select any bins, even
non-contiguous bins. For example:
> set_active_measurement(bin_list=[list])
:param bin_id_min: minimum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_id_max: maximum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_list: a list of analysis bins to use
:return: None
"""
# Check for legal input
if bin_id_min is not None:
assert bin_id_max is not None, (
"If you provide a minimum bin, you also need to provide a maximum bin."
)
# Make sure they are integers
bin_id_min = int(bin_id_min)
bin_id_max = int(bin_id_max)
self._active_planes = []
for this_bin in range(bin_id_min, bin_id_max + 1):
this_bin = str(this_bin)
if this_bin not in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
else:
            assert bin_id_max is None, (
                "If you provide a maximum bin, you also need to provide a minimum bin."
            )
assert bin_list is not None
self._active_planes = []
for this_bin in bin_list:
                if this_bin not in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
if self._likelihood_model:
self.set_model( self._likelihood_model )
def display(self, verbose=False):
"""
Prints summary of the current object content.
"""
log.info("Region of Interest: ")
log.info("-------------------")
self._roi.display()
log.info("")
log.info("Flat sky projection: ")
log.info("--------------------")
log.info(
f"Width x height {self._flat_sky_projection.npix_width} x {self._flat_sky_projection.npix_height} px"
)
#log.info("Width x height: %s x %s px" % (self._flat_sky_projection.npix_width,
# self._flat_sky_projection.npix_height))
log.info(f"Pixel sizes: {self._flat_sky_projection.pixel_size} deg")
#log.info("Pixel sizes: %s deg" % self._flat_sky_projection.pixel_size)
log.info("")
log.info("Response: ")
log.info("---------")
self._response.display(verbose)
log.info("")
log.info("Map Tree: ")
log.info("----------")
self._maptree.display()
log.info("")
#log.info("Active energy/nHit planes ({}):".format(len(self._active_planes)))
log.info(f"Active energy/nHit planes ({len(self._active_planes)}):")
log.info("-------------------------------")
log.info(self._active_planes)
def set_model(self, likelihood_model_instance):
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
self._likelihood_model = likelihood_model_instance
# Reset
self._convolved_point_sources.reset()
self._convolved_ext_sources.reset()
# For each point source in the model, build the convolution class
for source in list(self._likelihood_model.point_sources.values()):
this_convolved_point_source = ConvolvedPointSource(source, self._response, self._flat_sky_projection)
self._convolved_point_sources.append(this_convolved_point_source)
# Samewise for extended sources
ext_sources = list(self._likelihood_model.extended_sources.values())
# NOTE: ext_sources evaluate to False if empty
if ext_sources:
# We will need to convolve
self._setup_psf_convolutors()
for source in ext_sources:
if source.spatial_shape.n_dim == 2:
this_convolved_ext_source = ConvolvedExtendedSource2D(source,
self._response,
self._flat_sky_projection)
else:
this_convolved_ext_source = ConvolvedExtendedSource3D(source,
self._response,
self._flat_sky_projection)
self._convolved_ext_sources.append(this_convolved_ext_source)
def get_excess_background(self, ra, dec, radius):
"""
Calculates area, excess (data - background) and model counts of source at different
distance from the source.
:param: radius: radial distance away from the center (degrees).
:returns: tuple of numpy.ndarrays for areas, excess, model, and background
this information is used in the get_radial_profile function.
"""
radius_radians = np.deg2rad(radius)
total_counts = np.zeros(len(self._active_planes), dtype=float)
background = np.zeros_like(total_counts)
observation = np.zeros_like(total_counts)
model = np.zeros_like(total_counts)
signal = np.zeros_like(total_counts)
area = np.zeros_like(total_counts)
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
longitude = ra_to_longitude(ra)
latitude = dec
center = hp.ang2vec(longitude, latitude, lonlat=True)
for i, energy_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[energy_id]
this_nside = data_analysis_bin.observation_map.nside
pixels_at_radius = hp.query_disc(
this_nside,
center,
radius_radians,
inclusive=False,
)
# calculate the areas per bin by the product
# of pixel area by the number of pixels at each radial bin
area[i] = hp.nside2pixarea(this_nside)*pixels_at_radius.shape[0]
# NOTE: select active pixels according to each radial bin
bin_active_pixel_indexes = np.searchsorted(self._active_pixels[energy_id], pixels_at_radius)
# obtain the excess, background, and expected excess at each radial bin
data = data_analysis_bin.observation_map.as_partial()
bkg = data_analysis_bin.background_map.as_partial()
mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial()
bin_data = np.array([data[i] for i in bin_active_pixel_indexes])
bin_bkg = np.array([bkg[i] for i in bin_active_pixel_indexes])
bin_model = np.array([mdl[i] for i in bin_active_pixel_indexes])
this_data_tot = np.sum(bin_data)
this_bkg_tot = np.sum(bin_bkg)
this_model_tot = np.sum(bin_model)
background[i] = this_bkg_tot
observation[i] = this_data_tot
model[i] = this_model_tot
signal[i] = this_data_tot - this_bkg_tot
return area, signal, model, background
def get_radial_profile(
self,
ra,
dec,
active_planes=None,
max_radius=3.0,
n_radial_bins=30,
model_to_subtract=None,
subtract_model_from_model=False,
):
"""
Calculates radial profiles of data - background & model.
:param ra: R.A. of origin for radial profile.
:param dec: Declination of origin of radial profile.
:param active_planes: List of analysis over which to average; if None, use HAWC default (bins 1-9).
:param: max_radius: Radius up to which the radial profile is evaluated;
for the disk to calculate the gamma/hadron weights (Default: 3.0).
:param n_radial_bins: Number of bins for the radial profile (Default: 30).
:param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
:param subtract_model_from_model: If True and model_to_subtract is not None,
subtract model from model too (Defalt: False).
:return: np.arrays with the radii, model profile, data profile, data uncertainty, and
list of analysis bins used.
"""
# default is to use all active bins
if active_planes is None:
active_planes = self._active_planes
# Make sure we use bins with data
good_planes = [plane_id in active_planes for plane_id in self._active_planes]
plane_ids = set(active_planes) & set(self._active_planes)
delta_r = 1.0*max_radius/n_radial_bins
radii = np.array([delta_r*(r + 0.5) for r in range(0, n_radial_bins)])
# Get area of all pixels in a given circle
# The area of each ring is then given by the difference between two
# subsequent circe areas.
area = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[0] for r in radii ]
)
temp = area[1:] - area[:-1]
area[1:] = temp
# model
# convert 'top hat' excess into 'ring' excesses.
model = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
)
temp = model[1:] - model[:-1]
model[1:] = temp
# signals
signal = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[1] for r in radii]
)
temp = signal[1:] - signal[:-1]
signal[1:] = temp
# backgrounds
bkg = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[3] for r in radii]
)
temp = bkg[1:] - bkg[:-1]
bkg[1:] = temp
counts = signal + bkg
if model_to_subtract is not None:
this_model = copy.deepcopy(self._likelihood_model)
self.set_model(model_to_subtract)
model_subtract = np.array(
[self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
)
temp = model_subtract[1:] - model_subtract[:-1]
model_subtract[1:] = temp
signal -= model_subtract
if subtract_model_from_model:
model -= model_subtract
self.set_model(this_model)
# NOTE: weights are calculated as expected number of gamma-rays/number of background counts.
# here, use max_radius to evaluate the number of gamma-rays/bkg counts.
# The weights do not depend on the radius, but fill a matrix anyway so
# there's no confusion when multiplying them to the data later.
# Weight is normalized (sum of weights over the bins = 1).
total_excess = np.array(
self.get_excess_background(ra, dec, max_radius)[1]
)[good_planes]
total_model = np.array(
self.get_excess_background(ra, dec, max_radius)[2]
)[good_planes]
total_bkg = np.array(
self.get_excess_background(ra, dec, max_radius)[3]
)[good_planes]
w = np.divide(total_model, total_bkg)
weight = np.array([w/np.sum(w) for r in radii])
        # restrict the profiles to the user-specified analysis bins
area = area[:, good_planes]
signal = signal[:, good_planes]
model = model[:, good_planes]
counts = counts[:, good_planes]
bkg = bkg[:, good_planes]
# average over the analysis bins
excess_data = np.average(signal/area, weights=weight, axis=1)
excess_error = np.sqrt(np.sum(counts*weight*weight/(area*area), axis=1))
excess_model = np.average(model/area, weights=weight, axis=1)
return radii, excess_model, excess_data, excess_error, sorted(plane_ids)
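# --- Illustrative sketch (toy numbers, not part of hawc_hal) -----------------
# The 'top hat' -> 'ring' conversion used above: sums over a disc are
# cumulative in radius, so differencing consecutive entries leaves the counts
# in each annulus.
import numpy as np

cumulative = np.array([4.0, 9.0, 12.0])   # counts within r1 < r2 < r3
rings = np.diff(cumulative, prepend=0.0)  # counts in each ring
print(rings)  # [4. 5. 3.]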
def plot_radial_profile(
self,
ra,
dec,
active_planes=None,
max_radius=3.0,
n_radial_bins=30,
model_to_subtract=None,
subtract_model_from_model=False
):
"""
Plots radial profiles of data - background & model.
:param ra: R.A. of origin for radial profile.
:param dec: Declination of origin of radial profile.
:param active_planes: List of analysis bins over which to average;
if None, use HAWC default (bins 1-9).
:param max_radius: Radius up to which the radial profile is evaluated; also
used as the radius for the disk to calculate the gamma/hadron weights. Default: 3.0
:param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
:param subtract_model_from_model: If True and model_to_subtract is not None, subtract from model too (Default: False).
:return: plot of data - background vs model radial profiles.
"""
(
radii,
excess_model,
excess_data,
excess_error,
plane_ids,
) = self.get_radial_profile(
ra,
dec,
active_planes,
max_radius,
n_radial_bins,
model_to_subtract,
subtract_model_from_model,
)
#font = {
# "family":"serif",
# "weight":"regular",
# "size":12
#}
#mpl.rc("font", **font)
fig, ax = plt.subplots(figsize=(10,8))
plt.errorbar(
radii,
excess_data,
yerr=excess_error,
capsize=0,
color="black",
label="Excess (data-bkg)",
fmt=".",
)
plt.plot(radii, excess_model, color="red", label="Model")
plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1)
plt.axhline(0, color="deepskyblue", linestyle="--")
x_limits=[0, max_radius]
plt.xlim(x_limits)
plt.ylabel(r"Apparent Radial Excess [sr$^{-1}$]")
plt.xlabel(
f"Distance from source at ({ra:0.2f} $^{{\circ}}$, {dec:0.2f} $^{{\circ}}$)"
)
        if len(plane_ids) == 1:
            title = f"Radial Profile, bin {plane_ids[0]}"
        else:
            tmptitle = f"Radial Profile, bins \n{plane_ids}"
            width = 70
            # wrap long bin lists so the title stays within the figure
            title = "\n".join(
                tmptitle[i:i + width] for i in range(0, len(tmptitle), width)
            )
        plt.title(title)
ax.grid(True)
        try:
            plt.tight_layout()
        except Exception:
            # tight_layout can fail for some layouts; the figure is still usable
            pass
return fig
def display_spectrum(self):
"""
Make a plot of the current spectrum and its residuals (integrated over space)
:return: a matplotlib.Figure
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
total_counts = np.zeros(len(self._active_planes), dtype=float)
total_model = np.zeros_like(total_counts)
model_only = np.zeros_like(total_counts)
net_counts = np.zeros_like(total_counts)
yerr_low = np.zeros_like(total_counts)
yerr_high = np.zeros_like(total_counts)
for i, energy_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[energy_id]
this_model_map_hpx = self._get_expectation(data_analysis_bin, energy_id, n_point_sources, n_ext_sources)
this_model_tot = np.sum(this_model_map_hpx)
this_data_tot = np.sum(data_analysis_bin.observation_map.as_partial())
this_bkg_tot = np.sum(data_analysis_bin.background_map.as_partial())
total_counts[i] = this_data_tot
net_counts[i] = this_data_tot - this_bkg_tot
model_only[i] = this_model_tot
this_wh_model = this_model_tot + this_bkg_tot
total_model[i] = this_wh_model
if this_data_tot >= 50.0:
# Gaussian limit
# Under the null hypothesis the data are distributed as a Gaussian with mu = model
# and sigma = sqrt(model)
# NOTE: since we neglect the background uncertainty, the background is part of the
# model
yerr_low[i] = np.sqrt(this_data_tot)
yerr_high[i] = np.sqrt(this_data_tot)
else:
# Low-counts
# Under the null hypothesis the data are distributed as a Poisson distribution with
# mean = model, plot the 68% confidence interval (quantile=[0.16,1-0.16]).
# NOTE: since we neglect the background uncertainty, the background is part of the
# model
quantile = 0.16
mean = this_wh_model
y_low = poisson.isf(1-quantile, mu=mean)
y_high = poisson.isf(quantile, mu=mean)
yerr_low[i] = mean-y_low
yerr_high[i] = y_high-mean
residuals = old_div((total_counts - total_model), np.sqrt(total_model))
residuals_err = [old_div(yerr_high, np.sqrt(total_model)),
old_div(yerr_low, np.sqrt(total_model))]
yerr = [yerr_high, yerr_low]
return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err)
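# --- Illustrative sketch (toy counts, not part of hawc_hal) ------------------
# The two error-bar regimes used above: sqrt(N) in the Gaussian limit versus
# the 68% central Poisson interval from the inverse survival function at low
# counts. For large means the two agree closely.
import numpy as np
from scipy.stats import poisson

for mean in (4.0, 400.0):
    y_low = poisson.isf(1 - 0.16, mu=mean)
    y_high = poisson.isf(0.16, mu=mean)
    print(mean, (mean - y_low, y_high - mean), np.sqrt(mean))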
def _plot_spectrum(self, net_counts, yerr, model_only, residuals, residuals_err):
fig, subs = plt.subplots(2, 1, gridspec_kw={'height_ratios': [2, 1], 'hspace': 0}, figsize=(12,6))
planes = np.array(self._active_planes)
subs[0].errorbar(planes, net_counts, yerr=yerr,
capsize=0,
color='black', label='Net counts', fmt='.')
subs[0].plot(planes, model_only, label='Convolved model')
subs[0].legend(bbox_to_anchor=(1.0, 1.0), loc="upper right",
numpoints=1)
# Residuals
subs[1].axhline(0, linestyle='--')
subs[1].errorbar(
planes, residuals,
yerr=residuals_err,
capsize=0, fmt='.'
)
y_limits = [min(net_counts[net_counts > 0]) / 2., max(net_counts) * 2.]
subs[0].set_yscale("log", nonpositive='clip')
subs[0].set_ylabel("Counts per bin")
subs[0].set_xticks([])
subs[1].set_xlabel("Analysis bin")
subs[1].set_ylabel(r"$\frac{{cts - mod - bkg}}{\sqrt{mod + bkg}}$")
subs[1].set_xticks(planes)
subs[1].set_xticklabels(self._active_planes)
subs[0].set_ylim(y_limits)
return fig
def get_log_like(self):
"""
Return the value of the log-likelihood with the current values for the
parameters
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
# Make sure that no source has been added since we filled the cache
assert (n_point_sources == self._convolved_point_sources.n_sources_in_cache and
n_ext_sources == self._convolved_ext_sources.n_sources_in_cache), (
"The number of sources has changed. Please re-assign the model to the plugin."
)
#assert n_point_sources == self._convolved_point_sources.n_sources_in_cache and \
# n_ext_sources == self._convolved_ext_sources.n_sources_in_cache, \
# "The number of sources has changed. Please re-assign the model to the plugin."
# This will hold the total log-likelihood
total_log_like = 0
for bin_id in self._active_planes:
data_analysis_bin = self._maptree[bin_id]
this_model_map_hpx = self._get_expectation(data_analysis_bin, bin_id, n_point_sources, n_ext_sources)
# Now compare with observation
bkg_renorm = list(self._nuisance_parameters.values())[0].value
            obs = data_analysis_bin.observation_map.as_partial()  # type: np.ndarray
            bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm  # type: np.ndarray
this_pseudo_log_like = log_likelihood(obs,
bkg,
this_model_map_hpx)
total_log_like += this_pseudo_log_like - self._log_factorials[bin_id] \
- self._saturated_model_like_per_maptree[bin_id]
return total_log_like
def write(self, response_file_name, map_tree_file_name):
"""
Write this dataset to disk in HDF format.
:param response_file_name: filename for the response
:param map_tree_file_name: filename for the map tree
:return: None
"""
self._maptree.write(map_tree_file_name)
self._response.write(response_file_name)
def get_simulated_dataset(self, name):
"""
Return a simulation of this dataset using the current model with current parameters.
:param name: new name for the new plugin instance
:return: a HAL instance
"""
# First get expectation under the current model and store them, if we didn't do it yet
if self._clone is None:
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
expectations = collections.OrderedDict()
for bin_id in self._maptree:
data_analysis_bin = self._maptree[bin_id]
if bin_id not in self._active_planes:
expectations[bin_id] = None
else:
expectations[bin_id] = self._get_expectation(data_analysis_bin, bin_id,
n_point_sources, n_ext_sources) + \
data_analysis_bin.background_map.as_partial()
if parallel_client.is_parallel_computation_active():
# Do not clone, as the parallel environment already makes clones
clone = self
else:
clone = copy.deepcopy(self)
self._clone = (clone, expectations)
# Substitute the observation and background for each data analysis bin
for bin_id in self._clone[0]._maptree:
data_analysis_bin = self._clone[0]._maptree[bin_id]
if bin_id not in self._active_planes:
continue
else:
# Active plane. Generate new data
expectation = self._clone[1][bin_id]
new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten()
# Substitute data
data_analysis_bin.observation_map.set_new_values(new_data)
# Now change name and return
self._clone[0]._name = name
# Adjust the name of the nuisance parameter
old_name = list(self._clone[0]._nuisance_parameters.keys())[0]
new_name = old_name.replace(self.name, name)
self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop(old_name)
# Recompute biases
self._clone[0]._compute_likelihood_biases()
return self._clone[0]
def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources):
# Compute the expectation from the model
this_model_map = None
for pts_id in range(n_point_sources):
this_conv_src = self._convolved_point_sources[pts_id]
expectation_per_transit = this_conv_src.get_source_map(energy_bin_id,
tag=None,
psf_integration_method=self._psf_integration_method)
expectation_from_this_source = expectation_per_transit * data_analysis_bin.n_transits
if this_model_map is None:
# First addition
this_model_map = expectation_from_this_source
else:
this_model_map += expectation_from_this_source
# Now process extended sources
if n_ext_sources > 0:
this_ext_model_map = None
for ext_id in range(n_ext_sources):
this_conv_src = self._convolved_ext_sources[ext_id]
expectation_per_transit = this_conv_src.get_source_map(energy_bin_id)
if this_ext_model_map is None:
# First addition
this_ext_model_map = expectation_per_transit
else:
this_ext_model_map += expectation_per_transit
# Now convolve with the PSF
if this_model_map is None:
# Only extended sources
this_model_map = (self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) *
data_analysis_bin.n_transits)
else:
this_model_map += (self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) *
data_analysis_bin.n_transits)
# Now transform from the flat sky projection to HEALPiX
if this_model_map is not None:
# First divide for the pixel area because we need to interpolate brightness
#this_model_map = old_div(this_model_map, self._flat_sky_projection.project_plane_pixel_area)
this_model_map = this_model_map/self._flat_sky_projection.project_plane_pixel_area
this_model_map_hpx = self._flat_sky_to_healpix_transform[energy_bin_id](this_model_map, fill_value=0.0)
# Now multiply by the pixel area of the new map to go back to flux
this_model_map_hpx *= hp.nside2pixarea(data_analysis_bin.nside, degrees=True)
else:
# No sources
this_model_map_hpx = 0.0
return this_model_map_hpx
@staticmethod
def _represent_healpix_map(fig, hpx_map, longitude, latitude, xsize, resolution, smoothing_kernel_sigma):
proj = get_gnomonic_projection(fig, hpx_map,
rot=(longitude, latitude, 0.0),
xsize=xsize,
reso=resolution)
if smoothing_kernel_sigma is not None:
# Get the sigma in pixels
sigma = old_div(smoothing_kernel_sigma * 60, resolution)
proj = convolve(list(proj),
Gaussian2DKernel(sigma),
nan_treatment='fill',
preserve_nan=True)
return proj
def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False):
"""
Make a figure containing 4 maps for each active analysis bins with respectively model, data,
background and residuals. The model, data and residual maps are smoothed, the background
map is not.
:param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel, for all but
background maps
:param display_colorbar: whether or not to display the colorbar in the residuals
:return: a matplotlib.Figure
"""
n_point_sources = self._likelihood_model.get_number_of_point_sources()
n_ext_sources = self._likelihood_model.get_number_of_extended_sources()
# This is the resolution (i.e., the size of one pixel) of the image
resolution = 3.0 # arcmin
# The image is going to cover the diameter plus 20% padding
xsize = self._get_optimal_xsize(resolution)
n_active_planes = len(self._active_planes)
n_columns = 4
fig, subs = plt.subplots(n_active_planes, n_columns,
figsize=(2.7 * n_columns, n_active_planes * 2), squeeze=False)
prog_bar = tqdm(total = len(self._active_planes), desc="Smoothing planes")
        images = [None] * n_columns
for i, plane_id in enumerate(self._active_planes):
data_analysis_bin = self._maptree[plane_id]
# Get the center of the projection for this plane
this_ra, this_dec = self._roi.ra_dec_center
# Make a full healpix map for a second
whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense()
# Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
longitude = ra_to_longitude(this_ra)
# Declination is already between -90 and 90
latitude = this_dec
# Background and excess maps
bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True)
# Make all the projections: model, excess, background, residuals
proj_model = self._represent_healpix_map(fig, whole_map,
longitude, latitude,
xsize, resolution, smoothing_kernel_sigma)
# Here we removed the background otherwise nothing is visible
# Get background (which is in a way "part of the model" since the uncertainties are neglected)
proj_data = self._represent_healpix_map(fig, bkg_subtracted,
longitude, latitude,
xsize, resolution, smoothing_kernel_sigma)
# No smoothing for this one (because a goal is to check it is smooth).
proj_bkg = self._represent_healpix_map(fig, background_map,
longitude, latitude,
xsize, resolution, None)
proj_residuals = proj_data - proj_model
# Common color scale range for model and excess maps
vmin = min(np.nanmin(proj_model), np.nanmin(proj_data))
vmax = max(np.nanmax(proj_model), np.nanmax(proj_data))
# Plot model
images[0] = subs[i][0].imshow(proj_model, origin='lower', vmin=vmin, vmax=vmax)
subs[i][0].set_title('model, bin {}'.format(data_analysis_bin.name))
# Plot data map
images[1] = subs[i][1].imshow(proj_data, origin='lower', vmin=vmin, vmax=vmax)
subs[i][1].set_title('excess, bin {}'.format(data_analysis_bin.name))
# Plot background map.
images[2] = subs[i][2].imshow(proj_bkg, origin='lower')
subs[i][2].set_title('background, bin {}'.format(data_analysis_bin.name))
# Now residuals
images[3] = subs[i][3].imshow(proj_residuals, origin='lower')
subs[i][3].set_title('residuals, bin {}'.format(data_analysis_bin.name))
# Remove numbers from axis
for j in range(n_columns):
subs[i][j].axis('off')
if display_colorbar:
for j, image in enumerate(images):
plt.colorbar(image, ax=subs[i][j])
prog_bar.update(1)
fig.set_tight_layout(True)
return fig
def _get_optimal_xsize(self, resolution):
return 2.2 * self._roi.data_radius.to("deg").value / (resolution / 60.0)
def display_stacked_image(self, smoothing_kernel_sigma=0.5):
"""
Display a map with all active analysis bins stacked together.
:param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel to apply
:return: a matplotlib.Figure instance
"""
# This is the resolution (i.e., the size of one pixel) of the image in arcmin
resolution = 3.0
# The image is going to cover the diameter plus 20% padding
xsize = self._get_optimal_xsize(resolution)
active_planes_bins = [self._maptree[x] for x in self._active_planes]
# Get the center of the projection for this plane
this_ra, this_dec = self._roi.ra_dec_center
# Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
longitude = ra_to_longitude(this_ra)
# Declination is already between -90 and 90
latitude = this_dec
total = None
for i, data_analysis_bin in enumerate(active_planes_bins):
# Plot data
background_map = data_analysis_bin.background_map.as_dense()
this_data = data_analysis_bin.observation_map.as_dense() - background_map
idx = np.isnan(this_data)
# this_data[idx] = hp.UNSEEN
if i == 0:
total = this_data
else:
# Sum only when there is no UNSEEN, so that the UNSEEN pixels will stay UNSEEN
total[~idx] += this_data[~idx]
delta_coord = (self._roi.data_radius.to("deg").value * 2.0) / 15.0
fig, sub = plt.subplots(1, 1)
proj = self._represent_healpix_map(fig, total, longitude, latitude, xsize, resolution, smoothing_kernel_sigma)
cax = sub.imshow(proj, origin='lower')
fig.colorbar(cax)
sub.axis('off')
hp.graticule(delta_coord, delta_coord)
return fig
def inner_fit(self):
"""
This is used for the profile likelihood. Keeping fixed all parameters in the
LikelihoodModel, this method minimize the logLike over the remaining nuisance
parameters, i.e., the parameters belonging only to the model for this
particular detector. If there are no nuisance parameters, simply return the
logLike value.
"""
return self.get_log_like()
def get_number_of_data_points(self):
"""
Return the number of active bins across all active analysis bins
:return: number of active bins
"""
n_points = 0
for bin_id in self._maptree:
n_points += self._maptree[bin_id].observation_map.as_partial().shape[0]
return n_points
def _get_model_map(self, plane_id, n_pt_src, n_ext_src):
"""
This function returns a model map for a particular bin
"""
if plane_id not in self._active_planes:
raise ValueError(
f"{plane_id} not a plane in the current model"
)
model_map = SparseHealpix(self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src),
self._active_pixels[plane_id],
self._maptree[plane_id].observation_map.nside)
return model_map
def _get_excess(self, data_analysis_bin, all_maps=True):
"""
This function returns the excess counts for a particular bin
if all_maps=True, also returns the data and background maps
"""
data_map = data_analysis_bin.observation_map.as_dense()
bkg_map = data_analysis_bin.background_map.as_dense()
excess = data_map - bkg_map
if all_maps:
return excess, data_map, bkg_map
return excess
def _write_a_map(self, file_name, which, fluctuate=False, return_map=False):
"""
This writes either a model map or a residual map, depending on the `which` argument
"""
which = which.lower()
assert which in ['model', 'residual']
n_pt = self._likelihood_model.get_number_of_point_sources()
n_ext = self._likelihood_model.get_number_of_extended_sources()
map_analysis_bins = collections.OrderedDict()
if fluctuate:
poisson_set = self.get_simulated_dataset("model map")
for plane_id in self._active_planes:
data_analysis_bin = self._maptree[plane_id]
bkg = data_analysis_bin.background_map
obs = data_analysis_bin.observation_map
if fluctuate:
model_excess = poisson_set._maptree[plane_id].observation_map \
- poisson_set._maptree[plane_id].background_map
else:
model_excess = self._get_model_map(plane_id, n_pt, n_ext)
if which == 'residual':
bkg += model_excess
if which == 'model':
obs = model_excess + bkg
this_bin = DataAnalysisBin(plane_id,
observation_hpx_map=obs,
background_hpx_map=bkg,
active_pixels_ids=self._active_pixels[plane_id],
n_transits=data_analysis_bin.n_transits,
scheme='RING')
map_analysis_bins[plane_id] = this_bin
# save the file
new_map_tree = MapTree(map_analysis_bins, self._roi)
new_map_tree.write(file_name)
if return_map:
return new_map_tree
def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False):
"""
This function writes the model map to a file.
The interface is based on that of HAWCLike for consistency
"""
if test_return_map:
log.warning("test_return_map=True should only be used for testing purposes!")
return self._write_a_map(file_name, 'model', poisson_fluctuate, test_return_map)
def write_residual_map(self, file_name, test_return_map=False):
"""
This function writes the residual map to a file.
The interface is based on that of HAWCLike for consistency
"""
if test_return_map:
log.warning("test_return_map=True should only be used for testing purposes!")
return self._write_a_map(file_name, 'residual', False, test_return_map)
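# A minimal usage sketch (``hal`` stands for an instance of this plugin;
# the file names are illustrative assumptions):
#
#     hal.write_model_map("model_map.hd5", poisson_fluctuate=True)
#     hal.write_residual_map("residual_map.hd5")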
| 38.20771
| 126
| 0.609942
| 6,121
| 48,562
| 4.574743
| 0.123182
| 0.008214
| 0.02357
| 0.011999
| 0.393543
| 0.337833
| 0.285908
| 0.242376
| 0.210842
| 0.2017
| 0
| 0.008056
| 0.314958
| 48,562
| 1,270
| 127
| 38.237795
| 0.833679
| 0.254438
| 0
| 0.183962
| 0
| 0.001572
| 0.043765
| 0.005003
| 0
| 0
| 0
| 0
| 0.009434
| 1
| 0.045597
| false
| 0.001572
| 0.051887
| 0.001572
| 0.133648
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c88aff50b9e6ce0d5c309be594a03b1f208a90db
| 15,227
|
py
|
Python
|
sshcustodian/sshcustodian.py
|
jkglasbrenner/sshcustodian
|
870d1088f27e1528e27f94f55f2efad7dad32d5d
|
[
"MIT"
] | null | null | null |
sshcustodian/sshcustodian.py
|
jkglasbrenner/sshcustodian
|
870d1088f27e1528e27f94f55f2efad7dad32d5d
|
[
"MIT"
] | null | null | null |
sshcustodian/sshcustodian.py
|
jkglasbrenner/sshcustodian
|
870d1088f27e1528e27f94f55f2efad7dad32d5d
|
[
"MIT"
] | null | null | null |
# File: sshcustodian/sshcustodian.py
# -*- coding: utf-8 -*-
# Python 2/3 Compatibility
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from six.moves import filterfalse
"""
This module creates a subclass of the main Custodian class in the Custodian
project (github.com/materialsproject/custodian), which is a wrapper that
manages jobs running on computing clusters. The Custodian module is part of The
Materials Project (materialsproject.org/).
This subclass adds the functionality to copy the temporary directory created
via monty to the scratch partitions on slave compute nodes, provided that the
cluster's filesystem is configured in this way. The implementation invokes a
subprocess to utilize the ssh executable installed on the cluster, so it is not
particularly elegant or platform independent, nor is this solution likely to be
general to all clusters. This is why this modification has not been submitted
as a pull request to the main Custodian project.
"""
# Import modules
import logging
import subprocess
import sys
import datetime
import time
import os
import re
from itertools import islice, groupby
from socket import gethostname
from monty.tempfile import ScratchDir
from monty.shutil import gzip_dir
from monty.json import MontyEncoder
from monty.serialization import dumpfn
from custodian.custodian import Custodian
from custodian.custodian import CustodianError
# Module-level logger
logger = logging.getLogger(__name__)
class SSHCustodian(Custodian):
"""
The SSHCustodian class modifies the Custodian class from the custodian
module to be able to handle clusters that have separate scratch partitions
for each node. When scratch_dir_node_only is enabled, the temp_dir that
monty creates will be copied to all other compute nodes used in the
calculation and subsequently removed when the job is finished.
"""
__doc__ += Custodian.__doc__
def __init__(self, handlers, jobs, validators=None, max_errors=1,
polling_time_step=10, monitor_freq=30,
skip_over_errors=False, scratch_dir=None,
gzipped_output=False, checkpoint=False,
scratch_dir_node_only=False, pbs_nodefile=None):
""" scratch_dir_node_only (bool): If set to True, custodian will grab
the list of nodes in the file path provided to pbs_nodefile and
copy the temp_dir to the scratch_dir on each node over
ssh. This is necessary on cluster setups where each node has
its own independent scratch partition.
pbs_nodefile (str): The filepath to the list of nodes to be used in
a calculation. If this path does not point to a valid file,
then scratch_dir_node_only will be automatically set to False.
"""
super(SSHCustodian, self).__init__(handlers, jobs, validators,
max_errors, polling_time_step,
monitor_freq, skip_over_errors,
scratch_dir, gzipped_output,
checkpoint)
self.hostname = gethostname()
if pbs_nodefile is None:
self.scratch_dir_node_only = False
self.slave_compute_node_list = None
elif os.path.exists(pbs_nodefile):
self.scratch_dir_node_only = scratch_dir_node_only
self.pbs_nodefile = pbs_nodefile
self.slave_compute_node_list = (
self._process_pbs_nodefile(self.pbs_nodefile, self.hostname))
else:
self.scratch_dir_node_only = False
self.pbs_nodefile = None
self.slave_compute_node_list = None
@staticmethod
def _process_pbs_nodefile(pbs_nodefile, hostname):
with open(pbs_nodefile) as in_file:
nodelist = in_file.read().splitlines()
slave_compute_node_list = [
node for node, _ in groupby(filterfalse(lambda x: x == hostname,
nodelist))
]
return slave_compute_node_list
def _copy_to_slave_node_dirs(self, temp_dir_path):
"""
Copy temporary scratch directory from master node to other nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = ['rsync', '-azhq', temp_dir_path,
'{0}:{1}'.format(node,
os.path.abspath(self.scratch_dir))]
p = subprocess.Popen(command, shell=False)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _update_slave_node_vasp_input_files(self, temp_dir_path):
"""
Update VASP input files in the scratch partition on the slave compute
nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
VASP_INPUT_FILES = [x for x in ["{0}/CHGCAR".format(temp_dir_path),
"{0}/WAVECAR".format(temp_dir_path),
"{0}/INCAR".format(temp_dir_path),
"{0}/POSCAR".format(temp_dir_path),
"{0}/POTCAR".format(temp_dir_path),
"{0}/KPOINTS".format(temp_dir_path)] if
os.path.exists(x)]
process_list = []
for node in self.slave_compute_node_list:
for filepath in VASP_INPUT_FILES:
command = 'scp {0} {1}:{2}/'.format(filepath, node,
temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for syncing to finish before moving on
for process in process_list:
process.wait()
def _delete_slave_node_dirs(self, temp_dir_path):
"""
Delete the temporary scratch directory on the slave nodes.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
It is assumed here that the root path of the scratch directory
is the same on all nodes.
"""
process_list = []
for node in self.slave_compute_node_list:
command = 'ssh {0} "rm -rf {1}"'.format(node, temp_dir_path)
p = subprocess.Popen(command, shell=True)
process_list.append(p)
# Wait for deletion to finish before moving on
for process in process_list:
process.wait()
def _manage_node_scratch(self, temp_dir_path, job_start):
"""
Checks whether the user wants to make use of scratch partitions on each
compute node, and if True, either copies the temporary directory to or
deletes the temporary directory from each slave compute node. If the
user does not specify to use node-specific scratch partitions, then the
function does nothing.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job_start (bool): If True, then the job has started and the
temporary directory will be copied to the slave compute
nodes. If False, then the temporary directories will be deleted
from the slave compute nodes.
"""
if self.scratch_dir_node_only:
if job_start:
self._copy_to_slave_node_dirs(temp_dir_path)
else:
self._delete_slave_node_dirs(temp_dir_path)
else:
pass
def _update_node_scratch(self, temp_dir_path, job):
"""
Method to update the scratch partitions on the slave compute nodes
if they exist and are running a VASP job.
Args:
temp_dir_path (str): The path to the temporary scratch directory.
job (object): The job object you intend to run. Currently supports
VASP jobs.
"""
vasp_re = re.compile(r'vasp')
if self.scratch_dir is not None:
try:
jobtype = job.get_jobtype()
if self.scratch_dir_node_only:
if vasp_re.match(jobtype):
self._update_slave_node_vasp_input_files(temp_dir_path)
else:
pass
else:
pass
except AttributeError:
# The job does not implement get_jobtype(), so there is nothing
# to propagate to the compute nodes
pass
else:
pass
def run(self):
"""
Override of Custodian.run() to include instructions to copy the
temp_dir to the scratch partition on slave compute nodes if requested.
"""
cwd = os.getcwd()
with ScratchDir(self.scratch_dir, create_symbolic_link=True,
copy_to_current_on_exit=True,
copy_from_current_on_enter=True) as temp_dir:
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=True)
self.total_errors = 0
start = datetime.datetime.now()
logger.info("Run started at {} in {}.".format(
start, temp_dir))
v = sys.version.replace("\n", " ")
logger.info("Custodian running on Python version {}".format(v))
try:
# skip jobs until the restart
for job_n, job in islice(enumerate(self.jobs, 1),
self.restart, None):
self._run_job(job_n, job, temp_dir)
# Checkpoint after each job so that we can recover from
# last point and remove old checkpoints
if self.checkpoint:
super(SSHCustodian, self)._save_checkpoint(cwd, job_n)
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise RuntimeError("{} errors reached: {}. Exited..."
.format(self.total_errors, ex))
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(super(SSHCustodian,
self).LOG_FILE))
dumpfn(self.run_log, super(SSHCustodian, self).LOG_FILE,
cls=MontyEncoder, indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
# Remove duplicate copy of log file, provided it ends with
# ".log"
for x in ([x for x in os.listdir(temp_dir)
if re.match(r'\w*\.log', x)]):
os.remove(os.path.join(temp_dir, x))
self._manage_node_scratch(temp_dir_path=temp_dir,
job_start=False)
if self.gzipped_output:
gzip_dir(".")
# Cleanup checkpoint files (if any) if run is successful.
super(SSHCustodian, self)._delete_checkpoints(cwd)
return self.run_log
def _run_job(self, job_n, job, temp_dir):
"""
Overrides custodian.custodian._run_job() to propagate changes to input
files on different scratch partitions on compute nodes, if needed.
"""
self.run_log.append({"job": job.as_dict(), "corrections": []})
job.setup()
for attempt in range(1, self.max_errors - self.total_errors + 1):
# Propagate updated input files, if needed
self._update_node_scratch(temp_dir, job)
logger.info(
"Starting job no. {} ({}) attempt no. {}. Errors "
"thus far = {}.".format(
job_n, job.name, attempt, self.total_errors))
p = job.run()
# Check for errors using the error handlers and perform
# corrections.
has_error = False
# While the job is running, we use the handlers that are
# monitors to monitor the job.
if isinstance(p, subprocess.Popen):
if self.monitors:
n = 0
while True:
n += 1
time.sleep(self.polling_time_step)
if p.poll() is not None:
break
if n % self.monitor_freq == 0:
has_error = self._do_check(self.monitors,
p.terminate)
else:
p.wait()
logger.info("{}.run has completed. "
"Checking remaining handlers".format(job.name))
# Check for errors again, since in some cases non-monitor
# handlers fix the problems detected by monitors
# if an error has been found, not all handlers need to run
if has_error:
self._do_check([h for h in self.handlers
if not h.is_monitor])
else:
has_error = self._do_check(self.handlers)
# If there are no errors detected, perform
# postprocessing and exit.
if not has_error:
for v in self.validators:
if v.check():
s = "Validation failed: {}".format(v)
raise CustodianError(s, True, v)
job.postprocess()
return
# check that all errors could be handled
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise CustodianError(s, True, x["handler"])
for x in self.run_log[-1]["corrections"]:
if not x["actions"]:
s = "Unrecoverable error for handler: %s" % x["handler"]
raise CustodianError(s, False, x["handler"])
logger.info("Max errors reached.")
raise CustodianError("MaxErrors", True)
# Inherit Custodian docstrings
__init__.__doc__ = Custodian.__init__.__doc__ + __init__.__doc__
run.__doc__ = Custodian.run.__doc__
_run_job.__doc__ = Custodian._run_job.__doc__
| 43.505714
| 80
| 0.568858
| 1,786
| 15,227
| 4.653415
| 0.218925
| 0.030321
| 0.031765
| 0.021658
| 0.272651
| 0.209842
| 0.183371
| 0.142943
| 0.142943
| 0.142943
| 0
| 0.003191
| 0.361923
| 15,227
| 349
| 81
| 43.630372
| 0.852203
| 0.248506
| 0
| 0.202899
| 0
| 0
| 0.062481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0.024155
| 0.082126
| 0
| 0.144928
| 0.004831
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c88ca1454e3c43e792033b4722a580761e424d90
| 17,217
|
py
|
Python
|
sherlock/__init__.py
|
akudelka/sherlock
|
9e85f36c01e0cb1d495283f024423bc60c3f7a4e
|
[
"MIT"
] | 165
|
2015-01-12T09:09:19.000Z
|
2022-03-14T11:26:23.000Z
|
sherlock/__init__.py
|
akudelka/sherlock
|
9e85f36c01e0cb1d495283f024423bc60c3f7a4e
|
[
"MIT"
] | 35
|
2015-01-07T14:57:24.000Z
|
2022-03-24T17:43:28.000Z
|
sherlock/__init__.py
|
akudelka/sherlock
|
9e85f36c01e0cb1d495283f024423bc60c3f7a4e
|
[
"MIT"
] | 38
|
2015-03-11T09:10:05.000Z
|
2022-01-17T11:29:38.000Z
|
'''
Sherlock: Distributed Locks with a choice of backend
====================================================
:mod:`sherlock` is a library that provides easy-to-use distributed inter-process
locks and also allows you to choose the backend of your choice for lock
synchronization.
|Build Status| |Coverage Status|
.. |Build Status| image:: https://travis-ci.org/vaidik/sherlock.png
:target: https://travis-ci.org/vaidik/sherlock/
.. |Coverage Status| image:: https://coveralls.io/repos/vaidik/incoming/badge.png
:target: https://coveralls.io/r/vaidik/incoming
Overview
--------
When you are working with resources that are accessed by multiple services or
distributed services, more often than not you need some kind of locking
mechanism to ensure that only one consumer accesses a resource at a time.
Distributed Locks or Mutexes can help you with this. :mod:`sherlock` provides
the exact same facility, with some extra goodies. It provides an easy-to-use API
that resembles standard library's `threading.Lock` semantics.
Apart from this, :mod:`sherlock` gives you the flexibility of using a backend
of your choice for managing locks.
:mod:`sherlock` also makes it simple for you to extend :mod:`sherlock` to use
backends that are not supported.
Features
++++++++
* API similar to standard library's `threading.Lock`.
* Support for With statement, to cleanly acquire and release locks.
* Backend agnostic: supports `Redis`_, `Memcached`_ and `Etcd`_ as choice of
backends.
* Extendable: can be easily extended to work with any other backend of your
choice by extending the base lock class. Read :ref:`extending`.
.. _Redis: http://redis.io
.. _Memcached: http://memcached.org
.. _Etcd: http://github.com/coreos/etcd
Supported Backends and Client Libraries
+++++++++++++++++++++++++++++++++++++++
Following client libraries are supported for every supported backend:
* Redis: `redis-py`_
* Memcached: `pylibmc`_
* Etcd: `python-etcd`_
.. _redis-py: http://github.com
.. _pylibmc: http://github.com
.. _python-etcd: https://github.com/jplana/python-etcd
As of now, only the above mentioned libraries are supported. Although
:mod:`sherlock` takes custom client objects so that you can easily provide
the settings you want to use for that backend store, it also checks that the
provided client object is an instance of one of the supported clients and
accepts only objects that pass this check, even if another client's API is
the same. :mod:`sherlock` might get rid of this restriction later, if need be
and if there is a demand for it.
Installation
------------
Installation is simple.
.. code:: bash
pip install sherlock
.. note:: :mod:`sherlock` will install all the client libraries for all the
supported backends.
Basic Usage
-----------
:mod:`sherlock` is simple to use: at the API and semantics level, it tries to
conform to the standard library's :mod:`threading.Lock` API.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend,
# never expire locks and retry acquiring an acquired lock after an
# interval of 0.1 second.
sherlock.configure(backend=sherlock.backends.REDIS,
expire=None,
retry_interval=0.1)
# Note: configuring sherlock to use a backend does not stop you from using
# another backend at the same time. You can import backend specific locks
# like RedisLock, MCLock and EtcdLock and use them just the same way you
# use a generic lock (see below). In fact, the generic Lock provided by
# sherlock is just a proxy that uses these specific locks under the hood.
# acquire a lock called my_lock
lock = Lock('my_lock')
# acquire a blocking lock
lock.acquire()
# check if the lock has been acquired or not
lock.locked() == True
# release the lock
lock.release()
Support for ``with`` statement
++++++++++++++++++++++++++++++
.. code-block:: python
# using with statement
with Lock('my_lock'):
# do something constructive with your locked resource here
pass
Blocking and Non-blocking API
+++++++++++++++++++++++++++++
.. code-block:: python
# acquire non-blocking lock
lock1 = Lock('my_lock')
lock2 = Lock('my_lock')
# successfully acquire lock1
lock1.acquire()
# try to acquire lock in a non-blocking way
lock2.acquire(False) # returns False since lock1 already holds the lock
# try to acquire lock in a blocking way
lock2.acquire() # blocks until lock is acquired or timeout happens
Using two backends at the same time
+++++++++++++++++++++++++++++++++++
Configuring :mod:`sherlock` to use a backend does not limit you from using
another backend at the same time. You can import backend specific locks like
RedisLock, MCLock and EtcdLock and use them just the same way you use a generic
lock (see below). In fact, the generic Lock provided by :mod:`sherlock` is just
a proxy that uses these specific locks under the hood.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend
sherlock.configure(backend=sherlock.backends.REDIS)
# Acquire a lock called my_lock, this lock uses Redis
lock = Lock('my_lock')
# Now acquire locks in Memcached
from sherlock import MCLock
mclock = MCLock('my_mc_lock')
mclock.acquire()
Tests
-----
To run all the tests (including integration tests), make sure that all
the backend services are running:
.. code:: bash
# memcached
memcached
# redis-server
redis-server
# etcd (etcd is probably not available as a package, here is the simplest way
# to run it).
wget https://github.com/coreos/etcd/releases/download/<version>/etcd-<version>-<platform>.tar.gz
tar -zxvf etcd-<version>-<platform>.gz
./etcd-<version>-<platform>/etcd
Run tests like so:
.. code:: bash
python setup.py test
Documentation
-------------
Available `here`_.
.. _here: http://sher-lock.readthedocs.org
Roadmap
-------
* Support for `Zookeeper`_ as backend.
* Support for `Gevent`_, `Multithreading`_ and `Multiprocessing`_.
.. _Zookeeper: http://zookeeper.apache.org/
.. _Gevent: http://www.gevent.org/
.. _Multithreading: http://docs.python.org/2/library/multithreading.html
.. _Multiprocessing: http://docs.python.org/2/library/multiprocessing.html
License
-------
See `LICENSE`_.
**In short**: This is an open-source project and exists in the public domain
for anyone to modify and use. Just be nice and attribute the credits
wherever you can. :)
.. _LICENSE: http://github.com/vaidik/sherlock/blob/master/LICENSE.rst
Distributed Locking in Other Languages
--------------------------------------
* NodeJS - https://github.com/thedeveloper/warlock
'''
import etcd
import pylibmc
import redis
class _Backends(object):
'''
A simple object that provides a list of available backends.
'''
REDIS = {
'name': 'REDIS',
'library': 'redis',
'client_class': redis.StrictRedis,
'lock_class': 'RedisLock',
'default_args': (),
'default_kwargs': {},
}
ETCD = {
'name': 'ETCD',
'library': 'etcd',
'client_class': etcd.Client,
'lock_class': 'EtcdLock',
'default_args': (),
'default_kwargs': {},
}
MEMCACHED = {
'name': 'MEMCACHED',
'library': 'pylibmc',
'client_class': pylibmc.Client,
'lock_class': 'MCLock',
'default_args': (
['localhost'],
),
'default_kwargs': {
'binary': True,
},
}
_valid_backends = (
REDIS,
ETCD,
MEMCACHED,
)
def register(self, name, lock_class, library, client_class,
default_args=(), default_kwargs={}):
'''
Register a custom backend.
:param str name: Name of the backend by which you would want to refer to
this backend in your code.
:param class lock_class: the sub-class of
:class:`sherlock.lock.BaseLock` that you have
implemented. The reference to your implemented
lock class will be used by
:class:`sherlock.Lock` proxy to use your
implemented class when you globally set that
the choice of backend is the one that has been
implemented by you.
:param str library: dependent client library that this implementation
makes use of.
:param client_class: the client class or valid type which you use to
connect the datastore. This is used by the
:func:`configure` function to validate that
the object provided for the `client`
parameter is actually an instance of this class.
:param tuple default_args: default arguments that need to be passed to
create an instance of the callable passed to
the `client_class` parameter.
:param dict default_kwargs: default keyword arguments that need to be
passed to create an instance of the
callable passed to the `client_class`
parameter.
Usage:
>>> import some_db_client
>>> class MyLock(sherlock.lock.BaseLock):
... # your implementation comes here
... pass
>>>
>>> sherlock.configure(name='Mylock',
... lock_class=MyLock,
... library='some_db_client',
... client_class=some_db_client.Client,
... default_args=('localhost:1234'),
... default_kwargs=dict(connection_pool=6))
'''
if not issubclass(lock_class, lock.BaseLock):
raise ValueError('lock_class parameter must be a sub-class of '
'sherlock.lock.BaseLock')
setattr(self, name, {
'name': name,
'lock_class': lock_class,
'library': library,
'client_class': client_class,
'default_args': default_args,
'default_kwargs': default_kwargs,
})
valid_backends = list(self._valid_backends)
valid_backends.append(getattr(self, name))
self._valid_backends = tuple(valid_backends)
@property
def valid_backends(self):
'''
Return a tuple of valid backends.
:returns: a list of valid supported backends
:rtype: tuple
'''
return self._valid_backends
def configure(**kwargs):
'''
Set basic global configuration for :mod:`sherlock`.
:param backend: global choice of backend. This backend will be used
for managing locks by :class:`sherlock.Lock` class
objects.
:param client: global client object to use to connect with backend
store. This client object will be used to connect to the
backend store by :class:`sherlock.Lock` class instances.
The client object must be a valid object of the client
library. If the backend has been configured using the
`backend` parameter, the custom client object must belong
to the same library that is supported for that backend.
If the backend has not been set, then the custom client
object must be an instance of a valid supported client.
In that case, :mod:`sherlock` will set the backend by
introspecting the type of provided client object.
:param str namespace: provide global namespace
:param float expire: provide global expiration time. If explicitly set to
`None`, the lock will not expire.
:param float timeout: provide global timeout period
:param float retry_interval: provide global retry interval
Basic Usage:
>>> import sherlock
>>> from sherlock import Lock
>>>
>>> # Configure sherlock to use Redis as the backend and the timeout for
>>> # acquiring locks equal to 20 seconds.
>>> sherlock.configure(timeout=20, backend=sherlock.backends.REDIS)
>>>
>>> import redis
>>> redis_client = redis.StrictRedis(host='X.X.X.X', port=6379, db=1)
>>> sherlock.configure(client=redis_client)
'''
_configuration.update(**kwargs)
class _Configuration(object):
def __init__(self):
# Choice of backend
self._backend = None
# Client object to connect with the backend store
self._client = None
# Namespace to use for setting lock keys in the backend store
self.namespace = None
# Lock expiration time. If explicitly set to `None`, lock will not
# expire.
self.expire = 60
# Timeout to acquire lock
self.timeout = 10
# Retry interval to retry acquiring a lock if previous attempts failed
self.retry_interval = 0.1
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, val):
if val not in backends.valid_backends:
backend_names = list(map(
lambda x: 'sherlock.backends.%s' % x['name'],
backends.valid_backends))
error_str = ', '.join(backend_names[:-1])
backend_names = '%s and %s' % (error_str,
backend_names[-1])
raise ValueError('Invalid backend. Valid backends are: '
'%s.' % backend_names)
self._backend = val
@property
def client(self):
if self._client is not None:
return self._client
else:
if self.backend is None:
raise ValueError('Cannot create a default client object when '
'backend is not configured.')
for backend in backends.valid_backends:
if self.backend == backend:
self.client = self.backend['client_class'](
*self.backend['default_args'],
**self.backend['default_kwargs'])
return self._client
@client.setter
def client(self, val):
# When backend is set, check client type
if self.backend is not None:
exc_msg = ('Only a client of the %s library can be used '
'when using %s as the backend store option.')
if isinstance(val, self.backend['client_class']):
self._client = val
else:
raise ValueError(exc_msg % (self.backend['library'],
self.backend['name']))
else:
for backend in backends.valid_backends:
if isinstance(val, backend['client_class']):
self._client = val
self.backend = backend
if self._client is None:
raise ValueError('The provided object is not a valid client '
'object. Client objects can only be '
'instances of redis library\'s client class, '
'python-etcd library\'s client class or '
'pylibmc library\'s client class.')
def update(self, **kwargs):
'''
Update configuration. Provide keyword arguments where the keyword
parameter is the configuration and its value (the argument) is the
value you intend to set.
:param backend: global choice of backend. This backend will be used
for managing locks.
:param client: global client object to use to connect with backend
store.
:param str namespace: optional global namespace to namespace lock keys
for your application in order to avoid conflicts.
:param float expire: set lock expiry time. If explicitly set to `None`,
lock will not expire.
:param float timeout: global timeout for acquiring a lock.
:param float retry_interval: global timeout for retrying to acquire the
lock if previous attempts failed.
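Basic Usage (a minimal sketch; the values are illustrative):
>>> _configuration.update(timeout=20, retry_interval=0.5)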
'''
for key, val in kwargs.items():
if key not in dir(self):
raise AttributeError('Invalid configuration. No such '
'configuration as %s.' % key)
setattr(self, key, val)
# Create a backends singleton
backends = _Backends()
# Create a configuration singleton
_configuration = _Configuration()
# Import important Lock classes
from . import lock
from .lock import *
| 34.228628
| 100
| 0.606319
| 2,063
| 17,217
| 4.996122
| 0.204072
| 0.01921
| 0.010478
| 0.009314
| 0.206171
| 0.177064
| 0.136703
| 0.125546
| 0.125546
| 0.11856
| 0
| 0.002819
| 0.299355
| 17,217
| 502
| 101
| 34.296813
| 0.851612
| 0.68566
| 0
| 0.126984
| 0
| 0
| 0.181875
| 0.004615
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.039683
| 0.007937
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c88d252547df6d3f79fae0aefc72512a6ebb61d4
| 7,199
|
py
|
Python
|
misc.py
|
ChristophReich1996/Semantic_Pyramid_for_Image_Generation
|
00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509
|
[
"MIT"
] | 46
|
2020-04-13T07:54:49.000Z
|
2022-03-01T06:29:15.000Z
|
misc.py
|
ChristophReich1996/Semantic_Pyramid_for_Image_Generation
|
00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509
|
[
"MIT"
] | 2
|
2020-07-27T15:11:09.000Z
|
2021-04-04T10:58:03.000Z
|
misc.py
|
ChristophReich1996/Semantic_Pyramid_for_Image_Generation
|
00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509
|
[
"MIT"
] | 5
|
2020-06-22T01:56:30.000Z
|
2021-12-22T04:34:49.000Z
|
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from skimage.draw import random_shapes
import os
import json
def get_masks_for_training(
mask_shapes: List[Tuple] =
[(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)],
device: str = 'cpu', add_batch_size: bool = False,
p_random_mask: float = 0.3) -> List[torch.Tensor]:
'''
Method returns random masks as described in section 3.2 of the paper
:param mask_shapes: (List[Tuple]) Shapes of the features generated by the vgg16 model
:param device: (str) Device to store tensor masks
:param add_batch_size: (bool) If true a batch size is added to each mask
:param p_random_mask: (float) Probability that a random mask is generated else no mask is utilized
:return: (List[torch.Tensor]) Generated masks for each feature tensor
'''
# Select the stage where no masking is used. Every output from the deeper layers gets masked out. Every higher
# layer gets masked by a random shape
selected_stage = random.choice(list(range(len(mask_shapes))) + [0, 1])
# Make masks
masks = []
# Apply spatial varying masks
spatial_varying_masks = (np.random.rand() < p_random_mask) \
and (selected_stage < (len(mask_shapes) - 1)) \
and (selected_stage > 0)
# Init random mask
if spatial_varying_masks:
random_mask = random_shapes(tuple(reversed(mask_shapes))[selected_stage + 1][1:],
min_shapes=1,
max_shapes=4,
min_size=min(8, tuple(reversed(mask_shapes))[selected_stage + 1][1] // 2),
allow_overlap=True)[0][:, :, 0]
# Random mask to torch tensor
random_mask = torch.tensor(random_mask, dtype=torch.float32, device=device)[None, :, :]
# Change range of mask to [0, 1]
random_mask = (random_mask == 255.0).float()
# Loop over all shapes
for index, mask_shape in enumerate(reversed(mask_shapes)):
# Case if spatial varying masks are applied after selected stage
if spatial_varying_masks:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
elif index < selected_stage:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(F.interpolate(random_mask[None], size=mask_shape[1:], mode='nearest')[0])
# Case if only one stage is selected
else:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
# Add batch size dimension
if add_batch_size:
for index in range(len(masks)):
masks[index] = masks[index].unsqueeze(dim=0)
# Reverse order of masks to match the features of the vgg16 model
masks.reverse()
return masks
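# A minimal usage sketch (default shapes from the signature above; the
# assertion only illustrates that one mask per feature tensor is returned):
#
#     masks = get_masks_for_training(device='cpu', add_batch_size=True)
#     assert len(masks) == 7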
def get_masks_for_validation(mask_shapes: Tuple[Tuple[int, int, int], ...] =
((1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,),
(365,)), device: str = 'cpu', add_batch_size: bool = False) -> List[torch.Tensor]:
return get_masks_for_inference(stage_index_to_choose=random.choice(range(len(mask_shapes))),
mask_shapes=mask_shapes, device=device, add_batch_size=add_batch_size)
def get_masks_for_inference(stage_index_to_choose: int,
mask_shapes: Tuple[Tuple[int, int, int], ...] = (
(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)),
device: str = 'cpu',
add_batch_size: bool = False) -> List[torch.Tensor]:
# Init list for masks
masks = []
# Loop over all shapes
for index, mask_shape in enumerate(reversed(mask_shapes)):
if index == stage_index_to_choose:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
# Add batch size dimension
if add_batch_size:
for index in range(len(masks)):
masks[index] = masks[index].unsqueeze(dim=0)
# Reverse order of masks to match the features of the vgg16 model
masks.reverse()
return masks
def normalize_0_1_batch(input: torch.tensor) -> torch.tensor:
'''
Normalize a given tensor to a range of [0, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None]))
def normalize_m1_1_batch(input: torch.tensor) -> torch.tensor:
'''
Normalize a given tensor to a range of [-1, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return 2 * ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None])) - 1
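# For example, for x = torch.tensor([0., 5., 10.]).view(1, 1, 1, 3):
#     normalize_0_1_batch(x)  -> [[[[0.0, 0.5, 1.0]]]]
#     normalize_m1_1_batch(x) -> [[[[-1.0, 0.0, 1.0]]]]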
class Logger(object):
"""
Class to log different metrics
"""
def __init__(self) -> None:
self.metrics = dict()
self.hyperparameter = dict()
def log(self, metric_name: str, value: float) -> None:
"""
Method writes a given metric value into a dict including list for every metric
:param metric_name: (str) Name of the metric
:param value: (float) Value of the metric
"""
if metric_name in self.metrics:
self.metrics[metric_name].append(value)
else:
self.metrics[metric_name] = [value]
def save_metrics(self, path: str) -> None:
"""
Method to save the dict of hyperparameters (as a json file) and the dict of metrics (as torch tensors)
:param path: (str) Path to the folder where the files are saved
"""
# Save dict of hyperparameter as json file
with open(os.path.join(path, 'hyperparameter.txt'), 'w') as json_file:
json.dump(self.hyperparameter, json_file)
# Iterate items in metrics dict
for metric_name, values in self.metrics.items():
# Convert list of values to torch tensor to use build in save method from torch
values = torch.tensor(values)
# Save values
torch.save(values, os.path.join(path, '{}.pt'.format(metric_name)))
| 44.99375
| 120
| 0.607862
| 969
| 7,199
| 4.389061
| 0.190918
| 0.041383
| 0.028215
| 0.037856
| 0.462732
| 0.45027
| 0.446038
| 0.446038
| 0.410299
| 0.410299
| 0
| 0.03268
| 0.2774
| 7,199
| 159
| 121
| 45.27673
| 0.78489
| 0.263648
| 0
| 0.362637
| 0
| 0
| 0.007845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087912
| false
| 0
| 0.098901
| 0.010989
| 0.252747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c88f24e0c4f56b49a1514bbc5fcfcc00efd5e15c
| 4,204
|
py
|
Python
|
EasyMCDM/models/Irmo.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | 4
|
2022-03-05T20:51:38.000Z
|
2022-03-15T17:10:22.000Z
|
EasyMCDM/models/Irmo.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | null | null | null |
EasyMCDM/models/Irmo.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | 1
|
2022-03-08T13:45:22.000Z
|
2022-03-08T13:45:22.000Z
|
import math
from typing import Dict, List, Tuple, Union
from EasyMCDM.models.MCDM import MCDM
# Instant-Runoff Multicriteria Optimization (IRMO)
class Irmo(MCDM):
# Memory allocation
__slots__ = ['verbose', 'matrix', 'names', 'indexes', 'preferences']
# Constructor
def __init__(self, data : Union[str, dict], col_sep=',', row_sep='\n', verbose=True):
super().__init__(data, col_sep=col_sep, row_sep=row_sep, verbose=verbose)
# Read the lines of indexes
def get_indexes(self, path) -> List:
f = open(path,"r")
content = f.read()
f.close()
return [[int(i) for i in w.split(self.col_sep)] for w in content.split(self.row_sep) if len(w) > 0]
def __getVector(self, i, idx, banned, nbr_rounds):
items_lst = []
for s in self.matrix.keys():
# Check if already banned
if s not in banned:
insert_value = self.matrix[s][i]
else:
# Best item value
if (idx == nbr_rounds - 1 and self.preferences[idx] == "min") or (idx != nbr_rounds - 1 and self.preferences[idx] == "max"):
insert_value = math.inf
else:
insert_value = -math.inf
items_lst.append(insert_value)
return items_lst
# Compute
def __compute(self) -> Tuple[float, float]:
banned = []
# Check if the number of criteria is higher than the number of subjects else reduce the number of rounds
nbr_rounds = len(self.indexes) if len(self.indexes) <= len(self.matrix.keys()) else len(self.matrix.keys())
# For each criteria
for idx, i in enumerate(self.indexes):
# Values for the subjects left
items_vec = self.__getVector(i, idx, banned, nbr_rounds)
# Best item value
if (idx == nbr_rounds - 1 and self.preferences[idx] == "min") or (idx != nbr_rounds - 1 and self.preferences[idx] == "max"):
value = min(items_vec)
else:
value = max(items_vec)
# Worst item index
item_idx = items_vec.index(value)
item_name = list(self.matrix.keys())[item_idx]
# Ban Worst item
banned.append(item_name)
# Reverse the rank
banned.reverse()
return {
"best": banned[0], # Return best
"eleminated": banned
}
# Solve the problem
def solve(
self,
indexes : Union[str, list],
prefs : Union[str, List[str]],
indexes_idx = 0
) -> Dict:
# Define the indexes of the attributes
if type(indexes) == str:
self.indexes = self.get_indexes(indexes)[indexes_idx]
elif type(indexes) == list:
self.indexes = indexes
# Check if the lengths matches togethers
assert len(self.indexes) <= self.constraints_length, '\033[91m' + "The indexes have an inconsistent length, please give a length consistent with the matrix constraints!" + '\033[0m'
# Check variable types
assert all(isinstance(e, int) for e in self.indexes), '\033[91m' + "The indexes have invalid types, please give only integers!" + '\033[0m'
# Get preferences
if type(prefs) == str:
self.preferences = self.get_preferences(prefs)
elif type(prefs) == list:
self.preferences = prefs
# Check if has preferences other than max and min
assert all([a in ['max', 'min'] for a in sorted(list(set(self.preferences)))]), '\033[91m' + "The preferences must contain only min and max. Found: " + str(sorted(list(set(self.preferences)))) + '\033[0m'
# Check if the lengths matches togethers
assert len(self.preferences) == len(self.indexes), '\033[91m' + "The number of preferences does not match the number of indexes, please give consistent lengths!" + '\033[0m'
return self.__compute()
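# A minimal usage sketch (the input file and its column layout are
# assumptions; see the EasyMCDM documentation for the real data format):
#
#     irmo = Irmo(data="subjects.csv", verbose=False)
#     result = irmo.solve(indexes=[0, 1, 2], prefs=["min", "max", "min"])
#     print(result["best"], result["eleminated"])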
| 37.873874
| 218
| 0.562559
| 515
| 4,204
| 4.483495
| 0.271845
| 0.058467
| 0.02382
| 0.022521
| 0.234301
| 0.189693
| 0.162841
| 0.162841
| 0.162841
| 0.122997
| 0
| 0.01527
| 0.330162
| 4,204
| 111
| 219
| 37.873874
| 0.804688
| 0.142245
| 0
| 0.083333
| 0
| 0
| 0.131837
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.083333
| false
| 0
| 0.05
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8919966f9b0c8cb69e17d80a649cb9b3d0b7138
| 2,046
|
py
|
Python
|
ramp/estimators/r.py
|
kvh/ramp
|
8618ce673e49b95f40c9659319c3cb72281dacac
|
[
"MIT"
] | 214
|
2015-01-01T07:42:25.000Z
|
2022-03-08T08:57:49.000Z
|
ramp/estimators/r.py
|
Marigold/ramp
|
f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76
|
[
"MIT"
] | 8
|
2020-05-19T20:15:40.000Z
|
2020-05-19T20:15:41.000Z
|
ramp/estimators/r.py
|
Marigold/ramp
|
f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76
|
[
"MIT"
] | 87
|
2015-01-13T19:25:15.000Z
|
2021-05-16T10:40:05.000Z
|
import numpy as np
from rpy2.robjects import FloatVector
from rpy2.robjects.packages import importr
from rpy2 import robjects
stats = importr('stats')
base = importr('base')
def matrix_to_r_dataframe(x):
rx = FloatVector(np.ravel(x))
rx = robjects.r['matrix'](rx, nrow = len(x), byrow=True)
return robjects.r["data.frame"](rx)
class REstimator(object):
def __init__(self, r_estimator, **kwargs):
self.estimator = r_estimator
self.kwargs = kwargs
def fit(self, x, y):
rx = matrix_to_r_dataframe(x)
ry = FloatVector(y)
robjects.globalenv["y"] = ry
self.estimator_fit = self.estimator("y ~ .", data=rx,
**self.kwargs)
def predict(self, x):
rx = matrix_to_r_dataframe(x)
return np.array(stats.predict(self.estimator_fit, rx)[0])
class OrderedLogit(object):
def fit(self, x, y):
ordinal = importr('ordinal')
rx = matrix_to_r_dataframe(x)
self.levels = range(int(round(min(y))), int(round(max(y)))+1)
ry = base.factor(FloatVector(y), levels=self.levels, ordered=True)
robjects.globalenv["y"] = ry
self.clmfit = ordinal.clm("y ~ .", data=rx)
#print base.summary(self.clmfit)
def predict(self, x):
rx = matrix_to_r_dataframe(x)
rfac = stats.predict(self.clmfit, rx, type="class")[0]
rvec = [self.levels[v - 1] for v in rfac]
return rvec
class WeightedLM(object):
def fit(self, x, y, weights):
rx = matrix_to_r_dataframe(x)
ry = FloatVector(y)
rw = FloatVector(weights)
robjects.globalenv["score"] = ry
self.lmfit = stats.lm("score ~ .", data=rx, weights=rw)
#print(base.summary(self.lmfit))
def predict(self, x):
rx = matrix_to_r_dataframe(x)
rvec = stats.predict(self.lmfit, rx)[0]
return np.array(rvec)
class GBM(REstimator):
def __init__(self, **kwargs):
gbm = importr('gbm')
super(GBM, self).__init__(gbm.gbm, **kwargs)
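# A minimal usage sketch (requires a working R installation with the
# 'ordinal' package; the data values are illustrative):
#
#     import numpy as np
#     model = OrderedLogit()
#     model.fit(np.random.rand(30, 3), np.random.randint(1, 5, size=30))
#     preds = model.predict(np.random.rand(5, 3))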
| 29.652174
| 74
| 0.610948
| 278
| 2,046
| 4.363309
| 0.251799
| 0.046167
| 0.051937
| 0.103875
| 0.301731
| 0.236603
| 0.189613
| 0.189613
| 0.189613
| 0.131904
| 0
| 0.005215
| 0.250244
| 2,046
| 68
| 75
| 30.088235
| 0.785528
| 0.030303
| 0
| 0.294118
| 0
| 0
| 0.033317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.156863
| 0
| 0.490196
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c89234777cdd2b2357d8a397dcec12fefab43a56
| 1,138
|
py
|
Python
|
tests/decorators/test_timer.py
|
ShaneMicro/azure-functions-python-library
|
f56564effbf291a27e1bd5751a38484af387bb7f
|
[
"MIT"
] | null | null | null |
tests/decorators/test_timer.py
|
ShaneMicro/azure-functions-python-library
|
f56564effbf291a27e1bd5751a38484af387bb7f
|
[
"MIT"
] | 1
|
2022-03-02T11:49:02.000Z
|
2022-03-02T11:49:02.000Z
|
tests/decorators/test_timer.py
|
ShaneMicro/azure-functions-python-library
|
f56564effbf291a27e1bd5751a38484af387bb7f
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from azure.functions.decorators.constants import TIMER_TRIGGER
from azure.functions.decorators.core import BindingDirection, DataType
from azure.functions.decorators.timer import TimerTrigger
class TestTimer(unittest.TestCase):
def test_timer_trigger_valid_creation(self):
trigger = TimerTrigger(name="req",
schedule="dummy_schedule",
data_type=DataType.UNDEFINED,
run_on_startup=False,
use_monitor=False,
dummy_field="dummy")
self.assertEqual(trigger.get_binding_name(), "timerTrigger")
self.assertEqual(trigger.get_dict_repr(), {
"type": TIMER_TRIGGER,
"direction": BindingDirection.IN,
'dummyField': 'dummy',
"name": "req",
"dataType": DataType.UNDEFINED,
"schedule": "dummy_schedule",
"runOnStartup": False,
"useMonitor": False
})
| 37.933333
| 70
| 0.598418
| 102
| 1,138
| 6.509804
| 0.558824
| 0.040663
| 0.081325
| 0.126506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.311951
| 1,138
| 29
| 71
| 39.241379
| 0.84802
| 0.079086
| 0
| 0
| 0
| 0
| 0.1159
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c895e6b35498811fbcaa8204ceff2eff7744a4b3
| 8,368
|
py
|
Python
|
src/client.py
|
Da3dalu2/SimpleNetworkSimulator
|
447bc099b35720ab8d6e8a9703bb2354f1f01cae
|
[
"MIT"
] | null | null | null |
src/client.py
|
Da3dalu2/SimpleNetworkSimulator
|
447bc099b35720ab8d6e8a9703bb2354f1f01cae
|
[
"MIT"
] | null | null | null |
src/client.py
|
Da3dalu2/SimpleNetworkSimulator
|
447bc099b35720ab8d6e8a9703bb2354f1f01cae
|
[
"MIT"
] | null | null | null |
import socket
import threading
import time
from threading import Thread
import utilities as utils
import error_handling as check
BUFFER_SIZE = 1024
BROADCAST_MAC = "FF:FF:FF:FF:FF:FF"
class ClientThread(threading.Thread):
"""
Initializes the client.
The event synchronization primitives among the given initialization
parameters are used to guarantee that the client sends its messages only
when the router is actually listening.
"""
def __init__(self, init_params):
self.connected = False
self.clients_threads = init_params["clients_threads"]
self.arp_table_mac = init_params["arp_table_mac"]
self.client_data = init_params["client_data"]
self.client_id = init_params["client_id"]
self.router_thread = init_params["router_thread"]
self.router_id = init_params["router_id"]
self.sync_event_message = init_params["sync_event_message"]
self.sync_event_connection = init_params["sync_event_connection"]
self.stop_event = threading.Event()
self.sleep_time = 1.0
port = self.client_data["port"]
address = ("localhost", port)
self.client_connection = check.socket_create(
address,
backlog = 0,
timeout = 3,
reuse_address = True
)
threading.Thread.__init__(self, name=self.client_id)
"""
Client main loop.
Listens for messages from the network.
"""
def run(self):
utils.show_status(self.getName(), "starting")
connected = self.go_online()
if(connected is True):
utils.show_status(self.client_id, "listening for incoming packets")
while not self.stop_event.isSet():
self.listen_packets()
# exit procedure
utils.show_status(self.client_id, "going offline")
utils.show_status(self.client_id, "closing connection")
self.client_connection.close()
self.stop_event.clear()
del self.clients_threads[self.client_id]
"""
Tells the client to exit from its main loop.
It goes offline thus closing its connection to the network.
"""
def join(self, timeout=None):
self.stop_event.set()
threading.Thread.join(self, timeout)
"""
Tells the router of its network to start listening for a message
from this client.
"""
def notify_incoming_message(self):
msg = " ".join(["notifying", self.router_id, "of an incoming message"])
utils.show_status(self.client_id, msg)
my_ip_address = self.client_data["ip_address"]
listen_task = threading.Thread(
target=self.router_thread.listen_client_side,
args=[my_ip_address],
daemon=True
)
listen_task.start()
"""
Tells the router of its network to start listening for a connection
from this client.
"""
def notify_incoming_connection(self):
msg = " ".join(["notifying",self.router_id, \
"of an incoming connection"])
utils.show_status(self.client_id, msg)
listen_task = threading.Thread(
target=self.router_thread.listen_connections_client_side,
daemon=True
)
listen_task.start()
"""
Sends packets to other clients.
"""
def send_message(self, recipient_ip, message):
gateway_ip = self.client_data["gateway_ip"]
packet = utils.write_packet(
self.client_data["ip_address"],
recipient_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
message
)
utils.show_status(
self.client_id,
"waiting for router listening messages"
)
self.notify_incoming_message()
# waiting for router approving message sending
self.sync_event_message.wait()
sent = check.socket_send(self.client_connection, packet, self.router_id)
if(sent is True):
msg = " ".join(["message sent to", gateway_ip])
utils.show_status(self.client_id, msg)
self.sync_event_message.clear()
"""
Sends a special packet to notify the server it is currently online.
Returns false if the connection was not established or the packet could not
be sent (in the latter case the server will not recognize the client as
online, hence the action go_online is considered failed even if a connection
has been created)
"""
def go_online(self):
utils.show_status(self.client_id, "connecting to the network")
server_ip = self.client_data["server_ip"]
router_port = self.client_data["gateway_port"]
router_address = ("localhost", router_port)
gateway_ip = self.client_data["gateway_ip"]
self.notify_incoming_connection()
# waiting for router approving connection
self.sync_event_connection.wait()
self.sync_event_connection.clear() # ready for reuse
connected = check.socket_connect(
self.client_connection,
router_address,
self.client_id
)
if(connected is True):
utils.show_status(self.client_id, "going online")
# waiting for router completing connection procedure
self.sync_event_connection.wait()
self.sync_event_connection.clear() # ready for reuse
greeting_packet = utils.write_packet(
self.client_data.get("ip_address"),
server_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
"{going online}"
)
utils.show_status(
self.client_id,
"waiting for router accepting message"
)
self.notify_incoming_message()
# waiting for router approving message sending
self.sync_event_message.wait()
self.sync_event_message.clear()
check.socket_send(
self.client_connection,
greeting_packet,
self.client_id,
"Greeting packet could not be sent"
)
return connected
"""
Sends a special packet to notify the server it is currently offline.
Then closes its connection to the network.
"""
def go_offline(self):
utils.show_status(self.client_id, "going offline")
gateway_ip = self.client_data["gateway_ip"]
server_ip = self.client_data["server_ip"]
leave_packet = utils.write_packet(
self.client_data.get("ip_address"),
server_ip,
self.client_data.get("mac_address"),
self.arp_table_mac[gateway_ip],
"{going offline}"
)
self.notify_incoming_message()
self.sync_event_message.wait() # wait for router approval
self.sync_event_message.clear()
check.socket_send(
self.client_connection,
leave_packet,
self.client_id,
"Leave packet could not be sent"
)
self.join()
"""
Listens for packets from the server.
"""
def listen_packets(self):
received_message = check.socket_recv(
self.client_connection,
self.client_id
)
if(received_message is not None and len(received_message) > 0):
parsed_message = utils.read_packet(received_message)
time.sleep(2) # give time to router to show its status
msg = " ".join(["message received from:",
parsed_message["source_ip"]])
utils.show_status(self.client_id, msg)
utils.report(
self.client_id,
parsed_message,
"reading received packet"
)
if(parsed_message["destination_mac"] == BROADCAST_MAC):
msg = " ".join(["received an ARP request from",
parsed_message["source_ip"]])
utils.show_status(self.client_id, msg)
self.send_message(
parsed_message.get("source_ip"),
"{ARP reply}"
)
| 32.30888
| 80
| 0.603848
| 963
| 8,368
| 5.011423
| 0.186916
| 0.089101
| 0.052217
| 0.055118
| 0.456486
| 0.410485
| 0.373187
| 0.30978
| 0.293618
| 0.245545
| 0
| 0.001735
| 0.311066
| 8,368
| 258
| 81
| 32.434109
| 0.835386
| 0.059154
| 0
| 0.357143
| 0
| 0
| 0.113235
| 0.003064
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.035714
| 0
| 0.10119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8a19d3ee1214101499b5145f53a93867a82f056
| 675
|
py
|
Python
|
dl/src/CookieManager.py
|
PatrykCholewa/PI_Stored
|
4ff4d72fe56281b76ddf7b759c19aabbce3c9899
|
[
"MIT"
] | null | null | null |
dl/src/CookieManager.py
|
PatrykCholewa/PI_Stored
|
4ff4d72fe56281b76ddf7b759c19aabbce3c9899
|
[
"MIT"
] | null | null | null |
dl/src/CookieManager.py
|
PatrykCholewa/PI_Stored
|
4ff4d72fe56281b76ddf7b759c19aabbce3c9899
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import jwt
from src import ConfigManager
secret = ConfigManager.get_config("DL_COOKIE_SECRET_KEY")
secure = ConfigManager.get_config("APP_SECURE")
def validate_user_jwt(token, username):
token = jwt.decode(token, secret, "HS256")
expire = token['exp']
if username != token['user']:
return False
return datetime.now() < datetime.fromtimestamp(expire)
def validate_file_by_jwt(token, file_id):
token = jwt.decode(token, secret, "HS256")
expire = token['exp']
file_ids = token['file_list']
if file_id not in file_ids:
return False
return datetime.now() < datetime.fromtimestamp(expire)
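# A minimal usage sketch (the payload mirrors the claims read above; the
# values are illustrative assumptions):
#
#     import time
#     token = jwt.encode({"user": "alice", "exp": time.time() + 3600},
#                        secret, "HS256")
#     validate_user_jwt(token, "alice")  # True while the token is valid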
| 23.275862
| 58
| 0.708148
| 88
| 675
| 5.25
| 0.397727
| 0.060606
| 0.095238
| 0.082251
| 0.428571
| 0.428571
| 0.428571
| 0.428571
| 0.190476
| 0
| 0
| 0.010889
| 0.183704
| 675
| 28
| 59
| 24.107143
| 0.827586
| 0
| 0
| 0.444444
| 0
| 0
| 0.087407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8a47ee8db41845109ebaa2bf272e65a01b66623
| 2,683
|
py
|
Python
|
argos/countdown.9s.py
|
solettitiger/countdown
|
c5df89c7d67984171de08508ef4433ea9d6fbbd1
|
[
"MIT"
] | null | null | null |
argos/countdown.9s.py
|
solettitiger/countdown
|
c5df89c7d67984171de08508ef4433ea9d6fbbd1
|
[
"MIT"
] | null | null | null |
argos/countdown.9s.py
|
solettitiger/countdown
|
c5df89c7d67984171de08508ef4433ea9d6fbbd1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import sys
import subprocess
import os
from playsound import playsound
# ******************************************************************
# Definitions
# ******************************************************************
filename = 'countdown.txt'
audiofile = 'ringing.mp3'
settimer = 'add.py'
stoptimer = 'stop.py'
overlay = 'overlay.py'
title = "⏰"
zeit = ""
command = ""
path = ""
diff = 0
# ******************************************************************
# Functions
# ******************************************************************
def readdata():
global title, zeit, command, path
full_path = os.path.realpath(__file__)
path, thisfile = os.path.split(full_path)
ff = open(path+"/countdown/"+filename,"r")
ll = ff.readlines()
if(len(ll) == 3):
title = ll[0].strip()
zeit = ll[1].strip()
command = ll[2].strip()
ff.close()
def gettimediff():
global zeit
now = datetime.datetime.now()
day = datetime.datetime(now.year, now.month, now.day)
endtime = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + zeit, "%Y-%m-%d %H:%M")
diff = int((endtime-now).seconds/60)
if(diff < 0):
diff = diff + 1440
if(diff < 1 and diff >= -1):
runDone()
else:
zeit = convertTime(diff)
def runDone():
global zeit
    # Run the configured command
if(command != ""):
cmdlist = command.split()
subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # Show the overlay
subprocess.Popen([path+"/countdown/"+overlay, beautifyTimestring(zeit), title], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
zeit = ""
    # Play the alarm sound
playsound(path+"/countdown/"+audiofile)
    # Stop the countdown - takes as long as the Argos refresh interval
stopCountdown()
def stopCountdown():
ff = open(path+"/countdown/"+filename,"w")
ff.close()
def convertTime(minutes):
hours = int(minutes/60)
minutes = minutes - hours*60
str_hours = "0" + str(hours)
str_minutes = "0" + str(minutes)
return (str_hours[-2:] + ":" + str_minutes[-2:])
def beautifyTimestring(timestring):
times = timestring.split(":")
str_hours = "0" + times[0]
str_minutes = "0" + times[1]
return (str_hours[-2:] + ":" + str_minutes[-2:])
# ******************************************************************
# Main
# ******************************************************************
def main():
readdata()
if(zeit != ""):
gettimediff()
print (title + " " + zeit)
print ("---")
print ("Set Timer | bash='"+ path+"/countdown/"+settimer +"' terminal=false")
print ("Stopp Timer | bash='"+ path+"/countdown/"+stoptimer +"' terminal=false")
if __name__ == "__main__":
main()
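# Illustrative only (not part of the original script): readdata() above expects
# <script dir>/countdown/countdown.txt to hold exactly three lines. A hypothetical
# example for a 17:30 alarm that fires a desktop notification:
#
#   Tea break        <- line 1: title shown in the menu bar
#   17:30            <- line 2: end time in %H:%M, parsed by gettimediff()
#   notify-send Tea  <- line 3: command executed by runDone() (may be empty)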
| 26.83
| 151
| 0.561685
| 295
| 2,683
| 5.037288
| 0.362712
| 0.05249
| 0.020188
| 0.025572
| 0.15074
| 0.114401
| 0.114401
| 0.079408
| 0.079408
| 0
| 0
| 0.013339
| 0.133805
| 2,683
| 99
| 152
| 27.10101
| 0.625645
| 0.211331
| 0
| 0.112676
| 0
| 0
| 0.10852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098592
| false
| 0
| 0.070423
| 0
| 0.197183
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8a8f855a2d0fbd314903aae2f023f9e8c19884d
| 5,043
|
py
|
Python
|
multimodal_models/StackGAN_V2_PyTorch/models.py
|
kumayu0108/model-zoo
|
4285779f6ff51fa1efb0625d67b428e90c343c0c
|
[
"MIT"
] | 43
|
2020-05-16T21:05:34.000Z
|
2022-02-08T11:33:29.000Z
|
multimodal_models/StackGAN_V2_PyTorch/models.py
|
kumayu0108/model-zoo
|
4285779f6ff51fa1efb0625d67b428e90c343c0c
|
[
"MIT"
] | 52
|
2020-05-14T16:18:08.000Z
|
2021-11-02T19:13:47.000Z
|
multimodal_models/StackGAN_V2_PyTorch/models.py
|
kumayu0108/model-zoo
|
4285779f6ff51fa1efb0625d67b428e90c343c0c
|
[
"MIT"
] | 69
|
2020-05-14T13:39:23.000Z
|
2021-07-30T00:51:27.000Z
|
import torch
import torch.nn as nn
from generator_model import G1, G2
from helper_functions.Blocks import downBlock, Block3x3_leakRelu
from helper_functions.ret_image import Interpolate, condAugmentation
from helper_functions.initial_weights import weights_init
from helper_functions.losses import KLloss, custom_loss
from helper_functions.Blocks import upScale, normalBlock, Residual
import helper_functions.config as cfg
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1, bias=False),
nn.Tanh())
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self, StageNum, zDim = 100):
super(G_NET, self).__init__()
self.zDim = zDim
self.StageNum = StageNum
self.gf_dim = cfg.generatorDim
self.define_module()
def define_module(self):
self.ca_net = condAugmentation()
if self.StageNum == 1:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
elif self.StageNum == 2:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
elif self.StageNum == 3:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
self.h_net3 = G2(self.gf_dim // 2)
self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
elif self.StageNum == 4:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
self.h_net3 = G2(self.gf_dim // 2)
self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
self.h_net4 = G2(self.gf_dim // 4, num_residual=1)
self.img_net4 = GET_IMAGE_G(self.gf_dim // 8)
def forward(self, z_code, text_embedding=None):
c_code, mu, logvar = self.ca_net(text_embedding)
fake_imgs = []
if self.StageNum == 1:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
elif self.StageNum == 2:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
elif self.StageNum == 3:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
h_code3 = self.h_net3(h_code2, c_code)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
elif self.StageNum == 4:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
h_code3 = self.h_net3(h_code2, c_code)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
h_code4 = self.h_net4(h_code3, c_code)
fake_img4 = self.img_net4(h_code4)
fake_imgs.append(fake_img4)
return fake_imgs, mu, logvar
class eval256(nn.Module):
def __init__(self):
super(eval256, self).__init__()
self.df_dim = cfg.discriminatorDim
self.ef_dim = cfg.embeddingsDim
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
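        # NOTE: encode_image_by_16times is not defined or imported in this file;
        # it presumably comes from the repo's helper modules (assumption).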
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s64(x_code)
x_code = self.img_code_s64_1(x_code)
x_code = self.img_code_s64_2(x_code)
h_c_code = x_code
output = self.logits(h_c_code)
return output.view(-1)
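# Illustrative only (not part of the original file): a minimal sketch of driving
# G_NET above. The embedding width of 256 is hypothetical; the real value comes
# from the repo's config and condAugmentation.
#
#   netG = G_NET(StageNum=2)          # two stages -> a list of two image scales
#   z = torch.randn(4, netG.zDim)     # batch of four noise vectors
#   emb = torch.randn(4, 256)         # hypothetical text embeddings
#   fake_imgs, mu, logvar = netG(z, emb)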
| 39.093023
| 78
| 0.615507
| 766
| 5,043
| 3.708877
| 0.148825
| 0.078845
| 0.069694
| 0.050334
| 0.530799
| 0.471313
| 0.451602
| 0.426962
| 0.401619
| 0.401619
| 0
| 0.052632
| 0.284156
| 5,043
| 129
| 79
| 39.093023
| 0.734349
| 0
| 0
| 0.491379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.077586
| 0
| 0.198276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8a9475637b6493e4ff65f91b1c3dca0e1d6f885
| 382
|
py
|
Python
|
utils/agro_utils.py
|
TiagoMarta/data_fusion_Vineyard-Segmentation
|
de54e149d36027bb314b5890ea4a1e71ba472d17
|
[
"Unlicense",
"MIT"
] | 3
|
2021-08-04T08:03:50.000Z
|
2022-03-25T11:22:09.000Z
|
utils/agro_utils.py
|
TiagoMarta/data_fusion_Vineyard-Segmentation
|
de54e149d36027bb314b5890ea4a1e71ba472d17
|
[
"Unlicense",
"MIT"
] | null | null | null |
utils/agro_utils.py
|
TiagoMarta/data_fusion_Vineyard-Segmentation
|
de54e149d36027bb314b5890ea4a1e71ba472d17
|
[
"Unlicense",
"MIT"
] | null | null | null |
import numpy as np
def NDVI(nir,red):
'''
# https://eos.com/make-an-analysis/ndvi/
Inputs: nxm numpy arrays
NIR – reflection in the near-infrared spectrum
RED – reflection in the red range of the spectrum
'''
    num = nir - red
    dom = nir + red
    ndvi = np.divide(num, dom)
    ndvi[np.isnan(ndvi)] = 0  # clean NaNs produced by 0/0 pixels
    return ndvi
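# Illustrative usage (not part of the original file): NDVI on two tiny synthetic bands.
if __name__ == "__main__":
    nir = np.array([[0.8, 0.6], [0.0, 0.5]])
    red = np.array([[0.1, 0.2], [0.0, 0.3]])
    print(NDVI(nir, red))  # the 0/0 pixel becomes 0 after the NaN clean-up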
| 25.466667
| 57
| 0.609948
| 59
| 382
| 3.983051
| 0.610169
| 0.076596
| 0.110638
| 0.13617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003636
| 0.280105
| 382
| 15
| 58
| 25.466667
| 0.843636
| 0.505236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8abec201704fed99560906ddf5c95d5088bad9f
| 840
|
py
|
Python
|
heap/maxSlidingWindow.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
heap/maxSlidingWindow.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
heap/maxSlidingWindow.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
class Solution(object):
def maxSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
res = []
tmp = [] # tmp[0] always save the current windows max
        for i in range(len(nums)):  # range (xrange in the original is Python 2-only)
if i < k-1: # first k-1 numbers
while tmp and nums[tmp[-1]]<nums[i]: # keep tmp[0] the max
tmp.pop()
tmp.append(i)
continue
while tmp and nums[tmp[-1]] < nums[i]: # find proper location for nums[i]
tmp.pop()
tmp.append(i)
            while tmp and tmp[0] <= i-k:  # drop indices that have left the window
tmp.pop(0)
res.append(nums[tmp[0]])
return res
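# Illustrative usage (not part of the original file):
if __name__ == "__main__":
    print(Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))  # [3, 3, 5, 5, 6, 7]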
| 31.111111
| 85
| 0.42381
| 102
| 840
| 3.490196
| 0.411765
| 0.044944
| 0.092697
| 0.08427
| 0.224719
| 0.134831
| 0.134831
| 0.134831
| 0
| 0
| 0
| 0.019912
| 0.461905
| 840
| 27
| 86
| 31.111111
| 0.767699
| 0.22619
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8adae8d9f3f33704f82f32bb3e323260ea0ba97
| 29,151
|
py
|
Python
|
tccli/services/tsf/v20180326/help.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/tsf/v20180326/help.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/tsf/v20180326/help.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
DESC = "tsf-2018-03-26"
INFO = {
"DeletePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "删除公共配置项"
},
"DescribeSimpleGroups": {
"params": [
{
"name": "GroupIdList",
"desc": "部署组ID列表,不填写时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不填写时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不填写时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不填写时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "GroupId",
"desc": "部署组ID,不填写时查询全量"
},
{
"name": "SearchWord",
"desc": "模糊查询,部署组名称,不填写时查询全量"
},
{
"name": "AppMicroServiceType",
"desc": "部署组类型,精确过滤字段,M:service mesh, P:原生应用, M:网关应用"
}
],
"desc": "查询简单部署组列表"
},
"CreateGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "部署组所属的应用ID"
},
{
"name": "NamespaceId",
"desc": "部署组所属命名空间ID"
},
{
"name": "GroupName",
"desc": "部署组名称"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "GroupDesc",
"desc": "部署组描述"
}
],
"desc": "创建容器部署组"
},
"CreateCluster": {
"params": [
{
"name": "ClusterName",
"desc": "集群名称"
},
{
"name": "ClusterType",
"desc": "集群类型"
},
{
"name": "VpcId",
"desc": "私有网络ID"
},
{
"name": "ClusterCIDR",
"desc": "分配给集群容器和服务IP的CIDR"
},
{
"name": "ClusterDesc",
"desc": "集群备注"
},
{
"name": "TsfRegionId",
"desc": "集群所属TSF地域"
},
{
"name": "TsfZoneId",
"desc": "集群所属TSF可用区"
},
{
"name": "SubnetId",
"desc": "私有网络子网ID"
}
],
"desc": "创建集群"
},
"DescribePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID(只传入应用ID,返回该应用下所有软件包信息)"
},
{
"name": "SearchWord",
"desc": "查询关键字(支持根据包ID,包名,包版本号搜索)"
},
{
"name": "OrderBy",
"desc": "排序关键字(默认为\"UploadTime\":上传时间)"
},
{
"name": "OrderType",
"desc": "升序:0/降序:1(默认降序)"
},
{
"name": "Offset",
"desc": "查询起始偏移"
},
{
"name": "Limit",
"desc": "返回数量限制"
}
],
"desc": "无"
},
"ModifyContainerReplicas": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,部署组唯一标识"
},
{
"name": "InstanceNum",
"desc": "实例数量"
}
],
"desc": "修改容器部署组实例数"
},
"DescribeConfigSummary": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
},
{
"name": "SearchWord",
"desc": "查询关键字,模糊查询:应用名称,配置项名称,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询配置汇总列表"
},
"DeployContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,分组唯一标识"
},
{
"name": "Server",
"desc": "镜像server"
},
{
"name": "TagName",
"desc": "镜像版本名称,如v1"
},
{
"name": "InstanceNum",
"desc": "实例数量"
},
{
"name": "Reponame",
"desc": "旧版镜像名,如/tsf/nginx"
},
{
"name": "CpuLimit",
"desc": "最大的 CPU 核数,对应 K8S 的 limit;不填时默认为 request 的 2 倍"
},
{
"name": "MemLimit",
"desc": "最大的内存 MiB 数,对应 K8S 的 limit;不填时默认为 request 的 2 倍"
},
{
"name": "JvmOpts",
"desc": "jvm参数"
},
{
"name": "CpuRequest",
"desc": "分配的 CPU 核数,对应 K8S 的 request"
},
{
"name": "MemRequest",
"desc": "分配的内存 MiB 数,对应 K8S 的 request"
},
{
"name": "DoNotStart",
"desc": "是否不立即启动"
},
{
"name": "RepoName",
"desc": "(优先使用)新版镜像名,如/tsf/nginx"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "滚动更新必填,更新间隔"
}
],
"desc": "部署容器应用"
},
"AddClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "InstanceIdList",
"desc": "云主机ID列表"
},
{
"name": "OsName",
"desc": "操作系统名称"
},
{
"name": "ImageId",
"desc": "操作系统镜像ID"
},
{
"name": "Password",
"desc": "重装系统密码设置"
},
{
"name": "KeyId",
"desc": "重装系统,关联密钥设置"
},
{
"name": "SgId",
"desc": "安全组设置"
},
{
"name": "InstanceImportMode",
"desc": "云主机导入方式,虚拟机集群必填,容器集群不填写此字段,R:重装TSF系统镜像,M:手动安装agent"
}
],
"desc": "添加云主机节点至TSF集群"
},
"DescribePodInstances": {
"params": [
{
"name": "GroupId",
"desc": "实例所属groupId"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
}
],
"desc": "获取部署组实例列表"
},
"DescribeServerlessGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段,模糊搜索groupName字段"
},
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段,默认为 createTime字段,支持id, name, createTime"
},
{
"name": "OrderType",
"desc": "排序方式,默认为1:倒序排序,0:正序,1:倒序"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "NamespaceId",
"desc": "分组所属名字空间ID"
},
{
"name": "ClusterId",
"desc": "分组所属集群ID"
}
],
"desc": "查询Serverless部署组列表"
},
"CreateNamespace": {
"params": [
{
"name": "NamespaceName",
"desc": "命名空间名称"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "NamespaceDesc",
"desc": "命名空间描述"
},
{
"name": "NamespaceResourceType",
"desc": "命名空间资源类型(默认值为DEF)"
},
{
"name": "NamespaceType",
"desc": "是否是全局命名空间(默认是DEF,表示普通命名空间;GLOBAL表示全局命名空间)"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
}
],
"desc": "创建命名空间"
},
"DeleteApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "删除应用"
},
"DeleteMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务ID"
}
],
"desc": "删除微服务"
},
"StartGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "启动分组"
},
"DeleteNamespace": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ClusterId",
"desc": "集群ID"
}
],
"desc": "删除命名空间"
},
"DescribeGroupInstances": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询虚拟机部署组云主机列表"
},
"DeleteConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "删除配置项"
},
"DescribePublicConfigSummary": {
"params": [
{
"name": "SearchWord",
"desc": "查询关键字,模糊查询:配置项名称,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询公共配置汇总列表"
},
"DeletePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgIds",
"desc": "需要删除的程序包ID列表"
}
],
"desc": "从软件仓库批量删除程序包。\n一次最多支持删除1000个包,数量超过1000,返回UpperDeleteLimit错误。"
},
"RevocationPublicConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "配置项发布ID"
}
],
"desc": "撤回已发布的公共配置"
},
"DescribePublicConfigs": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量,高优先级"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
},
{
"name": "ConfigIdList",
"desc": "配置项ID列表,不传入时查询全量,低优先级"
},
{
"name": "ConfigName",
"desc": "配置项名称,精确查询,不传入时查询全量"
},
{
"name": "ConfigVersion",
"desc": "配置项版本,精确查询,不传入时查询全量"
}
],
"desc": "查询公共配置项列表"
},
"DescribeSimpleClusters": {
"params": [
{
"name": "ClusterIdList",
"desc": "需要查询的集群ID列表,不填或不传入时查询所有内容"
},
{
"name": "ClusterType",
"desc": "需要查询的集群类型,不填或不传入时查询所有内容"
},
{
"name": "Offset",
"desc": "查询偏移量,默认为0"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "SearchWord",
"desc": "对id和name进行关键词过滤"
}
],
"desc": "查询简单集群列表"
},
"CreateServerlessGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "GroupName",
"desc": "分组名称字段,长度1~60,字母或下划线开头,可包含字母数字下划线"
},
{
"name": "NamespaceId",
"desc": "分组所属名字空间ID"
},
{
"name": "ClusterId",
"desc": "分组所属集群ID"
}
],
"desc": "创建Serverless部署组"
},
"DescribeConfigs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
},
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量,高优先级"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "ConfigIdList",
"desc": "配置项ID列表,不传入时查询全量,低优先级"
},
{
"name": "ConfigName",
"desc": "配置项名称,精确查询,不传入时查询全量"
},
{
"name": "ConfigVersion",
"desc": "配置项版本,精确查询,不传入时查询全量"
}
],
"desc": "查询配置项列表"
},
"DescribeConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "查询配置"
},
"DescribeMicroservices": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "获取微服务列表"
},
"StartContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "启动容器部署组"
},
"RemoveInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群 ID"
},
{
"name": "InstanceIdList",
"desc": "云主机 ID 列表"
}
],
"desc": "从 TSF 集群中批量移除云主机节点"
},
"ExpandGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "InstanceIdList",
"desc": "扩容的机器实例ID列表"
}
],
"desc": "虚拟机部署组添加实例"
},
"DeleteGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "删除容器部署组"
},
"DescribeContainerGroupDetail": {
"params": [
{
"name": "GroupId",
"desc": "分组ID"
}
],
"desc": " 容器部署组详情"
},
"DeleteContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,分组唯一标识"
}
],
"desc": "删除容器部署组"
},
"RollbackConfig": {
"params": [
{
"name": "ConfigReleaseLogId",
"desc": "配置项发布历史ID"
},
{
"name": "ReleaseDesc",
"desc": "回滚描述"
}
],
"desc": "回滚配置"
},
"ModifyMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务 ID"
},
{
"name": "MicroserviceDesc",
"desc": "微服务备注信息"
}
],
"desc": "修改微服务详情"
},
"CreatePublicConfig": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称"
},
{
"name": "ConfigVersion",
"desc": "配置项版本"
},
{
"name": "ConfigValue",
"desc": "配置项值,总是接收yaml格式的内容"
},
{
"name": "ConfigVersionDesc",
"desc": "配置项版本描述"
},
{
"name": "ConfigType",
"desc": "配置项类型"
}
],
"desc": "创建公共配置项"
},
"DescribeImageTags": {
"params": [
{
"name": "ApplicationId",
"desc": "应用Id"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~100"
},
{
"name": "QueryImageIdFlag",
"desc": "不填和0:查询 1:不查询"
},
{
"name": "SearchWord",
"desc": "可用于搜索的 tag 名字"
}
],
"desc": "镜像版本列表"
},
"DescribeServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询Serverless部署组明细"
},
"DescribeMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务ID"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询微服务详情"
},
"DescribePublicConfigReleaseLogs": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询公共配置发布历史"
},
"DescribeApplicationAttribute": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "获取应用列表其它字段,如实例数量信息等"
},
"RevocationConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "配置项发布ID"
}
],
"desc": "撤回已发布的配置"
},
"ReleasePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置ID"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ReleaseDesc",
"desc": "发布描述"
}
],
"desc": "发布公共配置"
},
"ReleaseConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置ID"
},
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "ReleaseDesc",
"desc": "发布描述"
}
],
"desc": "发布配置"
},
"DescribeReleasedConfig": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询group发布的配置"
},
"CreateContainGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "NamespaceId",
"desc": "分组所属命名空间ID"
},
{
"name": "GroupName",
"desc": "分组名称字段,长度1~60,字母或下划线开头,可包含字母数字下划线"
},
{
"name": "InstanceNum",
"desc": "实例数量"
},
{
"name": "AccessType",
"desc": "0:公网 1:集群内访问 2:NodePort"
},
{
"name": "ProtocolPorts",
"desc": "数组对象,见下方定义"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "CpuLimit",
"desc": "最大分配 CPU 核数,对应 K8S limit"
},
{
"name": "MemLimit",
"desc": "最大分配内存 MiB 数,对应 K8S limit"
},
{
"name": "GroupComment",
"desc": "分组备注字段,长度应不大于200字符"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "滚动更新必填,更新间隔"
},
{
"name": "CpuRequest",
"desc": "初始分配的 CPU 核数,对应 K8S request"
},
{
"name": "MemRequest",
"desc": "初始分配的内存 MiB 数,对应 K8S request"
}
],
"desc": "创建容器部署组"
},
"DescribePublicConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称,不传入时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量"
}
],
"desc": "查询公共配置发布信息"
},
"DescribeGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序方式"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "GroupResourceTypeList",
"desc": "部署组资源类型列表"
}
],
"desc": "获取虚拟机部署组列表"
},
"DescribeSimpleNamespaces": {
"params": [
{
"name": "NamespaceIdList",
"desc": "命名空间ID列表,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "NamespaceResourceTypeList",
"desc": "查询资源类型列表"
},
{
"name": "SearchWord",
"desc": "通过id和name进行过滤"
},
{
"name": "NamespaceTypeList",
"desc": "查询的命名空间类型列表"
},
{
"name": "NamespaceName",
"desc": "通过命名空间名精确过滤"
},
{
"name": "IsDefault",
"desc": "通过是否是默认命名空间过滤,不传表示拉取全部命名空间。0:默认,命名空间。1:非默认命名空间"
}
],
"desc": "查询简单命名空间列表 "
},
"DescribeConfigReleaseLogs": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
}
],
"desc": "查询配置发布历史"
},
"CreateMicroservice": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "MicroserviceName",
"desc": "微服务名称"
},
{
"name": "MicroserviceDesc",
"desc": "微服务描述信息"
}
],
"desc": "新增微服务"
},
"DescribeDownloadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
}
],
"desc": "TSF上传的程序包存放在腾讯云对象存储(COS)中,通过该API可以获取从COS下载程序包需要的信息,包括包所在的桶、存储路径、鉴权信息等,之后使用COS API(或SDK)进行下载。\nCOS相关文档请查阅:https://cloud.tencent.com/document/product/436"
},
"DeployServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
},
{
"name": "Memory",
"desc": "所需实例内存大小,取值为 1Gi 2Gi 4Gi 8Gi 16Gi,缺省为 1Gi,不传表示维持原态"
},
{
"name": "InstanceRequest",
"desc": "要求最小实例数,取值范围 [1, 4],缺省为 1,不传表示维持原态"
},
{
"name": "StartupParameters",
"desc": "部署组启动参数,不传表示维持原态"
}
],
"desc": "部署Serverless应用"
},
"DescribeGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询虚拟机部署组详情"
},
"CreateConfig": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称"
},
{
"name": "ConfigVersion",
"desc": "配置项版本"
},
{
"name": "ConfigValue",
"desc": "配置项值"
},
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "ConfigVersionDesc",
"desc": "配置项版本描述"
},
{
"name": "ConfigType",
"desc": "配置项值类型"
}
],
"desc": "创建配置项"
},
"DescribeContainerGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段,模糊搜索groupName字段"
},
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段,默认为 createTime字段,支持id, name, createTime"
},
{
"name": "OrderType",
"desc": "排序方式,默认为1:倒序排序,0:正序,1:倒序"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "NamespaceId",
"desc": "命名空间 ID"
}
],
"desc": "容器部署组列表"
},
"DeleteImageTags": {
"params": [
{
"name": "ImageTags",
"desc": "镜像版本数组"
}
],
"desc": "批量删除镜像版本"
},
"DescribeClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询集群实例"
},
"CreateApplication": {
"params": [
{
"name": "ApplicationName",
"desc": "应用名称"
},
{
"name": "ApplicationType",
"desc": "应用类型,V:虚拟机应用;C:容器应用;S:serverless应用"
},
{
"name": "MicroserviceType",
"desc": "应用微服务类型,M:service mesh应用;N:普通应用;G:网关应用"
},
{
"name": "ApplicationDesc",
"desc": "应用描述"
},
{
"name": "ApplicationLogConfig",
"desc": "应用日志配置项,废弃参数"
},
{
"name": "ApplicationResourceType",
"desc": "应用资源类型,废弃参数"
},
{
"name": "ApplicationRuntimeType",
"desc": "应用runtime类型"
}
],
"desc": "创建应用"
},
"StopGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "停止虚拟机部署组"
},
"ShrinkGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "下线部署组所有机器实例"
},
"DeployGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
},
{
"name": "StartupParameters",
"desc": "部署组启动参数"
}
],
"desc": "部署虚拟机部署组应用"
},
"DescribeApplications": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
},
{
"name": "ApplicationType",
"desc": "应用类型"
},
{
"name": "MicroserviceType",
"desc": "应用的微服务类型"
},
{
"name": "ApplicationResourceTypeList",
"desc": "应用资源类型数组"
}
],
"desc": "获取应用列表"
},
"DeleteServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "groupId,分组唯一标识"
}
],
"desc": "删除Serverless部署组"
},
"DescribeUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgName",
"desc": "程序包名"
},
{
"name": "PkgVersion",
"desc": "程序包版本"
},
{
"name": "PkgType",
"desc": "程序包类型"
},
{
"name": "PkgDesc",
"desc": "程序包介绍"
}
],
"desc": "TSF会将软件包上传到腾讯云对象存储(COS)。调用此接口获取上传信息,如目标地域,桶,包Id,存储路径,鉴权信息等,之后请使用COS API(或SDK)进行上传。\nCOS相关文档请查阅:https://cloud.tencent.com/document/product/436"
},
"DescribeConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称,不传入时查询全量"
},
{
"name": "GroupId",
"desc": "部署组ID,不传入时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "ConfigId",
"desc": "配置ID,不传入时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
}
],
"desc": "查询配置发布信息"
},
"StopContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "停止容器部署组"
},
"DescribeSimpleApplications": {
"params": [
{
"name": "ApplicationIdList",
"desc": "应用ID列表"
},
{
"name": "ApplicationType",
"desc": "应用类型"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "MicroserviceType",
"desc": "微服务类型"
},
{
"name": "ApplicationResourceTypeList",
"desc": "资源类型数组"
},
{
"name": "SearchWord",
"desc": "通过id和name进行关键词过滤"
}
],
"desc": "查询简单应用列表"
},
"DescribePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "需要查询的配置项ID"
}
],
"desc": "查询公共配置(单条)"
},
"ModifyContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "AccessType",
"desc": "0:公网 1:集群内访问 2:NodePort"
},
{
"name": "ProtocolPorts",
"desc": "ProtocolPorts数组"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "更新间隔,单位秒"
}
],
"desc": "修改容器部署组"
},
"DescribeApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "获取应用详情"
},
"ShrinkInstances": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "InstanceIdList",
"desc": "下线机器实例ID列表"
}
],
"desc": "虚拟机部署组下线实例"
},
"ModifyUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgId",
"desc": "调用DescribeUploadInfo接口时返回的软件包ID"
},
{
"name": "Result",
"desc": "COS返回上传结果(默认为0:成功,其他值表示失败)"
},
{
"name": "Md5",
"desc": "程序包MD5"
},
{
"name": "Size",
"desc": "程序包大小(单位字节)"
}
],
"desc": "调用该接口和COS的上传接口后,需要调用此接口更新TSF中保存的程序包状态。\n调用此接口完成后,才标志上传包流程结束。"
},
"AddInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "InstanceIdList",
"desc": "云主机ID列表"
},
{
"name": "OsName",
"desc": "操作系统名称"
},
{
"name": "ImageId",
"desc": "操作系统镜像ID"
},
{
"name": "Password",
"desc": "重装系统密码设置"
},
{
"name": "KeyId",
"desc": "重装系统,关联密钥设置"
},
{
"name": "SgId",
"desc": "安全组设置"
},
{
"name": "InstanceImportMode",
"desc": "云主机导入方式,虚拟机集群必填,容器集群不填写此字段,R:重装TSF系统镜像,M:手动安装agent"
}
],
"desc": "添加云主机节点至TSF集群"
}
}
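# Illustrative only (not part of the original file): rendering one action from a
# help table like INFO above.
if __name__ == "__main__":
    action = INFO["CreateCluster"]
    print(action["desc"])
    for param in action["params"]:
        print("  --%s\t%s" % (param["name"], param["desc"]))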
| 18.567516
| 165
| 0.392028
| 1,844
| 29,151
| 6.197397
| 0.284707
| 0.064753
| 0.032814
| 0.038502
| 0.510851
| 0.45861
| 0.35973
| 0.322979
| 0.287714
| 0.254725
| 0
| 0.007365
| 0.41309
| 29,151
| 1,570
| 166
| 18.567516
| 0.660588
| 0.00072
| 0
| 0.379222
| 0
| 0.001275
| 0.404511
| 0.056061
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001275
| 0.001275
| 0
| 0.001275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8b067f63a4c14a9b78ac5bf7aace3e8420c7a16
| 1,729
|
py
|
Python
|
workflow_scripts/test_models.py
|
jcwchen/models
|
2fd86acdd51037570e1daefa03873237b76bd5a6
|
[
"MIT"
] | 1
|
2020-12-19T14:46:23.000Z
|
2020-12-19T14:46:23.000Z
|
workflow_scripts/test_models.py
|
sumit6597/models
|
2fd86acdd51037570e1daefa03873237b76bd5a6
|
[
"MIT"
] | null | null | null |
workflow_scripts/test_models.py
|
sumit6597/models
|
2fd86acdd51037570e1daefa03873237b76bd5a6
|
[
"MIT"
] | 1
|
2021-08-08T11:47:35.000Z
|
2021-08-08T11:47:35.000Z
|
import onnx
from pathlib import Path
import subprocess
import sys
def run_lfs_install():
result = subprocess.run(['git', 'lfs', 'install'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Git LFS install completed with return code=" + str(result.returncode))
def pull_lfs_file(file_name):
result = subprocess.run(['git', 'lfs', 'pull', '--include', file_name, '--exclude', '\"\"'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("LFS pull completed with return code=" + str(result.returncode))
cwd_path = Path.cwd()
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(['git', 'diff', '--name-only', '--diff-filter=AM', 'origin/master', 'HEAD'],
cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = obtain_diff.communicate()
diff_list = stdoutput.split()
# identify list of changed onnx models in model Zoo
model_list = [str(model).replace("b'","").replace("'", "") for model in diff_list if ".onnx" in str(model)]
# run lfs install before starting the tests
run_lfs_install()
print("\n=== Running ONNX Checker on added models ===\n")
# run checker on each model
failed_models = []
for model_path in model_list:
model_name = model_path.split('/')[-1]
print("Testing:", model_name)
try:
pull_lfs_file(model_path)
model = onnx.load(model_path)
onnx.checker.check_model(model)
print("Model", model_name, "has been successfully checked!")
except Exception as e:
print(e)
failed_models.append(model_path)
if len(failed_models) != 0:
print(str(len(failed_models)) +" models failed onnx checker.")
sys.exit(1)
print(len(model_list), "model(s) checked.")
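# Illustrative alternative (assumption, not the original approach): the b''-stripping
# above undoes str() applied to bytes; decoding the subprocess output directly is
# equivalent:
#   model_list = [m.decode() for m in diff_list if b".onnx" in m]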
| 35.285714
| 156
| 0.707924
| 248
| 1,729
| 4.798387
| 0.354839
| 0.070588
| 0.032773
| 0.040336
| 0.247059
| 0.205042
| 0.205042
| 0.134454
| 0.134454
| 0.092437
| 0
| 0.002024
| 0.142857
| 1,729
| 48
| 157
| 36.020833
| 0.800945
| 0.096588
| 0
| 0
| 0
| 0
| 0.205523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.176471
| 0.235294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8b5d127b254896268904720f95e3739d411d338
| 1,374
|
py
|
Python
|
src/classifier/utils/create_data.py
|
maxscheijen/dutch-sentiment-classifier
|
6b3149d906710fadc0b104a9f79ca389a7f5cba3
|
[
"Apache-2.0"
] | null | null | null |
src/classifier/utils/create_data.py
|
maxscheijen/dutch-sentiment-classifier
|
6b3149d906710fadc0b104a9f79ca389a7f5cba3
|
[
"Apache-2.0"
] | null | null | null |
src/classifier/utils/create_data.py
|
maxscheijen/dutch-sentiment-classifier
|
6b3149d906710fadc0b104a9f79ca389a7f5cba3
|
[
"Apache-2.0"
] | null | null | null |
import glob
import pandas as pd
from tqdm import tqdm
from classifier import config
class Dataset:
"""Create dataset class"""
def __init__(self):
# Get all txt files
self.paths = sorted(glob.glob("data/*/*/*.txt"))
self.dataframe = None
def load_data(self):
dfs = [] # initialize list for dataframes
# Loop over all txt files
for filepath in tqdm(self.paths):
# Read text files
with open(filepath, "r") as f:
text = f.read()
# Create label from path
if "pos" in filepath:
sentiment = "positief"
else:
sentiment = "negatief"
# Append dataframe to list
dfs.append(pd.DataFrame({"text": [text],
"sentiment": [sentiment]}))
# Concat DataFrames
self.dataframe = pd.concat(dfs).reset_index(drop=True)
def save_data(self):
# Create train and test split
train_data = self.dataframe.sample(frac=config.SPLIT_SIZE,
random_state=config.SEED)
        # Use the complement of the training rows; .iloc[train_data.index] would
        # re-select the sampled training rows as the test set.
        test_data = self.dataframe.drop(train_data.index)
        # Save data
        train_data.to_csv(config.TRAIN_DATA, index=False)
        test_data.to_csv(config.TEST_DATA, index=False)
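# Illustrative usage (not part of the original file), assuming the data/*/*/*.txt
# layout and the classifier.config constants exist:
#   ds = Dataset()
#   ds.load_data()
#   ds.save_data()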
| 28.625
| 68
| 0.54294
| 154
| 1,374
| 4.727273
| 0.441558
| 0.071429
| 0.03022
| 0.041209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.365357
| 1,374
| 47
| 69
| 29.234043
| 0.834862
| 0.15575
| 0
| 0
| 0
| 0
| 0.041012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8b68cb341dae475cc25f2d74d8dcd06d0f58623
| 1,682
|
py
|
Python
|
algorithms/intervals.py
|
calebperkins/algorithms
|
9f4a029261160e6b12b8bedd53f0a0ebf541237a
|
[
"MIT"
] | null | null | null |
algorithms/intervals.py
|
calebperkins/algorithms
|
9f4a029261160e6b12b8bedd53f0a0ebf541237a
|
[
"MIT"
] | null | null | null |
algorithms/intervals.py
|
calebperkins/algorithms
|
9f4a029261160e6b12b8bedd53f0a0ebf541237a
|
[
"MIT"
] | null | null | null |
import collections
Interval = collections.namedtuple("Interval", "start, end")
class AugmentedTree:
"""
An augmented tree for querying intervals. The nodes are ordered by the start interval. The high attribute is the
maximum end interval of the node and any of its children.
    This tree could become imbalanced. More advanced augmented trees should be based on a self-balancing BST.
"""
def __init__(self, interval):
self.interval = interval
self.high = interval.end
self.left = None
self.right = None
def overlaps(self, interval):
i = self.interval
return i.end >= interval.start and i.start <= interval.end
def intersecting(self, interval):
s = [self]
while s:
n = s.pop()
if n.high < interval.start:
continue
if n.overlaps(interval):
yield n.interval
if n.right and n.right.interval.start <= interval.end:
s.append(n.right)
if n.left:
s.append(n.left)
def __lt__(self, other):
return self.interval.start < other.interval.start
def add(self, interval):
# Create a new node and add it to a leaf
m = AugmentedTree(interval)
n = self
while True:
n.high = max(n.high, m.high)
if m < n:
if n.left:
n = n.left
else:
n.left = m
return
else:
if n.right:
n = n.right
else:
n.right = m
return
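# Illustrative usage (not part of the original file):
if __name__ == "__main__":
    tree = AugmentedTree(Interval(5, 10))
    tree.add(Interval(1, 3))
    tree.add(Interval(8, 12))
    print(list(tree.intersecting(Interval(9, 9))))  # [Interval(start=5, end=10), Interval(start=8, end=12)]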
| 29
| 116
| 0.521998
| 202
| 1,682
| 4.306931
| 0.361386
| 0.096552
| 0.036782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.399524
| 1,682
| 57
| 117
| 29.508772
| 0.861386
| 0.189655
| 0
| 0.166667
| 0
| 0
| 0.013443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119048
| false
| 0
| 0.02381
| 0.02381
| 0.261905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8bd12730bd20c4875906f949b15caeb99026f0f
| 4,874
|
py
|
Python
|
utils/visualization.py
|
yigitozgumus/Polimi_Thesis
|
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
|
[
"MIT"
] | 3
|
2019-07-27T14:00:42.000Z
|
2020-01-17T17:07:51.000Z
|
utils/visualization.py
|
yigitozgumus/Polimi_Thesis
|
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
|
[
"MIT"
] | null | null | null |
utils/visualization.py
|
yigitozgumus/Polimi_Thesis
|
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
|
[
"MIT"
] | 4
|
2019-10-22T02:58:26.000Z
|
2020-10-06T09:59:26.000Z
|
import numpy as np
import matplotlib.pyplot as plt
def show_anomalies(patch_array):
num_figs = len(patch_array)
fig = plt.figure(figsize=(num_figs * 30, 30))
plt.tight_layout()
for i in range(len(patch_array)):
plt.subplot(num_figs, 1, i + 1)
plt.imshow(patch_array[i])
plt.axis("off")
def make_3_channel(image):
return np.array([[[s, s, s] for s in r] for r in image], dtype="u1")
def add_color_red_2d(image):
#return np.array([[[0.7, s, s] for s in r] for r in image], dtype="u1")
return np.array([[[s, 0, 0] for s in r] for r in image], dtype="u1")
def add_color_green_2d(image):
#return np.array([[[0.4, s, 0.9] for s in r] for r in image], dtype="u1")
return np.array([[[0, s, 0] for s in r] for r in image], dtype="u1")
def add_color_blue_2d(image):
#return np.array([[[s, 0.3, 0.3] for s in r] for r in image], dtype="u1")
return np.array([[[0, 0, s] for s in r] for r in image], dtype="u1")
def paint_image_anomalies(image_list, true_labels, pred_labels):
imgs = []
h_turns = 21
w_turns = 32
for img in image_list:
image = make_3_channel(img)
top = 0
left = 0
h, w = image.shape[:2]
for adv_h in range(h_turns):
for adv_w in range(w_turns):
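                # NOTE: img_tag is not defined in this scope; a per-image anomaly
                # mask (e.g., derived from true_labels) was presumably intended.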
tag = img_tag[adv_h * 32 : (adv_h + 1) * 32, adv_w * 32 : (adv_w + 1) * 32]
anomaly = np.sum(tag)
if anomaly:
mask = np.array(tag == 255)
image[adv_h * 32 : (adv_h + 1) * 32, adv_w * 32 : (adv_w + 1) * 32, 0][
mask
] = 255
imgs.append(image)
return imgs
def connect_imgs(imgs):
patch = np.squeeze(imgs[0])
for i in range(1, len(imgs)):
patch = np.vstack((patch, np.squeeze(imgs[i])))
return patch
def paint_anomalies(num, patches, scores_pred, tl_bool, statistics=False, show=False):
patch_image = np.zeros(2064384, dtype=int)
patch_image = patch_image.reshape(672, 1024, 3)
# plt.imshow(patch_image)
tests = patches[672 * num : 672 * (num + 1)]
preds = scores_pred[672 * num : 672 * (num + 1)]
tl_bool = tl_bool.astype(bool)
real = tl_bool[672 * num : 672 * (num + 1)]
height = 21
width = 32
trues = 0
fps = 0
fns = 0
for i in range(height):
for j in range(width):
index = j + (width * i)
if preds[index] and real[index]:
# make it green, correct_guess
add = add_color_green_2d(tests[index] * 255)
trues += 1
elif preds[index]: # false positive
add = add_color_red_2d(tests[index] * 255)
fps += 1
elif real[index]: # False Negative
add = add_color_blue_2d(tests[index] * 255)
fns += 1
else:
add = make_3_channel(tests[index] * 255)
patch_image[i * 32 : (i + 1) * 32, j * 32 : (j + 1) * 32] += add
if statistics:
print("true predictions: {}".format(trues))
print("False Positives: {}".format(fps))
print("False Negatives: {}".format(fns))
if show:
plt.figure(figsize=(15, 15))
plt.imshow(patch_image)
return
return patch_image
def paint_anomalies_pixelwise(num, patches, scores_pred, true_scores, statistics=False, show=False):
patch_image = np.zeros(1972098, dtype=int)
patch_image = patch_image.reshape(662, 993, 3)
tests = patches[660345 * num : 660345 * (num + 1)]
preds = scores_pred[660345 * num : 660345 * (num + 1)]
true_scores = true_scores.astype(bool)
real = true_scores[660345 * num : 660345 * (num + 1)]
height = 662
width = 993
trues, fps, fns = 0, 0, 0
for h in range(height):
for w in range(width):
index = w + (width * h)
if preds[index] and real[index]:
add = add_color_green_2d(tests[index][15:16, 16:17] * 255)
trues += 1
elif preds[index]:
add = add_color_red_2d(tests[index][15:16, 16:17] * 255)
fps += 1
elif real[index]:
add = add_color_blue_2d(tests[index][15:16, 16:17] * 255)
fns += 1
else:
add = make_3_channel(tests[index][15:16, 16:17] * 255)
patch_image[h : (h + 1), w : (w + 1)] += add
if statistics:
print("true predictions: {}".format(trues))
print("False Positives: {}".format(fps))
print("False Negatives: {}".format(fns))
if show:
plt.figure(figsize=(15, 15))
plt.imshow(patch_image)
return
return patch_image
def compute_predictions(scores, percentile):
per = np.percentile(scores, percentile)
predictions = scores >= per
return predictions
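# Illustrative usage (not part of the original file): flag roughly the top 10% of
# synthetic scores as anomalies.
if __name__ == "__main__":
    scores = np.random.rand(672)
    print(compute_predictions(scores, 90).sum())  # ~67 patches flagged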
| 34.083916
| 100
| 0.55437
| 712
| 4,874
| 3.660112
| 0.172753
| 0.049885
| 0.034919
| 0.018803
| 0.55449
| 0.48043
| 0.415196
| 0.335764
| 0.278972
| 0.278972
| 0
| 0.079128
| 0.312885
| 4,874
| 142
| 101
| 34.323944
| 0.699015
| 0.060936
| 0
| 0.273504
| 0
| 0
| 0.02779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08547
| false
| 0
| 0.017094
| 0.034188
| 0.179487
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8c0726d584812a525a610e545b5c0960badaf74
| 18,223
|
py
|
Python
|
tests/unit/core/tensorrt_loaders.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 10
|
2020-03-17T08:32:06.000Z
|
2021-04-19T19:03:50.000Z
|
tests/unit/core/tensorrt_loaders.py
|
dcmartin/NeMo
|
d2120a40bf23d3e38ff5677c2685c712f297e6b1
|
[
"Apache-2.0"
] | 1
|
2020-06-11T00:54:42.000Z
|
2020-06-11T00:54:42.000Z
|
tests/unit/core/tensorrt_loaders.py
|
dcmartin/NeMo
|
d2120a40bf23d3e38ff5677c2685c712f297e6b1
|
[
"Apache-2.0"
] | 3
|
2020-03-10T05:10:07.000Z
|
2020-12-08T01:33:35.000Z
|
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import time
import warnings
from collections import OrderedDict
import numpy as np
import onnx
import tensorrt as trt
from .tensorrt_format import FormatManager
from .tensorrt_runner import (
DEFAULT_SHAPE_VALUE,
TRT_LOGGER,
TensorRTRunnerV2,
default_value,
find_in_dict,
get_input_metadata_from_profile,
is_dimension_dynamic,
is_shape_dynamic,
is_valid_shape_override,
send_on_queue,
write_timestamped,
)
from nemo import logging, logging_mode
def set_onnx_logging_level(sev):
if sev >= logging.INFO:
warnings.filterwarnings("ignore")
class BaseDataLoader(object):
"""
    Responsible for fetching or generating input data for runners.
"""
def __call__(self, index, input_metadata, input_example=None):
"""
Fetches or generates inputs.
Args:
index (int): The index of inputs to fetch. For any given index, the inputs should always be the same.
input_metadata (OrderedDict[str, Tuple[np.dtype, Tuple[int]]]): Mapping of input names to their data types and shapes.
Returns:
OrderedDict[str, np.ndarray]: Mapping of input names to numpy buffers containing data.
"""
raise NotImplementedError("BaseDataLoader is an abstract class")
class DefaultDataLoader(BaseDataLoader):
def __init__(
self,
seed=None,
default_shape_value=None,
default_shapes=None,
int_min=None,
int_max=None,
float_min=None,
float_max=None,
):
"""
Optional Args:
seed (int): The seed to use when generating random inputs.
default_shape_value (int): The default value to use when a dimension is dynamic.
default_shapes (Dict[str, Tuple[int]]): A mapping of input names to their corresponding shapes.
"""
self.seed = default_value(seed, int(time.time()))
self.default_shapes = default_value(default_shapes, {})
self.default_shape_value = default_value(default_shape_value, DEFAULT_SHAPE_VALUE)
self.int_min = default_value(int_min, 1)
self.int_max = default_value(int_max, 25)
self.float_min = default_value(float_min, -1.0)
self.float_max = default_value(float_max, 1.0)
def __call__(self, index, input_metadata, input_example=None):
logging.debug("Updating seed to: {:}".format(self.seed + index))
rng = np.random.RandomState(self.seed + index)
buffers = OrderedDict()
i = 0
for name, (dtype, shape) in input_metadata.items():
if input_example is not None and (not isinstance(input_example, tuple) or i < len(input_example)):
if isinstance(input_example, tuple):
static_shape = input_example[i].shape
elif isinstance(input_example, OrderedDict):
static_shape = tuple(input_example.values())[i].shape
else:
static_shape = [tuple(input_example.shape)]
elif is_shape_dynamic(shape):
if name in self.default_shapes:
static_shape = self.default_shapes[name]
else:
static_shape = [self.default_shape_value if is_dimension_dynamic(elem) else elem for elem in shape]
if static_shape != shape:
if not is_valid_shape_override(static_shape, shape):
logging.critical(
"Cannot override original shape: {:}, for input: {:} to {:}".format(
shape, name, static_shape
)
)
logging.warning(
"Input: {:}: Adjusted dynamic shape: {:} to: {:}".format(name, shape, static_shape),
mode=logging_mode.ONCE,
)
else:
if name in self.default_shapes:
logging.warning(
"Will not override static shape: {:}, for input: {:}".format(shape, name),
mode=logging_mode.ONCE,
)
static_shape = shape
if input_example is not None and (not isinstance(input_example, tuple) or i < len(input_example)):
if isinstance(input_example, OrderedDict):
buffers[name] = list(input_example.values())[i].cpu()
else:
buffers[name] = input_example[i].cpu() if isinstance(input_example, tuple) else input_example.cpu()
elif np.issubdtype(dtype, np.integer):
buffers[name] = rng.randint(low=self.int_min, high=self.int_max, size=static_shape, dtype=dtype)
elif np.issubdtype(dtype, np.bool_):
buffers[name] = rng.randint(low=0, high=2, size=static_shape).astype(dtype)
else:
buffers[name] = (
rng.random_sample(size=static_shape) * (self.float_max - self.float_min) + self.float_min
).astype(dtype)
buffers[name] = np.array(
buffers[name]
) # To handle scalars. The above functions return a float if shape is ().
            # If the shape is 1D and has a length equal to the rank of the provided default shape, it is
            # likely to be a TRT shape tensor, and so should be overridden such that its value (not shape) is the default shape.
is_shape_tensor = (
(not is_shape_dynamic(shape))
and (name in self.default_shapes)
and (len(shape) == 1)
and (shape[0] == len(self.default_shapes[name]))
)
if is_shape_tensor:
buffers[name] = np.array(self.default_shapes[name], dtype=dtype)
logging.warning(
"Assuming {:} is a shape tensor. Setting to: {:}".format(name, buffers[name]),
mode=logging_mode.ONCE,
)
i = i + 1
return buffers
# Caches data loaded by a DataLoader for use across multiple runners.
class DataLoaderCache(object):
def __init__(self, data_loader):
self.data_loader = data_loader
self.cache = {} # Dict[int, OrderedDict[str, np.ndarray]]
def load(self, iteration, input_metadata, input_example=None):
"""
Load the specified iteration from the cache if present, or generate using the data loader.
Args:
iteration (int): The iteration whose data to retrieve.
input_metadata (OrderedDict[str, Tuple[np.dtype, Tuple[int]]]): Input Metadata, including shape and type information. The loader may attempt to match input_metadata when data in the cache does not exactly match a new set of input_metadata.
"""
if iteration not in self.cache:
logging.debug("Iteration {:} not found in cache, generating new buffers for all inputs".format(iteration))
self.cache[iteration] = self.data_loader(iteration, input_metadata, input_example)
if self.cache[iteration] is None:
logging.critical(
"Received no data from data_loader(iteration, input_metadata) for input_metadata: {:}".format(
input_metadata
)
)
else:
logging.info("Found iteration {:} in cache".format(iteration))
feed_dict = OrderedDict()
for index, (name, (dtype, shape)) in enumerate(input_metadata.items()):
cached_name = find_in_dict(name, self.cache[iteration], index)
if cached_name is None:
logging.warning("Could not find input: {:} in cache, regenerating buffers".format(name))
self.cache[iteration] = self.data_loader(iteration, input_metadata, input_example)
cached_name = name
buffer = self.cache[iteration][cached_name]
if dtype != buffer.dtype:
logging.warning(
"Cached buffer data type does not match data type for input: {:}. Note: Cached type: {:}, input type: {:}. Attempting to cast".format(
name, buffer.dtype, dtype
)
)
buffer = buffer.astype(dtype)
if not is_valid_shape_override(buffer.shape, shape):
logging.warning(
"Cached buffer shape does not match shape for input. Note: Cached shape: {:}, input shape: {:}.".format(
buffer.shape, shape
)
)
# Try to permute the shape to match
try:
perm = FormatManager.permutation(
FormatManager.deduce_format(buffer.shape), FormatManager.deduce_format(shape)
)
new_shape = FormatManager.convert(tuple(buffer.shape), FormatManager.deduce_format(shape))
logging.warning(
"Attempting to permute shape: {:} using permutation {:}. New shape: {:}".format(
buffer.shape, perm, new_shape
)
)
buffer = np.transpose(buffer, perm)
except NotImplementedError as err:
# If the FormatManager does not recognize the format, skip permutation.
logging.info("Skipping permutation due to {:}".format(err))
except KeyError as err:
# If the FormatManager cannot generate the permutation for the format combination, skip permutation.
logging.info("Skipping permutation due to {:}".format(err))
feed_dict[name] = buffer
return feed_dict
class BaseModelLoader(object):
"""
Loads a model for a runner.
"""
def __call__(self):
"""
Load the model.
Returns:
A model usable by the runner. The return type is dependent on the runner the loader has been implemented for.
"""
raise NotImplementedError("BaseModelLoader is an abstract class")
class BaseOnnxModelLoader(BaseModelLoader):
def check(self, model):
try:
onnx.checker.check_model(model)
logging.debug("ONNX Checker Passed")
except onnx.checker.ValidationError as err:
logging.warning("ONNX Checker exited with an error: {:}".format(err))
return model
# ONNX loaders return ONNX models in memory.
class OnnxFileLoader(BaseOnnxModelLoader):
def __init__(self, path):
"""
Loads an ONNX model from a file.
Args:
path (str): The path from which to load the model.
"""
self.path = path
def __call__(self):
logging.info("Loading {:}".format(self.path))
return self.check(onnx.load(self.path))
def __str__(self):
return "ONNX Model Loader: {:}".format(self.path)
def __repr__(self):
return self.__str__()
class OnnxNetworkLoader(BaseModelLoader):
def __init__(self, onnx_loader, explicit_precision=None):
"""
Parses an ONNX model to create an engine.
Args:
onnx_loader (Callable() -> onnx.ModelProto): A loader that can supply an ONNX model.
Optional Args:
explicit_precision (bool): Whether to create the network with explicit precision enabled.
"""
self.onnx_loader = onnx_loader
self.explicit_precision = default_value(explicit_precision, False)
def __call__(self):
network = TensorRTRunnerV2.create_network(explicit_precision=self.explicit_precision)
parser = trt.OnnxParser(network, TRT_LOGGER)
success = parser.parse(self.onnx_loader().SerializeToString())
if not success:
for index in range(parser.num_errors):
logging.error(parser.get_error(index))
logging.critical("Could not parse ONNX correctly")
return network, parser
class BuildEngineLoader(BaseModelLoader):
def __init__(
self,
network_loader,
max_workspace_size=None,
fp16_mode=None,
int8_mode=None,
profile_shapes=None,
write_engine=None,
calibrator=None,
preprocess_network=None,
layerwise=None,
):
"""
Uses a TensorRT INetworkDefinition to build an engine
Args:
network_loader (Callable()->trt.INetworkDefinition): A callable capable of returning an TensorRT INetworkDefinition. The returned network is owned by the BuildEngineLoader and should not be freed manually. The callable may have at most 2 return values if another object needs to be kept alive for the duration of the network, e.g., in the case of a parser. BuildEngineLoader will take ownership of the second return value, and, like the network, it should not be freed by the callable. The first return value must always be the network.
Optional Args:
max_workspace_size (int): The maximum workspace size, in bytes, when building the engine.
fp16_mode (bool): Whether to build the engine with fp16 mode enabled.
int8_mode (bool): Whether to build the engine with int8 mode enabled.
profile_shapes (Dict[str, List[shape, shape, shape]]): A mapping of binding name to min/opt/max shapes. Only needed for networks with dynamic input shapes.
write_engine (str): A directory in which to save the engine.
calibrator (trt_smeagol.runners.tensorrt_runner_v2.Calibrator): An int8 calibrator. Only required in int8 mode when the network does not have explicit precision.
preprocess_network (Callable(trt.INetworkDefinition)): Preprocessing function for the network definition. May be used to modify the network after parsing. This is called before enabling layerwise outputs.
layerwise (bool): Whether to treat the output of every layer as an output of the network. Defaults to False.
"""
self.network_loader = network_loader
self.max_workspace_size = default_value(max_workspace_size, 1 << 24)
self.fp16_mode = default_value(fp16_mode, False)
self.int8_mode = default_value(int8_mode, False)
self.profile_shapes = default_value(profile_shapes, OrderedDict())
self.write_engine = write_engine
self.written_engine_path = None
self.calibrator = calibrator
self.preprocess_network = default_value(preprocess_network, None)
self.layerwise = default_value(layerwise, False)
def __call__(self):
class DummyContextManager(object):
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return None
network_parser = self.network_loader()
try:
network, parser = network_parser
assert isinstance(network, trt.INetworkDefinition)
except (ValueError, AssertionError):
network = network_parser
parser = DummyContextManager()
with trt.Builder(TRT_LOGGER) as builder, network, parser:
if self.preprocess_network:
logging.debug("Applying network preprocessing: {:}".format(self.preprocess_network))
self.preprocess_network(network)
if self.layerwise:
TensorRTRunnerV2.mark_layerwise(network)
if logging.getEffectiveLevel() <= logging.DEBUG:
TensorRTRunnerV2.log_network(network)
config = builder.create_builder_config()
profile = TensorRTRunnerV2.build_profile(builder, network, self.profile_shapes)
config.add_optimization_profile(profile)
config.max_workspace_size = int(self.max_workspace_size)
if self.fp16_mode:
config.flags = 1 << int(trt.BuilderFlag.FP16)
if self.int8_mode:
config.flags = config.flags | 1 << int(trt.BuilderFlag.INT8)
if not network.has_explicit_precision:
if not self.calibrator:
logging.critical(
"Network does not have explicit precision. A calibrator must be provided in order to use int8 mode."
)
self.calibrator.set_input_metadata(get_input_metadata_from_profile(profile, network))
config.int8_calibrator = self.calibrator
logging.debug("Using builder configuration flags: {:}".format(config.flags))
logging.info(
"Building engine: max workspace size={:} bytes, fp16={:}, int8={:}, layerwise={:}".format(
self.max_workspace_size, self.fp16_mode, self.int8_mode, self.layerwise
)
)
engine = builder.build_engine(network, config)
self.written_engine_path = write_timestamped(
contents=lambda: engine.serialize(), dir=self.write_engine, name="tensorrt_runner_v2.engine"
)
return engine
def get_engine_path(self):
"""
Returns the path at which the engine was written, or None if write_engine was not specified.
"""
return self.written_engine_path
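# --- Usage sketch (an addition, not part of the original class) -----------
# A minimal network_loader compatible with BuildEngineLoader, assuming the
# standard TensorRT ONNX parser API; the helper name and model path are
# hypothetical. Returning (network, parser) keeps the parser alive for the
# lifetime of the network, as the docstring above requires. Some TensorRT
# versions also require the builder that created the network to outlive it;
# a production loader would keep a reference to it as well.
def onnx_network_loader(model_path="model.onnx"):
    builder = trt.Builder(TRT_LOGGER)
    flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(flags)
    parser = trt.OnnxParser(network, TRT_LOGGER)
    with open(model_path, "rb") as f:
        if not parser.parse(f.read()):
            raise RuntimeError("Failed to parse {:}".format(model_path))
    return network, parser
# loader = BuildEngineLoader(onnx_network_loader, fp16_mode=True)
# engine = loader()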
| 43.70024
| 548
| 0.610492
| 2,076
| 18,223
| 5.197977
| 0.194123
| 0.023353
| 0.011862
| 0.011584
| 0.143638
| 0.098415
| 0.064869
| 0.064869
| 0.058382
| 0.041331
| 0
| 0.005254
| 0.300225
| 18,223
| 416
| 549
| 43.805288
| 0.840966
| 0.255831
| 0
| 0.158672
| 0
| 0.00369
| 0.098311
| 0.003593
| 0
| 0
| 0
| 0
| 0.00738
| 1
| 0.070111
| false
| 0.00369
| 0.03321
| 0.01476
| 0.177122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8c174e66db5ae93829e5da36ac5e18a48241662
| 15,382
|
py
|
Python
|
server/services/wiki/pages/overview_service.py
|
hotosm/oeg-reporter
|
f0c3da80ba380df907a818db224e9ca2ae0018b3
|
[
"BSD-2-Clause"
] | 1
|
2021-02-03T13:37:48.000Z
|
2021-02-03T13:37:48.000Z
|
server/services/wiki/pages/overview_service.py
|
hotosm/oeg-reporter
|
f0c3da80ba380df907a818db224e9ca2ae0018b3
|
[
"BSD-2-Clause"
] | 8
|
2020-07-16T23:17:51.000Z
|
2020-10-14T20:40:00.000Z
|
server/services/wiki/pages/overview_service.py
|
hotosm/oeg-reporter
|
f0c3da80ba380df907a818db224e9ca2ae0018b3
|
[
"BSD-2-Clause"
] | null | null | null |
from server.services.wiki.pages.templates import OverviewPageTemplates
from server.services.wiki.pages.page_service import PageService
from server.services.wiki.mediawiki_service import MediaWikiService
from server.services.wiki.wiki_text_service import WikiTextService
from server.services.wiki.wiki_table_service import WikiTableService
from server.services.wiki.wiki_section_service import WikiSectionService
from server.models.serializers.document import OverviewPageSchema
class OverviewPageService(PageService):
def __init__(self):
self.templates = OverviewPageTemplates()
self.page_fields = [
"organisation.name",
"organisation.url",
"platform.name",
"platform.url",
]
def filter_page_data(self, document_data: dict) -> dict:
"""
Filter required data for the overview page from
document data
Keyword arguments:
document_data -- All required data for a project using
Organised Editing Guidelines
Returns:
overview_page_data -- Dict containing only the required data
for the overview page
"""
overview_page_data = {
"organisation": {
"name": document_data["organisation"]["name"],
"url": document_data["organisation"]["url"],
},
"platform": {
"name": document_data["platform"]["name"],
"url": document_data["platform"]["url"],
},
}
return overview_page_data
def generate_page_sections_dict(self, overview_page_data: dict) -> dict:
"""
Generate dict containing the document content parsed to wikitext
for all sections present in the overview page
Keyword arguments:
overview_page_data -- Dictionary containing the required data for the
overview page sections
Returns:
overview_page_sections -- Dictionary with the document content
parsed to wikitext for the overview
page sections
"""
new_row = self.generate_activities_list_table_row(overview_page_data)
activities_list_section = self.templates.activities_list_section_title
overview_page_sections = {activities_list_section: new_row}
return overview_page_sections
def generate_activities_list_table_row(self, overview_page_data: dict) -> str:
"""
        Generates a new table row for the activities list table
        Keyword arguments:
        overview_page_data -- Dict containing only the required data
for the overview page
Returns:
new_row -- String in wikitext format for a new table row
"""
wikitext = WikiTextService()
organisation_name = overview_page_data["organisation"]["name"].capitalize()
organisation_page_title = f"{self.templates.oeg_page}/" f"{organisation_name}"
organisation_link = wikitext.hyperlink_wiki_page(
organisation_page_title, organisation_name
)
platform_link = wikitext.hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
)
new_row = f"\n| {organisation_link}\n| {platform_link}\n|-"
return new_row
def create_page(self, document_data: dict) -> None:
"""
Creates a wiki page
Keyword arguments:
document_data -- All required data for a project using
Organised Editing Guidelines
"""
mediawiki = MediaWikiService()
wikitext = WikiTextService()
token = mediawiki.get_token()
page_title = self.templates.oeg_page
overview_page_sections = self.document_to_page_sections(document_data)
sections_text = wikitext.generate_text_from_dict(
self.templates.page_template,
f"=={self.templates.page_initial_section}==",
overview_page_sections,
)
updated_text = WikiTableService().add_table_row(
page_text=sections_text,
new_row=self.generate_activities_list_table_row(document_data),
table_section_title=self.templates.activities_list_section_title,
table_template=self.templates.table_template,
)
if mediawiki.is_existing_page(page_title):
page_text = MediaWikiService().get_page_text(self.templates.oeg_page)
overview_page_table = (
WikiSectionService()
.get_section_table(
page_text, self.templates.activities_list_section_title
)
.string
)
updated_text = WikiTableService().add_table_row(
page_text=page_text,
new_row=self.generate_activities_list_table_row(document_data),
table_section_title=self.templates.activities_list_section_title,
table_template=overview_page_table,
)
mediawiki.edit_page(token, self.templates.oeg_page, updated_text)
else:
mediawiki.create_page(token, page_title, updated_text)
def enabled_to_report(self, document_data: dict):
if MediaWikiService().is_existing_page(self.templates.oeg_page):
overview_dictionary = self.wikitext_to_dict(self.templates.oeg_page)
serialized_overview_page = self.parse_page_to_serializer(
overview_dictionary
)
organisation_names = [
organisation_data["name"]
for organisation_data in serialized_overview_page["organisation"]
]
platform_names = [
platform_data["name"]
for platform_data in serialized_overview_page["platform"]
]
if (
document_data["organisation"]["name"].capitalize() in organisation_names
and document_data["platform"]["name"] in platform_names
):
return False
else:
return True
else:
return True
def edit_page_text(
self, update_fields: dict, overview_page_data: dict, document_data: dict
):
page_text = MediaWikiService().get_page_text(self.templates.oeg_page)
updated_table_fields = self.get_update_table_fields(
update_fields, overview_page_data
)
if updated_table_fields:
overview_page_table = WikiSectionService().get_section_table(
page_text, self.templates.activities_list_section_title
)
project_list_section_title = (
f"\n=={self.templates.page_initial_section}==\n"
f"==={self.templates.activities_list_section_title}===\n"
)
updated_text = WikiTableService().edit_table(
overview_page_table.string,
project_list_section_title,
updated_table_fields,
)
return updated_text
else:
return page_text
def edit_page(
self, document_data: dict, update_fields: dict, overview_page_data: dict
):
mediawiki = MediaWikiService()
token = mediawiki.get_token()
updated_text = self.edit_page_text(
update_fields, overview_page_data, document_data
)
mediawiki.edit_page(token, self.templates.oeg_page, updated_text)
def table_field_updated(self, update_fields: dict, overview_page_data: dict):
if "platform" in update_fields.keys():
return WikiTextService().hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
)
elif "organisation" in update_fields.keys():
organisation_page_title = (
f"{self.templates.oeg_page}/"
f"{overview_page_data['organisation']['name'].capitalize()}"
)
return WikiTextService().hyperlink_wiki_page(
organisation_page_title,
overview_page_data["organisation"]["name"].capitalize(),
)
else:
return False
def get_update_table_fields(self, update_fields, overview_page_data):
current_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{overview_page_data['organisation']['name'].capitalize()}"
)
current_row_data = {
"organisation": WikiTextService().hyperlink_wiki_page(
current_organisation_page_title,
overview_page_data["organisation"]["name"].capitalize(),
),
"platform": WikiTextService().hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
),
}
if (
"platform" in update_fields.keys()
and "organisation" in update_fields.keys()
):
update_platform_name = (
update_fields["platform"]["name"]
if "name" in update_fields["platform"].keys()
else overview_page_data["platform"]["name"]
)
update_platform_url = (
update_fields["platform"]["url"]
if "url" in update_fields["platform"].keys()
else overview_page_data["platform"]["url"]
)
update_organisation_name = (
update_fields["organisation"]["name"].capitalize()
if "name" in update_fields["organisation"].keys()
else overview_page_data["organisation"]["name"].capitalize()
)
update_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{update_organisation_name.capitalize()}"
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": WikiTextService().hyperlink_wiki_page(
update_organisation_page_title,
update_organisation_name.capitalize(),
),
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": WikiTextService().hyperlink_external_link(
update_platform_name, update_platform_url
),
},
}
return update_fields
elif "platform" in update_fields.keys():
update_platform_name = (
update_fields["platform"]["name"]
if "name" in update_fields["platform"].keys()
else overview_page_data["platform"]["name"]
)
update_platform_url = (
update_fields["platform"]["url"]
if "url" in update_fields["platform"].keys()
else overview_page_data["platform"]["url"]
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": current_row_data["organisation"],
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": WikiTextService().hyperlink_external_link(
update_platform_name, update_platform_url
),
},
}
return update_fields
elif "organisation" in update_fields.keys():
update_organisation_name = (
update_fields["organisation"]["name"].capitalize()
if "name" in update_fields["organisation"].keys()
else overview_page_data["organisation"]["name"].capitalize()
)
update_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{update_organisation_name.capitalize()}"
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": WikiTextService().hyperlink_wiki_page(
update_organisation_page_title,
update_organisation_name.capitalize(),
),
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": current_row_data["platform"],
},
}
return update_fields
else:
return False
def parse_page_to_serializer(self, page_dictionary: dict):
overview_page_data = {"organisation": [], "platform": []}
overview_page_table_text = page_dictionary[self.templates.page_initial_section][
self.templates.activities_list_section_title
]
(
platform_list,
organisation_list,
) = self.get_overview_page_platforms_and_organisations(overview_page_table_text)
overview_page_data["organisation"] = organisation_list
overview_page_data["platform"] = platform_list
# Validate
overview_page_schema = OverviewPageSchema(partial=True)
overview_page_schema.load(overview_page_data)
return overview_page_data
def get_overview_page_platforms_and_organisations(
self, overview_page_table_text: str
):
overview_page_table = WikiTableService().get_text_table(
overview_page_table_text
)
overview_page_table_data = overview_page_table.data(span=False)
organisation_list = []
platform_list = []
wikitext = WikiTextService()
for table_row_number, table_row_data in enumerate(
overview_page_table_data[1:], start=1
):
hyperlinked_organisation_url = overview_page_table.cells(
row=table_row_number,
column=self.templates.overview_list_organisation_name_column,
).value
hyperlinked_platform_url = overview_page_table.cells(
row=table_row_number,
column=self.templates.overview_list_platform_name_column,
).value
organisation_list.append(
{
"name": wikitext.get_page_link_and_text_from_wiki_page_hyperlink(
hyperlinked_organisation_url
)[1]
}
)
(
platform_url,
platform_name,
) = wikitext.get_page_link_and_text_from_external_hyperlink(
hyperlinked_platform_url
)
platform_list.append({"name": platform_name, "url": platform_url})
return platform_list, organisation_list
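# A quick sanity check (an added sketch, assuming OverviewPageTemplates
# builds without external configuration). It exercises only
# filter_page_data, which is pure dictionary filtering and needs no
# MediaWiki connection; the sample values are made up.
if __name__ == "__main__":
    sample_document_data = {
        "organisation": {"name": "example org", "url": "https://example.org"},
        "platform": {"name": "Example Platform", "url": "https://platform.example"},
    }
    print(OverviewPageService().filter_page_data(sample_document_data))
    # Expected: the same organisation/platform name/url pairs and nothing else.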
| 39.64433
| 88
| 0.594981
| 1,451
| 15,382
| 5.926258
| 0.085458
| 0.094895
| 0.066985
| 0.030701
| 0.606698
| 0.532271
| 0.476916
| 0.435167
| 0.402605
| 0.371206
| 0
| 0.000288
| 0.323625
| 15,382
| 387
| 89
| 39.74677
| 0.826221
| 0.071122
| 0
| 0.394137
| 0
| 0
| 0.100863
| 0.037735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039088
| false
| 0
| 0.022801
| 0
| 0.120521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8c574de241b0c8199ec3be2586cfc5532691047
| 5,253
|
py
|
Python
|
xmuda/eval_sem_pcd.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
xmuda/eval_sem_pcd.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
xmuda/eval_sem_pcd.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
from xmuda.models.SSC2d_proj3d2d import SSC2dProj3d2d
from xmuda.data.NYU.nyu_dm import NYUDataModule
from xmuda.data.semantic_kitti.kitti_dm import KittiDataModule
from xmuda.common.utils.sscMetrics import SSCMetrics
from xmuda.data.NYU.params import class_relation_freqs as NYU_class_relation_freqs, class_freq_1_4 as NYU_class_freq_1_4, class_freq_1_8 as NYU_class_freq_1_8, class_freq_1_16 as NYU_class_freq_1_16
import numpy as np
import torch
import torch.nn.functional as F
from xmuda.models.ssc_loss import get_class_weights
from tqdm import tqdm
import pickle
import os
#model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/no_mask_255/v12_removeCPThreshold_KLnonzeros_LRDecay30_NYU_1_0.0001_0.0001_CPThreshold0.0_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=030-val/mIoU=0.26983.ckpt"
model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/kitti/v12_ProjectScale2_CPAt1_8_1divlog_LargerFOV_kitti_1_FrusSize_4_WD0_lr0.0001_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=037-val/mIoU=0.11056.ckpt"
class_weights = {
'1_4': get_class_weights(NYU_class_freq_1_4).cuda(),
'1_8': get_class_weights(NYU_class_freq_1_8).cuda(),
'1_16': get_class_weights(NYU_class_freq_1_16).cuda(),
}
#dataset = "NYU"
dataset = "kitti"
if dataset == "NYU":
NYU_root = "/gpfswork/rech/kvd/uyl37fq/data/NYU/depthbin"
NYU_preprocess_dir = "/gpfsscratch/rech/kvd/uyl37fq/precompute_data/NYU"
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
full_scene_size = (240, 144, 240)
output_scene_size = (60, 36, 60)
NYUdm = NYUDataModule(NYU_root, NYU_preprocess_dir, batch_size=4, num_workers=3)
NYUdm.setup()
_C = 12
data_loader = NYUdm.val_dataloader()
else:
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
kitti_depth_root = "/gpfsscratch/rech/kvd/uyl37fq/Adabin/KITTI/"
kitti_logdir = '/gpfsscratch/rech/kvd/uyl37fq/logs/kitti'
kitti_tsdf_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/TSDF_pred_depth_adabin/kitti"
kitti_label_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/labels/kitti"
kitti_occ_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/occupancy_adabin/kitti"
kitti_sketch_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/sketch_3D/kitti"
kitti_mapping_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/mapping_adabin/kitti"
full_scene_size = (256, 256, 32)
KITTIdm = KittiDataModule(root=kitti_root,
data_aug=True,
TSDF_root=kitti_tsdf_root,
label_root=kitti_label_root,
mapping_root=kitti_mapping_root,
occ_root=kitti_occ_root,
depth_root=kitti_depth_root,
sketch_root=kitti_sketch_root,
batch_size=1,
num_workers=3)
KITTIdm.setup()
_C = 20
data_loader = KITTIdm.val_dataloader()
class_relation_weights = get_class_weights(NYU_class_relation_freqs)
model = SSC2dProj3d2d.load_from_checkpoint(model_path)
model.cuda()
model.eval()
count = 0
out_dict = {}
write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/draw_output/kitti"
with torch.no_grad():
for batch in tqdm(data_loader):
if dataset == "NYU":
y_true = batch['ssc_label_1_4'].detach().cpu().numpy()
valid_pix_4 = batch['valid_pix_4']
else:
y_true = batch['ssc_label_1_1'].detach().cpu().numpy()
# valid_pix_1 = batch['valid_pix_1']
valid_pix_1 = batch['valid_pix_double']
batch['img'] = batch['img'].cuda()
pred = model(batch)
y_pred = torch.softmax(pred['ssc'], dim=1).detach().cpu().numpy()
y_pred = np.argmax(y_pred, axis=1)
for i in range(y_true.shape[0]):
out_dict = {
"y_pred": y_pred[i].astype(np.uint16),
"y_true": y_true[i].astype(np.uint16),
}
if dataset == "NYU":
filepath = os.path.join(write_path, batch['name'][i] + ".pkl")
out_dict["cam_pose"] = batch['cam_pose'][i].detach().cpu().numpy()
out_dict["vox_origin"] = batch['vox_origin'][i].detach().cpu().numpy()
elif dataset == "kitti":
filepath = os.path.join(write_path, batch['sequence'][i], batch['frame_id'][i] + ".pkl")
out_dict['valid_pix_1'] = valid_pix_1[i].detach().cpu().numpy()
out_dict['cam_k'] = batch['cam_k'][i].detach().cpu().numpy()
out_dict['T_velo_2_cam'] = batch['T_velo_2_cam'][i].detach().cpu().numpy()
os.makedirs(os.path.join(write_path, batch['sequence'][i]), exist_ok=True)
with open(filepath, 'wb') as handle:
pickle.dump(out_dict, handle)
print("wrote to", filepath)
count += 1
# if count == 4:
# break
# write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/output"
# filepath = os.path.join(write_path, "output.pkl")
# with open(filepath, 'wb') as handle:
# pickle.dump(out_dict, handle)
# print("wrote to", filepath)
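# A small reader for the pickles written above (an added sketch; defining
# it has no side effects). The keys mirror the out_dict built in the loop:
# "y_pred"/"y_true" are uint16 label volumes plus per-dataset extras such
# as "cam_pose"/"vox_origin" (NYU) or "cam_k"/"T_velo_2_cam" (kitti).
def load_prediction(pkl_path):
    with open(pkl_path, 'rb') as handle:
        out = pickle.load(handle)
    print(pkl_path, "y_pred:", out["y_pred"].shape, "y_true:", out["y_true"].shape)
    return out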
| 44.897436
| 234
| 0.663811
| 732
| 5,253
| 4.434426
| 0.247268
| 0.032348
| 0.064695
| 0.092421
| 0.401725
| 0.358287
| 0.277264
| 0.127542
| 0.107209
| 0.077634
| 0
| 0.043783
| 0.213021
| 5,253
| 116
| 235
| 45.284483
| 0.741413
| 0.106225
| 0
| 0.098901
| 0
| 0.010989
| 0.230999
| 0.181896
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.131868
| 0
| 0.131868
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8c6f7ca2165cf621b2f2448c66168d6e16e7af2
| 9,695
|
py
|
Python
|
hnn/src/apps/dataparallel.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 2,075
|
2019-02-25T08:54:38.000Z
|
2022-03-31T10:44:50.000Z
|
hnn/src/apps/dataparallel.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 176
|
2019-03-12T02:58:42.000Z
|
2022-03-22T20:17:23.000Z
|
hnn/src/apps/dataparallel.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 437
|
2019-03-11T21:36:21.000Z
|
2022-03-29T02:40:53.000Z
|
# Author: penhe@microsoft.com
# Date: 05/30/2019
#
""" Data parallel module
"""
from collections import OrderedDict
import numpy as np
import torch
from torch.cuda.comm import broadcast_coalesced
from torch.cuda.comm import reduce_add_coalesced
from torch.nn.parallel import parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs,gather
import math
from bert.optimization import BertAdam
def replicate(network, devices):
devices = tuple(devices)
num_replicas = len(devices)
params = list(network.parameters())
param_indices = {param: idx for idx, param in enumerate(params)}
param_copies = broadcast_coalesced(params, devices)
buffers = list(network._all_buffers())
buffer_indices = {buf: idx for idx, buf in enumerate(buffers)}
buffer_copies = broadcast_coalesced(buffers, devices)
modules = list(network.modules())
module_copies = [[] for device in devices]
module_indices = {}
for i, module in enumerate(modules):
module_indices[module] = i
for j in range(num_replicas):
replica = module.__new__(type(module))
replica.__dict__ = module.__dict__.copy()
replica._parameters = replica._parameters.copy()
replica._buffers = replica._buffers.copy()
replica._modules = replica._modules.copy()
module_copies[j].append(replica)
for i, module in enumerate(modules):
for key, child in module._modules.items():
if child is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._modules[key] = None
else:
module_idx = module_indices[child]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._modules[key] = module_copies[j][module_idx]
for key, param in module._parameters.items():
if param is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._parameters[key] = None
else:
param_idx = param_indices[param]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._parameters[key] = param_copies[j][param_idx]
replica._parameters[key].requires_grad = param.requires_grad
for key, buf in module._buffers.items():
if buf is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._buffers[key] = None
else:
buffer_idx = buffer_indices[buf]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._buffers[key] = buffer_copies[j][buffer_idx]
return [module_copies[j][0] for j in range(num_replicas)]
class XDataParallel(torch.nn.Module):
def __init__(self, module):
super().__init__()
self.device_ids = [i for i in range(torch.cuda.device_count())]
module = module.cuda(self.device_ids[0])
self.replicas = replicate(module, self.device_ids)
self.output_device = self.device_ids[0]
self.dim = 0
self.module = module
def forward(self, *inputs, **kwargs):
#if not self.device_ids:
# return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
#if len(self.device_ids) == 1:
# return self.module(*inputs[0], **kwargs[0])
#replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(self.replicas[:len(inputs)], inputs, kwargs)
return self.gather(outputs, self.output_device)
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.replicas[0].state_dict(destination, prefix, keep_vars)
return sd
def eval(self):
for m in self.replicas:
m.eval()
return self
def train(self, mode=True):
for m in self.replicas:
m.train(mode)
return self
def zero_grad(self):
for m in self.replicas:
for p in m.parameters():
p.grad = None
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
class XParallelOptimizer():
def __init__(self, model, optimizer_fn, grad_clip_norm=1.0):
self.replicas = [model]
if hasattr(model, 'replicas'):
self.replicas = model.replicas
dcnt = torch.cuda.device_count()
total_size = sum([np.prod(p.size()) for p in self.replicas[0].parameters()])
quota = {i:0 for i in range(dcnt)}
#quota[0] = total_size//dcnt
param_groups = {i: [] for i in range(dcnt)}
self.named_parameters=[]
for i,(n, param) in enumerate(self.replicas[0].named_parameters()):
ps = np.prod(param.size())
index = list(sorted(quota.items(), key=lambda x: x[1]))[0][0]
quota[index] += ps
if param.dtype==torch.half:
cp = param.clone().type(torch.cuda.FloatTensor).detach().to('cuda:{}'.format(index)).requires_grad_()
else:
cp = dict(self.replicas[index].named_parameters())[n]
name = n[len('module.'):] if n.startswith('module.') else n
param_groups[index].append((name, cp))
self.named_parameters.append((name, cp))
self.param_groups = param_groups
self.sub_optimizers = [DeviceOptimizer(self.replicas, p, i, optimizer_fn(p, max_grad_norm=0)) for i,p in self.param_groups.items()]
self.grad_clip_norm = grad_clip_norm
def parameters(self):
return OrderedDict(self.named_parameters)
def step(self, grad_scale=1):
def bk(g):
return g.backward()
l2norm_square = parallel_apply([bk for _ in self.sub_optimizers], self.sub_optimizers, devices=[g.device for g in self.sub_optimizers])
l2norm = sum(l2norm_square)**0.5
        if not math.isfinite(l2norm):
return False
if grad_scale != 1:
l2norm *= grad_scale
coef = self.grad_clip_norm/(l2norm+1e-6)
if coef<1:
grad_scale = grad_scale*coef
if grad_scale != 1:
for n,p in self.named_parameters:
if p.grad is not None:
p.grad.mul_(grad_scale)
def st(g):
return g.step(l2norm)
parallel_apply([st for _ in self.sub_optimizers], self.sub_optimizers, devices=[g.device for g in self.sub_optimizers])
return True
def zero_grad(self):
for m in self.replicas:
for p in m.parameters():
p.grad = None
for g in self.sub_optimizers:
g.zero_grad()
class DeviceOptimizer():
def __init__(self, replicas, param_group, device, optimizer):
self.param_group = param_group
self.device = device
self.optimizer = optimizer
self.replicas = replicas
self.named_params = [dict(m.named_parameters()) for m in replicas]
def backward(self):
group_params = [[(n,m[n]) for n,p in self.param_group if m[n].grad is not None] for m in self.named_params]
grad_params = [g for g in group_params if len(g)>0]
assert all([len(g)==len(grad_params[0]) for g in grad_params]), [len(g) for g in grad_params]
grad = [[p.grad for n,p in g] for g in grad_params]
reduced_grad = reduce_add_coalesced(grad, self.device)
grads = dict([(n,g) for ((n,p),g) in zip(grad_params[0], reduced_grad)])
l2norm = 0
for n,p in self.param_group:
if n in grads:
p.grad = grads[n].float() if grads[n].dtype==torch.half else grads[n]
l2norm += p.grad.norm().item()**2
else:
assert p.grad is None, n
return l2norm
def step(self, l2norm):
self.optimizer.step()
group_params = [(i, [(n,m[n]) for n,p in self.param_group]) for i,m in enumerate(self.named_params)]
group_params = sorted(group_params, key=lambda x:x[0] if x[0]!=self.device else -1)
params = dict(self.param_group)
for n,p in group_params[0][1]:
if p.data.dtype == torch.half:
p.data.copy_(params[n].data)
else:
p.data = params[n].data
param_list = [[p for n,p in g] for i,g in group_params]
device_list =[i for i,g in group_params]
outputs = broadcast_coalesced(param_list[0], device_list)
for o,p in zip(outputs, param_list):
for x,y in zip(o, p):
y.data.copy_(x.data)
def zero_grad(self):
for n,p in self.param_group:
p.grad = None
self.optimizer.zero_grad()
def optimizer_factory(args, training_steps=None, init_spec=None, no_decay=['bias', 'LayerNorm.weight']):
def optimizer_fn(param_group, max_grad_norm=None):
group0 = dict(params=[],
weight_decay_rate=args.weight_decay,
names=[])
group1 = dict(params=[],
weight_decay_rate=0.00,
names=[])
for (n,p) in param_group:
if not any(nd in n for nd in no_decay):
group0['params'].append(p)
group0['names'].append(n)
else:
group1['params'].append(p)
group1['names'].append(n)
optimizer_grouped_parameters = [group0, group1]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
b1=args.adam_beta1,
b2=args.adam_beta2,
v1=args.qhadam_v1,
v2=args.qhadam_v2,
lr_ends=args.lr_schedule_ends,
warmup=args.warmup_proportion if args.warmup_proportion<1 else args.warmup_proportion/training_steps,
t_total=training_steps,
schedule=args.lr_schedule,
max_grad_norm = args.max_grad_norm if max_grad_norm is None else max_grad_norm,
global_grad_norm = args.global_grad_norm,
init_spec = init_spec,
weight_decay_rate = args.weight_decay)
return optimizer
return optimizer_fn
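# A quick, CPU-only illustration of the no_decay split that optimizer_fn
# applies above (an added sketch; the toy parameter names are made up).
# Names containing 'bias' or 'LayerNorm.weight' fall into the group with
# weight_decay_rate 0.00, everything else keeps args.weight_decay.
if __name__ == "__main__":
    toy_names = ["encoder.weight", "encoder.bias", "LayerNorm.weight"]
    no_decay = ['bias', 'LayerNorm.weight']
    decayed = [n for n in toy_names if not any(nd in n for nd in no_decay)]
    print("decayed:", decayed)  # -> ['encoder.weight']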
| 35.643382
| 139
| 0.664569
| 1,415
| 9,695
| 4.368905
| 0.142049
| 0.0165
| 0.008088
| 0.010191
| 0.214008
| 0.173245
| 0.126658
| 0.123261
| 0.113555
| 0.113555
| 0
| 0.009976
| 0.214234
| 9,695
| 271
| 140
| 35.774908
| 0.801523
| 0.030737
| 0
| 0.1875
| 0
| 0
| 0.008208
| 0
| 0
| 0
| 0
| 0
| 0.008929
| 1
| 0.098214
| false
| 0
| 0.044643
| 0.026786
| 0.227679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ca44f18c6c1244335778442d0b31143cb496f7
| 811
|
py
|
Python
|
ch02/multiSinal_button.py
|
you-know-who-2017/pythonQT
|
a713bfacbb53c5f23e9d7f61dc44592335a24423
|
[
"MIT"
] | null | null | null |
ch02/multiSinal_button.py
|
you-know-who-2017/pythonQT
|
a713bfacbb53c5f23e9d7f61dc44592335a24423
|
[
"MIT"
] | null | null | null |
ch02/multiSinal_button.py
|
you-know-who-2017/pythonQT
|
a713bfacbb53c5f23e9d7f61dc44592335a24423
|
[
"MIT"
] | null | null | null |
'''
Author: geekli
Date: 2020-12-27 10:38:46
LastEditTime: 2020-12-27 10:40:44
LastEditors: your name
Description:
FilePath: \pythonQT\ch02\multiSinal_button.py
'''
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
class Demo(QWidget):
def __init__(self):
super(Demo, self).__init__()
self.button = QPushButton('Start', self)
self.button.pressed.connect(self.change_text) # 1
self.button.released.connect(self.change_text) # 2
    # Slot: toggles the button label on both pressed and released signals
def change_text(self):
if self.button.text() == 'Start': # 3
self.button.setText('Stop')
else:
self.button.setText('Start')
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_())
| 25.34375
| 62
| 0.630086
| 101
| 811
| 4.851485
| 0.554455
| 0.122449
| 0.032653
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054927
| 0.236745
| 811
| 32
| 63
| 25.34375
| 0.736672
| 0.204686
| 0
| 0
| 0
| 0
| 0.04252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ccf268808a95f71f44af0d1f8a0dcac8ac8aa6
| 835
|
py
|
Python
|
record_voice.py
|
y1255018/voice-printer
|
cea33ae978a0709346bdbaf009f4fa07a97c7463
|
[
"MIT"
] | null | null | null |
record_voice.py
|
y1255018/voice-printer
|
cea33ae978a0709346bdbaf009f4fa07a97c7463
|
[
"MIT"
] | 1
|
2020-05-10T12:57:46.000Z
|
2020-05-10T12:59:27.000Z
|
record_voice.py
|
y1255018/voice-printer
|
cea33ae978a0709346bdbaf009f4fa07a97c7463
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, termios
import os
def getKey():
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] &= ~termios.ICANON
new[3] &= ~termios.ECHO
try:
termios.tcsetattr(fd, termios.TCSANOW, new)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSANOW, old)
print(ch)
return ch
def main():
try:
while 1:
key = getKey()
if key == 'r':
# record sound
os.system("arecord -d 5 -f cd 'test.wav'")
print("finish recording")
elif key == 'p':
#play sound
os.system("aplay 'test.wav'")
elif key == 'q':
break
elif key:
print(key)
except( KeyboardInterrupt, SystemExit):
print( "SIGINTを検知" )
if __name__ == "__main__":
main()
| 19.880952
| 50
| 0.568862
| 106
| 835
| 4.40566
| 0.537736
| 0.044968
| 0.077088
| 0.089936
| 0.137045
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009983
| 0.28024
| 835
| 42
| 51
| 19.880952
| 0.767055
| 0.052695
| 0
| 0.0625
| 0
| 0
| 0.102792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.15625
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8cd1764a3562bbf6dce2fed67c34407e35a1349
| 1,516
|
py
|
Python
|
findpeak.py
|
BartMassey/pdx-cs-sound
|
52f671f155f71eb75a635d9b125f9324889dd329
|
[
"MIT"
] | null | null | null |
findpeak.py
|
BartMassey/pdx-cs-sound
|
52f671f155f71eb75a635d9b125f9324889dd329
|
[
"MIT"
] | null | null | null |
findpeak.py
|
BartMassey/pdx-cs-sound
|
52f671f155f71eb75a635d9b125f9324889dd329
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Copyright (c) 2019 Bart Massey
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
# Find maximum and minimum sample in an audio file.
import sys
import wave as wav
# Get the signal file.
wavfile = wav.open(sys.argv[1], 'rb')
# Channels per frame.
channels = wavfile.getnchannels()
# Bytes per sample.
width = wavfile.getsampwidth()
# Sample rate
rate = wavfile.getframerate()
# Number of frames.
frames = wavfile.getnframes()
# Length of a frame
frame_width = width * channels
# Get the signal and check it.
max_sample = None
min_sample = None
wave_bytes = wavfile.readframes(frames)
# Iterate over frames.
for f in range(0, len(wave_bytes), frame_width):
frame = wave_bytes[f : f + frame_width]
# Iterate over channels.
for c in range(0, len(frame), width):
# Build a sample.
sample_bytes = frame[c : c + width]
# XXX Eight-bit samples are unsigned
sample = int.from_bytes(sample_bytes,
byteorder='little',
signed=(width>1))
# Check extrema.
        if max_sample is None:
            max_sample = sample
        if min_sample is None:
            min_sample = sample
if sample > max_sample:
max_sample = sample
if sample < min_sample:
min_sample = sample
wavfile.close()
print("min: {} max: {}".format(min_sample, max_sample))
| 25.694915
| 56
| 0.638522
| 202
| 1,516
| 4.688119
| 0.435644
| 0.057022
| 0.044351
| 0.040127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008108
| 0.26781
| 1,516
| 58
| 57
| 26.137931
| 0.845045
| 0.32058
| 0
| 0.142857
| 0
| 0
| 0.023715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8d1af14aa978ccc8ecf4f4ebec0ffa36d951d1c
| 345
|
py
|
Python
|
test/test_report.py
|
aymatveev/testing_framework
|
3e522d23b46ddb27b3b389210c244aaee5c3370e
|
[
"MIT"
] | null | null | null |
test/test_report.py
|
aymatveev/testing_framework
|
3e522d23b46ddb27b3b389210c244aaee5c3370e
|
[
"MIT"
] | null | null | null |
test/test_report.py
|
aymatveev/testing_framework
|
3e522d23b46ddb27b3b389210c244aaee5c3370e
|
[
"MIT"
] | null | null | null |
from testing_framework.report import report
import html
def test_report():
result = report(("test_report", "second line"))
expected_result = f"""
<!DOCTYPE html>
<html>
<body>
<div>test_report</div><div>second line</div>
</body>
</html>
"""
assert html.escape(expected_result) == html.escape(result)
| 23
| 62
| 0.695652
| 46
| 345
| 5.086957
| 0.434783
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153623
| 345
| 15
| 62
| 23
| 0.80137
| 0
| 0
| 0
| 0
| 0
| 0.352601
| 0.095376
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8d23bd00fcfedf98199c38fb1e64ea94cbde637
| 4,480
|
py
|
Python
|
qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py
|
QuantumRoboticsURC/qrteam
|
bb28f4ad82eab6fb0706be13f8571e0b3261641e
|
[
"MIT"
] | null | null | null |
qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py
|
QuantumRoboticsURC/qrteam
|
bb28f4ad82eab6fb0706be13f8571e0b3261641e
|
[
"MIT"
] | null | null | null |
qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py
|
QuantumRoboticsURC/qrteam
|
bb28f4ad82eab6fb0706be13f8571e0b3261641e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import time
import rospy
import subprocess
import actionlib
from std_msgs.msg import Float32
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped
from actionlib_msgs.msg import GoalStatus, GoalStatusArray
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
def ping_host(host):
ping_fail_count = rospy.get_param('~ping_fail_count', 2)
ping_command = "ping -c %s -n -W 1 %s" % (ping_fail_count, host)
# TODO: don't shell out, use a more secure python library
p = subprocess.Popen(ping_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, error) = p.communicate()
returncode = p.returncode
return output, error, returncode
class RecoveryController():
def __init__(self):
self.cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
self.joy_drive = rospy.Publisher('joy_drive', Joy, queue_size=10)
self.joy_arm = rospy.Publisher('joy_arm', Joy, queue_size=10)
self.vel_limit_lost_comms = rospy.Publisher('vel_limit_lost_comms', Float32, queue_size=10)
self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Twist, self.cmd_vel_callback)
self.cmd_vel_twist = Twist()
def cmd_vel_callback(self, msg):
self.cmd_vel_twist = msg
def working_comms(self):
working_comms = False
if (self.ips != "no"):
for ip in self.ips.split(','):
(output, error, returncode) = ping_host(ip)
if returncode == 0:
#ping = int(output.split('/')[-1].split('.')[0])
ping = float(output.split('time=')[1].split(' ')[0])
rospy.loginfo("ping %s: %s" % (ip, ping))
twist = Twist()
if ping > 1000:
self.vel_limit_lost_comms.publish(0.3)
twist.linear.x = self.cmd_vel_twist.linear.x/4
twist.angular.z = self.cmd_vel_twist.angular.z/4
self.cmd_vel.publish(twist)
elif ping > 500:
self.vel_limit_lost_comms.publish(0.6)
twist.linear.x = self.cmd_vel_twist.linear.x/2
twist.angular.z = self.cmd_vel_twist.angular.z/2
self.cmd_vel.publish(twist)
                    else:
self.vel_limit_lost_comms.publish(1)
twist.linear.x = self.cmd_vel_twist.linear.x
twist.angular.z = self.cmd_vel_twist.angular.z
self.cmd_vel.publish(twist)
working_comms = True
else:
working_comms = True
return working_comms
def zero_joystick(self):
joyDrive = Joy()
joyArm = Joy()
if (self.joy_drive_model == 'xbox'):
joyDrive.axes = [0] * 8
joyDrive.buttons = [0] * 11
elif (self.joy_drive_model == 'ec'):
joyDrive.axes = [0] * 8
joyDrive.buttons = [0] * 15
elif (self.joy_drive_model == 'ps5'):
joyDrive.axes = [0] * 12
joyDrive.buttons = [0] * 12
joyArm.axes = [0] * 3
joyArm.buttons = [0] * 11
self.joy_drive.publish(joyDrive)
self.joy_arm.publish(joyArm)
def do_recovery(self):
if rospy.is_shutdown(): return
rospy.logerr('No connection to base station.')
#if self.connect_to_move_base():
#if self.goal_in_progress():
#rospy.loginfo("Navigation in progress, not recovering until finished...")
#return
#self.navigation_goal_to(self.recovery_pose)
self.zero_joystick()
self.stop_motors()
def stop_motors(self):
twist = Twist() # zero motion
self.cmd_vel.publish(twist)
def main_loop(self):
while not rospy.is_shutdown():
if not self.working_comms():
self.do_recovery()
time.sleep(1)
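# An offline check of the throttle tiers used in working_comms() above
# (an added sketch; the ping values are made up). It mirrors the
# >1000 ms / >500 ms / otherwise branches without touching ROS.
def vel_limit_for(ping_ms):
    if ping_ms > 1000:
        return 0.3
    elif ping_ms > 500:
        return 0.6
    return 1.0
# for p in (1500, 700, 100): print(p, vel_limit_for(p))  # 0.3 / 0.6 / 1.0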
def main():
rospy.init_node("qr_rover_lost_comms")
    qr_rover_lost_comms = RecoveryController()
qr_rover_lost_comms.ips = rospy.get_param('~ips_to_monitor')
qr_rover_lost_comms.joy_drive_model = rospy.get_param('~joy_drive_model')
rospy.loginfo('Monitoring base station on IP(s): %s.' % qr_rover_lost_comms.ips)
    qr_rover_lost_comms.main_loop() # start monitoring
if __name__ == '__main__':
    main()
| 39.646018
| 99
| 0.59375
| 568
| 4,480
| 4.450704
| 0.262324
| 0.042722
| 0.059335
| 0.047468
| 0.245253
| 0.170886
| 0.16693
| 0.131329
| 0.131329
| 0.048259
| 0
| 0.019396
| 0.297991
| 4,480
| 113
| 100
| 39.646018
| 0.78442
| 0.074107
| 0
| 0.106383
| 0
| 0
| 0.056294
| 0
| 0
| 0
| 0
| 0.00885
| 0
| 1
| 0.095745
| false
| 0
| 0.106383
| 0
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8d5d6f27303f0d53ce075025843560499c32f81
| 508
|
py
|
Python
|
backend/swagger_server/helpers/_add_audit_entry.py
|
Lend88/libresign
|
9537f39a696fa5f3433052406329d77d528b6cf9
|
[
"MIT"
] | 6
|
2019-01-29T05:58:37.000Z
|
2021-11-02T22:47:02.000Z
|
backend/swagger_server/helpers/_add_audit_entry.py
|
Lend88/libresign
|
9537f39a696fa5f3433052406329d77d528b6cf9
|
[
"MIT"
] | 9
|
2020-09-09T04:53:01.000Z
|
2022-03-08T22:52:18.000Z
|
backend/swagger_server/helpers/_add_audit_entry.py
|
Lend88/libresign
|
9537f39a696fa5f3433052406329d77d528b6cf9
|
[
"MIT"
] | 4
|
2019-01-29T07:38:55.000Z
|
2021-10-16T21:06:42.000Z
|
from uuid import UUID
import json
from ..mappings import *
def add_doc_audit_entry(session, doc_id, status, data):
""""Add an audit entry, requires that a commit
be run on the session afterwards
"""
if not isinstance(doc_id, UUID):
raise ValueError("Expecting UUID")
if not isinstance(data, dict):
raise ValueError("Expecting dict")
session.add(FileUsage(
document_id=doc_id.bytes,
fileusage_type=status,
data=json.dumps(data)
))
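# A usage sketch (an addition; run within the package, e.g. via
# `python -m`, since the relative import above needs a package context).
# The stub session only records .add() calls, so no database is involved;
# a real SQLAlchemy Session must still call session.commit() afterwards,
# as the docstring warns.
if __name__ == "__main__":
    from uuid import uuid4
    class _StubSession:
        def add(self, obj):
            print("queued:", type(obj).__name__)
    add_doc_audit_entry(_StubSession(), uuid4(), "signed", {"actor": "alice"})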
| 22.086957
| 55
| 0.65748
| 67
| 508
| 4.865672
| 0.537313
| 0.046012
| 0.092025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 508
| 22
| 56
| 23.090909
| 0.855643
| 0.149606
| 0
| 0
| 0
| 0
| 0.067961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8da9080a11e6c113c5b2a18202d6e7d74fba286
| 4,942
|
py
|
Python
|
bioinfo/assembly/overlap.py
|
sohyongsheng/sequence_assembly
|
f2dea763da447f09f49de8fbf3ceaad8ed3e0559
|
[
"MIT"
] | 1
|
2022-02-02T07:49:58.000Z
|
2022-02-02T07:49:58.000Z
|
bioinfo/assembly/overlap.py
|
sohyongsheng/sequence_assembly
|
f2dea763da447f09f49de8fbf3ceaad8ed3e0559
|
[
"MIT"
] | null | null | null |
bioinfo/assembly/overlap.py
|
sohyongsheng/sequence_assembly
|
f2dea763da447f09f49de8fbf3ceaad8ed3e0559
|
[
"MIT"
] | null | null | null |
import numpy as np
from bioinfo.assembly.errors import InvalidPair
from bioinfo.molecules.sequence import Sequence
class LargestOverlapFinder:
def __init__(self):
pass
# Get indices a, b, c, d of longest substrings first,
# such that substring == first[a: b] == second[c: d].
# Also returns length of substring.
def get_substrings(self, counter):
while not np.all(counter == 0):
i, j = np.unravel_index(counter.argmax(), counter.shape)
length = counter[i, j]
for k in range(length):
counter[i - k, j - k] = 0
b, d = i + 1, j + 1
a, c = b - length, d - length
indices = a, b, c, d
yield indices, length
def is_overlap(self, indices, first, second):
a, b, c, d = indices
# First overlaps with second, e.g.
# 0123
# 1234
# ^^^
if b == len(first) and c == 0:
return True
# Second overlaps with first, e.g.
# 1234
# 0123
# ^^^
elif a == 0 and d == len(second):
return True
# First is within second, e.g.
# 123
# 01234
# ^^^
elif a == 0 and b == len(first):
return True
# Second is within first, e.g.
# 01234
# 123
# ^^^
elif c == 0 and d == len(second):
return True
else:
return False
# Taken from longest common substring problem. See
# following for tutorial on dynamic programming solution:
# https://www.youtube.com/watch?v=BysNXJHzCEs
def tally_counter(self, first, second):
num_rows = len(first) + 1
num_cols = len(second) + 1
counter = np.zeros((num_rows, num_cols), dtype = int)
for i, m in enumerate(first, start = 1):
for j, n in enumerate(second, start = 1):
if m == n:
counter[i, j] = counter[i - 1, j - 1] + 1
counter = self.remove_first_row_first_col(counter)
return counter
def find(self, first, second):
counter = self.tally_counter(first, second)
for indices, length in self.get_substrings(counter):
a, b, c, d = indices
assert first[a: b] == second[c: d]
if self.is_overlap(indices, first, second):
return indices, length
else:
indices, length = None, 0
return indices, length
def remove_first_row_first_col(self, x):
return x[1:, 1:]
class Pair:
finder = LargestOverlapFinder()
def __init__(self, first, second):
self.first = first
self.second = second
if self.first.is_dna != self.second.is_dna:
raise InvalidPair(
"Cannot compare DNA with RNA sequences."
)
self.indices, self.overlap_length = self.finder.find(
self.first.seq_str,
self.second.seq_str,
)
def combine(self):
first = self.first.seq_str
second = self.second.seq_str
# No overlap, so just concatenate.
if self.overlap_length == 0:
combined = first + second
return Sequence(
combined,
is_dna = self.first.is_dna,
)
else:
a, b, c, d = self.indices
# First overlaps with second, e.g.
# 0123
# 1234
# ^^^
if b == len(self.first) and c == 0:
prefix = first[:a]
assert first[a: b] == second[c: d]
overlap = first[a: b]
suffix = second[d:]
combined = prefix + overlap + suffix
return Sequence(
combined,
is_dna = self.first.is_dna,
)
# Second overlaps with first, e.g.
# 1234
# 0123
# ^^^
elif a == 0 and d == len(self.second):
prefix = second[:c]
assert second[c: d] == first[a: b]
overlap = second[c: d]
suffix = first[b:]
combined = prefix + overlap + suffix
return Sequence(
combined,
is_dna = self.first.is_dna,
)
# First is within second, e.g.
# 123
# 01234
# ^^^
elif a == 0 and b == len(self.first):
return Sequence(
second,
is_dna = self.second.is_dna,
)
# Second is within first, e.g.
# 01234
# 123
# ^^^
elif c == 0 and d == len(self.second):
return Sequence(
first,
is_dna = self.first.is_dna,
)
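# A worked example (an added sketch): 'ABCD' and 'CDEF' share the
# suffix/prefix overlap 'CD', so the finder should report indices
# (2, 4, 0, 2) with length 2, i.e. first[2:4] == second[0:2] == 'CD'.
if __name__ == "__main__":
    finder = LargestOverlapFinder()
    indices, length = finder.find("ABCD", "CDEF")
    print(indices, length)  # -> (2, 4, 0, 2) 2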
| 31.679487
| 68
| 0.474909
| 561
| 4,942
| 4.103387
| 0.219251
| 0.054735
| 0.026064
| 0.008688
| 0.337967
| 0.30278
| 0.261946
| 0.2298
| 0.2298
| 0.21199
| 0
| 0.030753
| 0.42756
| 4,942
| 155
| 69
| 31.883871
| 0.782962
| 0.140834
| 0
| 0.259615
| 0
| 0
| 0.009039
| 0
| 0
| 0
| 0
| 0
| 0.028846
| 1
| 0.076923
| false
| 0.009615
| 0.028846
| 0.009615
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8dab9e9589a6e0d7ec3775c63cd68cd42f91ee4
| 857
|
py
|
Python
|
models/operations.py
|
NikolayXHD/tinkoff-api-python
|
4a4b71f7af1d752b8566299c058b712b513fa92f
|
[
"MIT"
] | null | null | null |
models/operations.py
|
NikolayXHD/tinkoff-api-python
|
4a4b71f7af1d752b8566299c058b712b513fa92f
|
[
"MIT"
] | null | null | null |
models/operations.py
|
NikolayXHD/tinkoff-api-python
|
4a4b71f7af1d752b8566299c058b712b513fa92f
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from . import _base
class Operations(_base.Model):
swagger_types: dict[str, str] = {'operations': 'list[Operation]'}
attribute_map: dict[str, str] = {'operations': 'operations'}
def __init__(self, operations=None):
self._operations = None
self.discriminator = None
self.operations = operations
@property
def operations(self):
"""
:rtype: list[clients.tinkoff.models.Operation]
"""
return self._operations
@operations.setter
def operations(self, operations):
"""
:param list[clients.tinkoff.models.Operation] operations:
"""
if operations is None:
raise ValueError(
'Invalid value for `operations`, must not be `None`'
)
self._operations = operations
| 25.205882
| 69
| 0.611435
| 83
| 857
| 6.13253
| 0.46988
| 0.165029
| 0.10609
| 0.078585
| 0.129666
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28238
| 857
| 33
| 70
| 25.969697
| 0.827642
| 0.121354
| 0
| 0
| 0
| 0
| 0.134561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.105263
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8dbf09302e48945dea0b1250add3f9a59269652
| 827
|
py
|
Python
|
app/api/utils/remoteImageMapper.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | 589
|
2017-10-22T04:11:08.000Z
|
2022-03-26T22:50:30.000Z
|
app/api/utils/remoteImageMapper.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | 134
|
2017-11-14T02:52:03.000Z
|
2022-03-22T12:51:09.000Z
|
app/api/utils/remoteImageMapper.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | 170
|
2017-10-06T06:22:43.000Z
|
2022-03-15T02:12:34.000Z
|
def remoteImagesList(images):
response = []
aliasesProcessed = []
aliases = [alias[20:] for alias in images['metadata']]
for alias in aliases:
strippedAlias = alias.replace('/default','')
if strippedAlias not in aliasesProcessed:
aliasesDetails = alias.split('/')
if len(aliasesDetails) > 2:
image = prepRemoteImageObject(strippedAlias, aliasesDetails)
if image not in response: response.append(image)
aliasesProcessed.append(strippedAlias)
return response
def prepRemoteImageObject(alias, aliasesDetails):
image = {
        'name': str(aliasesDetails[0]),
        'distribution': str(aliasesDetails[1]),
        'architecture': str(aliasesDetails[2]),
'image': alias
}
return image
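# A quick illustration with a fabricated LXD-style payload (an added
# sketch; real alias entries look like
# '/1.0/images/aliases/<name>/<dist>/<arch>[/default]', which is what the
# [20:] slice above strips down to '<name>/<dist>/<arch>').
if __name__ == "__main__":
    fake = {'metadata': [
        '/1.0/images/aliases/ubuntu/xenial/amd64',
        '/1.0/images/aliases/ubuntu/xenial/amd64/default',
    ]}
    print(remoteImagesList(fake))
    # -> [{'name': 'ubuntu', 'distribution': 'xenial',
    #      'architecture': 'amd64', 'image': 'ubuntu/xenial/amd64'}]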
| 30.62963
| 76
| 0.628779
| 71
| 827
| 7.15493
| 0.422535
| 0.031496
| 0.03937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009836
| 0.262394
| 827
| 26
| 77
| 31.807692
| 0.822951
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8e2a3f8d1524fcc6efb93afc74fa20ef2432c75
| 2,049
|
py
|
Python
|
gemd/entity/template/has_property_templates.py
|
CitrineInformatics/gemd-python
|
4f80045c1b481269c7451f6a205755c22093eb74
|
[
"Apache-2.0"
] | 7
|
2020-04-02T11:11:09.000Z
|
2022-02-05T23:19:51.000Z
|
gemd/entity/template/has_property_templates.py
|
CitrineInformatics/gemd-python
|
4f80045c1b481269c7451f6a205755c22093eb74
|
[
"Apache-2.0"
] | 24
|
2020-04-22T16:55:09.000Z
|
2022-03-30T20:44:39.000Z
|
gemd/entity/template/has_property_templates.py
|
CitrineInformatics/gemd-python
|
4f80045c1b481269c7451f6a205755c22093eb74
|
[
"Apache-2.0"
] | 3
|
2020-05-08T00:50:02.000Z
|
2020-12-19T00:48:56.000Z
|
"""For entities that have a property template."""
from gemd.entity.link_by_uid import LinkByUID
from gemd.entity.setters import validate_list
from gemd.entity.template.base_template import BaseTemplate
from gemd.entity.template.property_template import PropertyTemplate
from gemd.entity.bounds.base_bounds import BaseBounds
from typing import Iterable
class HasPropertyTemplates(object):
"""
Mixin-trait for entities that include property templates.
Parameters
----------
properties: List[(PropertyTemplate, BaseBounds)]
A list of tuples containing this entity's property templates as well
as any restrictions on those templates' bounds.
"""
def __init__(self, properties):
self._properties = None
self.properties = properties
@property
def properties(self):
"""
Get the list of property template/bounds tuples.
Returns
-------
List[(PropertyTemplate, bounds)]
List of this entity's property template/bounds pairs
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
        Set the list of property templates.
Parameters
----------
properties: List[(PropertyTemplate, bounds)]
A list of tuples containing this entity's property templates as well
as any restrictions on those templates' bounds.
Returns
-------
List[(PropertyTemplate, bounds)]
List of this entity's property template/bounds pairs
"""
if isinstance(properties, Iterable):
if any(isinstance(x, BaseBounds) for x in properties):
properties = [properties] # It's a template/bounds tuple (probably)
self._properties = validate_list(properties,
(PropertyTemplate, LinkByUID, list, tuple),
trigger=BaseTemplate._homogenize_ranges
)
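# A usage sketch (an addition; assumes gemd's usual RealBounds and
# PropertyTemplate constructors, so treat the exact signatures as hedged).
# Each entry may be a bare template or a (template, tighter-bounds) pair,
# as the docstrings above describe.
if __name__ == "__main__":
    from gemd.entity.bounds import RealBounds
    density = PropertyTemplate(
        "density", bounds=RealBounds(0, 100, default_units="g/cm^3"))
    class TemplateWithProperties(HasPropertyTemplates):
        pass
    t = TemplateWithProperties(
        properties=[(density, RealBounds(0, 25, default_units="g/cm^3"))])
    print(t.properties)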
| 32.52381
| 84
| 0.625671
| 207
| 2,049
| 6.115942
| 0.323672
| 0.028436
| 0.055292
| 0.060032
| 0.350711
| 0.273302
| 0.273302
| 0.273302
| 0.273302
| 0.273302
| 0
| 0
| 0.295266
| 2,049
| 62
| 85
| 33.048387
| 0.876731
| 0.406052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.272727
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8e80bc7bd958f10a7a1f279ed0d99283b77f722
| 1,184
|
py
|
Python
|
preprocessing.py
|
Alloooshe/facelib_modular_face_recognition_pipline
|
0313214b6f919e49e84235c1a6a4a4838b813e73
|
[
"MIT"
] | 10
|
2019-12-29T13:38:56.000Z
|
2021-03-15T07:21:52.000Z
|
preprocessing.py
|
Alloooshe/facelib_modular_face_recognition_pipline
|
0313214b6f919e49e84235c1a6a4a4838b813e73
|
[
"MIT"
] | 1
|
2021-03-15T07:45:45.000Z
|
2021-03-17T11:10:53.000Z
|
preprocessing.py
|
Alloooshe/facelib_modular_face_recognition_pipline
|
0313214b6f919e49e84235c1a6a4a4838b813e73
|
[
"MIT"
] | 2
|
2020-05-03T08:33:39.000Z
|
2021-02-06T16:49:54.000Z
|
import cv2
import numpy as np
class preprocessing:
def process_image(self,image, rescale, recolor):
if rescale['req']:
image= self.rescale(image,rescale['width'], rescale['height'])
if recolor['req']:
image = self.rgb2gray(image)
return image
def rescale (self,image,width,height):
image= cv2.resize(image,(width,height))
return image
def rgb2gray(self,image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image
def crop (self,image,boxes ):
faces = []
for box in boxes :
x=int( round (box[0]))
y=int( round (box[1]))
w=int (round (box[2]) )
h=int (round ( box[3]))
cropped = image[y:h+y,x : w+x,:]
faces.append(cropped)
return faces
def resize2square (self,image,x,y):
resized= cv2.resize(image,(x,y),interpolation=cv2.INTER_AREA)
return resized
    def preprocess_facenet(self, images):
        # np.append returns a new array instead of filling ret in place,
        # so assign each resized face into its preallocated slot.
        ret = np.zeros([len(images), 160, 160, 3])
        for i, image in enumerate(images):
            ret[i] = self.resize2square(image, 160, 160)
        return ret
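# A smoke test with a synthetic frame (an added sketch; needs only the
# numpy/cv2 imports above). Boxes follow the [x, y, w, h] layout that
# crop() expects.
if __name__ == "__main__":
    pre = preprocessing()
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    faces = pre.crop(frame, [[100, 50, 80, 120]])
    print(faces[0].shape)                       # (120, 80, 3)
    print(pre.preprocess_facenet(faces).shape)  # (1, 160, 160, 3)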
| 28.878049
| 71
| 0.579392
| 152
| 1,184
| 4.486842
| 0.342105
| 0.065982
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033413
| 0.29223
| 1,184
| 41
| 72
| 28.878049
| 0.78043
| 0
| 0
| 0.088235
| 0
| 0
| 0.014847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.058824
| 0
| 0.441176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ebd9a417dcbfc90f2665cef2e143f107c15986
| 497
|
py
|
Python
|
covid_19_stat.py
|
pavelkalinchuk/api
|
3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2
|
[
"Apache-2.0"
] | null | null | null |
covid_19_stat.py
|
pavelkalinchuk/api
|
3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2
|
[
"Apache-2.0"
] | null | null | null |
covid_19_stat.py
|
pavelkalinchuk/api
|
3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2
|
[
"Apache-2.0"
] | null | null | null |
import requests
from datetime import date, timedelta
today = date.today()
yesterday = today - timedelta(days=1)
country = "Russia"
endpoint = f"https://api.covid19api.com/country/{country}/status/confirmed"
params = {"from": str(yesterday), "to": str(today)}
response = requests.get(endpoint, params=params).json()
total_confirmed = 0
for day in response:
cases = day.get("Cases", 0)
total_confirmed += cases
print("\n"f"Total Confirmed Covid-19 cases in {country}: {total_confirmed}")
| 29.235294
| 76
| 0.724346
| 68
| 497
| 5.25
| 0.529412
| 0.156863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.126761
| 497
| 16
| 77
| 31.0625
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ee532a04ed15373dc8d2091c28d0c7dca10643
| 2,834
|
py
|
Python
|
MPI/py/plot_mpi_timing.py
|
mlxd/myscripts
|
b8b7d6b270ef24b06028e21f066c2bb587f94cef
|
[
"MIT"
] | null | null | null |
MPI/py/plot_mpi_timing.py
|
mlxd/myscripts
|
b8b7d6b270ef24b06028e21f066c2bb587f94cef
|
[
"MIT"
] | null | null | null |
MPI/py/plot_mpi_timing.py
|
mlxd/myscripts
|
b8b7d6b270ef24b06028e21f066c2bb587f94cef
|
[
"MIT"
] | null | null | null |
#This file plots the results from the MPI timing runs
import sys
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.markers as mkr
plt_style='ggplot'
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['figure.titlesize'] = 12
#We begin by loading the CSV file of rank pairings and times into the appropriate format
StartStr = str(sys.argv[1])
EndStr = str(sys.argv[2])
start = np.loadtxt(open(StartStr), delimiter=',', dtype={'names': ('A','B','t'), 'formats':('i4','i4','f8')})
end = np.loadtxt(open(EndStr), delimiter=',', dtype={'names': ('A','B','t'), 'formats':('i4','i4','f8')})
ds=[{'%s:%s'%(a,b): (a,b,t) for a,b,t in zip(start['A'],start['B'],start['t']) }]
de=[{'%s:%s'%(a,b): (a,b,t) for a,b,t in zip(end['A'],end['B'],end['t']) }]
#We take note of the starting time over all ranks as a 0 offset
t0 = np.min(start['t'])
#3D Rank A:B vs time diagram
fig = plt.figure()
plt.style.use(plt_style)
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.set_zlabel('time [s]')
ax.set_ylabel('Rank To Merge')
ax.set_xlabel('Rank Base')
#Plot the recorded times and connect ranks that have been merged together
for a in ds[0].keys():
ax.scatter( ds[0][a][0], ds[0][a][1], ds[0][a][2]-t0, c='r', marker='o') #Plot start
ax.scatter( de[0][a][0], de[0][a][1], de[0][a][2]-t0, c='b', marker='x') #Plot end
ax.plot( [ ds[0][a][0], de[0][a][0] ], [ ds[0][a][1], de[0][a][1] ], [ ds[0][a][2] - t0, de[0][a][2] - t0 ], c='k') #Draw line between start and finish
ax.set_zlim3d([ 0, np.max(end['t']) - t0 ])
ax.set_ylim3d([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
ax.set_xlim3d([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
plt.show()
#Save the 3D plot output
plt.savefig('3d_%s_%s.pdf'%(StartStr, EndStr))
plt.clf()
plt.style.use( plt_style )
#2D connections diagram
#Draw lines to mark the MPI ranks
for ii in range(np.max([start['A'],start['B']])):
plt.axhline(ii, xmin=0, xmax=1, linewidth=0.5)
#Draw lines between the start and end for reducing 2 data sets
for a in ds[0].keys():
plt.plot( [ ds[0][a][2] - t0, de[0][a][2] - t0] , [ds[0][a][1], de[0][a][0]], linestyle='-', linewidth=0.5, c='k', alpha=0.8)
plt.scatter( start['t'] - t0, start['B'], marker='x', c='r', alpha=0.8)
plt.scatter( end['t'] - t0, end['A'], marker='o', c='b', alpha=0.8)
plt.xlabel('time [s]')
plt.ylabel('MPI rank')
plt.title('%s_%s'%(StartStr, EndStr))
plt.xlim([ 0, np.max(end['t']) - t0 ])
plt.ylim([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
plt.show()
#Save the 2D plot output
plt.savefig('2d_%s_%s.pdf'%(StartStr, EndStr))
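# Usage sketch (added; the file names are hypothetical). The script expects
# two CSV files of "rankA,rankB,time" rows on the command line:
#   python plot_mpi_timing.py start_times.csv end_times.csv
# and writes 3d_<start>_<end>.pdf and 2d_<start>_<end>.pdf alongside them.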
| 38.821918
| 155
| 0.61856
| 538
| 2,834
| 3.228625
| 0.291822
| 0.018423
| 0.018423
| 0.032239
| 0.264824
| 0.200921
| 0.15947
| 0.145653
| 0.138169
| 0.138169
| 0
| 0.036977
| 0.122089
| 2,834
| 72
| 156
| 39.361111
| 0.661174
| 0.181016
| 0
| 0.117647
| 0
| 0
| 0.121317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098039
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8effc674c65f81f1f4c9fdac1c750120b3d16ef
| 716
|
py
|
Python
|
octavia-cli/unit_tests/test_entrypoint.py
|
pluralsh/airbyte
|
9b1ed03fe482f5154f6c1843b1be76de87f3605d
|
[
"MIT"
] | 1
|
2022-01-27T22:29:38.000Z
|
2022-01-27T22:29:38.000Z
|
octavia-cli/unit_tests/test_entrypoint.py
|
pluralsh/airbyte
|
9b1ed03fe482f5154f6c1843b1be76de87f3605d
|
[
"MIT"
] | null | null | null |
octavia-cli/unit_tests/test_entrypoint.py
|
pluralsh/airbyte
|
9b1ed03fe482f5154f6c1843b1be76de87f3605d
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from click.testing import CliRunner
from octavia_cli import entrypoint
def test_octavia():
runner = CliRunner()
result = runner.invoke(entrypoint.octavia)
assert result.exit_code == 0
assert result.output.startswith("Usage: octavia [OPTIONS] COMMAND [ARGS]...")
@pytest.mark.parametrize(
"command",
[entrypoint.init, entrypoint.apply, entrypoint.create, entrypoint.delete, entrypoint._list, entrypoint._import],
)
def test_not_implemented_commands(command):
runner = CliRunner()
result = runner.invoke(command)
assert result.exit_code == 1
assert result.output.endswith("not yet implemented.\n")
| 27.538462
| 116
| 0.734637
| 85
| 716
| 6.082353
| 0.541176
| 0.092843
| 0.081238
| 0.104449
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009885
| 0.152235
| 716
| 25
| 117
| 28.64
| 0.841845
| 0.075419
| 0
| 0.117647
| 0
| 0
| 0.107903
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.117647
| false
| 0
| 0.235294
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f2a4e3254c600092c6d8f19d958953e7b804a3
| 5,261
|
py
|
Python
|
src/device/eltako/fsr61_actor.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | 1
|
2020-12-01T17:10:14.000Z
|
2020-12-01T17:10:14.000Z
|
src/device/eltako/fsr61_actor.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | 1
|
2021-09-19T13:38:02.000Z
|
2021-09-19T13:38:02.000Z
|
src/device/eltako/fsr61_actor.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | null | null | null |
import json
import logging
import random
from datetime import datetime
from typing import Optional
from paho.mqtt.client import MQTTMessage
from enocean.protocol.constants import PACKET
from enocean.protocol.packet import RadioPacket
from src.command.switch_command import SwitchCommand
from src.common.json_attributes import JsonAttributes
from src.common.switch_state import SwitchState
from src.device.base.cyclic_device import CheckCyclicTask
from src.device.base.scene_actor import SceneActor
from src.device.eltako.fsr61_eep import Fsr61Eep, Fsr61Action, Fsr61Command
from src.device.misc.rocker_switch_tools import RockerSwitchTools, RockerAction, RockerButton
from src.enocean_connector import EnoceanMessage
from src.tools.enocean_tools import EnoceanTools
from src.tools.pickle_tools import PickleTools
class Fsr61Actor(SceneActor, CheckCyclicTask):
"""
Specialized for: Eltako FSR61-230V (an ON/OFF relay switch)
"""
DEFAULT_REFRESH_RATE = 300 # in seconds
def __init__(self, name):
SceneActor.__init__(self, name)
CheckCyclicTask.__init__(self)
self._current_switch_state = None # type: Optional[SwitchState]
self._last_status_request = None # type: Optional[datetime]
def process_enocean_message(self, message: EnoceanMessage):
packet = message.payload # type: RadioPacket
if packet.packet_type != PACKET.RADIO:
self._logger.debug("skipped packet with packet_type=%s", EnoceanTools.packet_type_to_string(packet.rorg))
return
if packet.rorg == RockerSwitchTools.DEFAULT_EEP.rorg:
props = RockerSwitchTools.extract_props(packet)
self._logger.debug("proceed_enocean - got=%s", props)
action = RockerSwitchTools.extract_action(props) # type: RockerAction
if action.button == RockerButton.ROCK3:
self._current_switch_state = SwitchState.ON
elif action.button == RockerButton.ROCK2:
self._current_switch_state = SwitchState.OFF
else:
self._current_switch_state = SwitchState.ERROR
else:
self._current_switch_state = SwitchState.ERROR
if self._current_switch_state not in [SwitchState.ON, SwitchState.OFF]:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("proceed_enocean - pickled error packet:\n%s", PickleTools.pickle_packet(packet))
self._logger.debug("proceed_enocean - switch_state=%s", self._current_switch_state)
self._last_status_request = self._now()
self._reset_offline_refresh_timer()
message = self._create_json_message(self._current_switch_state)
self._publish_mqtt(message)
def _create_json_message(self, switch_state: SwitchState):
data = {
JsonAttributes.DEVICE: self.name,
JsonAttributes.STATE: switch_state.value,
JsonAttributes.TIMESTAMP: self._now().isoformat(),
}
json_text = json.dumps(data)
return json_text
def process_mqtt_message(self, message: MQTTMessage):
try:
self._logger.debug('process_mqtt_message: "%s"', message.payload)
command = SwitchCommand.parse(message.payload)
self._logger.debug("mqtt command: '{}'".format(repr(command)))
self._execute_actor_command(command)
except ValueError:
self._logger.error("cannot execute command! message: {}".format(message.payload))
def _execute_actor_command(self, command: SwitchCommand):
if command.is_toggle:
command = SwitchCommand.OFF if self._current_switch_state == SwitchState.ON else SwitchCommand.ON
if command.is_on_or_off:
action = Fsr61Action(
command=Fsr61Command.SWITCHING,
switch_state=SwitchState.ON if command.is_on else SwitchState.OFF,
)
elif command.is_update:
action = Fsr61Action(command=Fsr61Command.STATUS_REQUEST)
elif command.is_learn:
action = Fsr61Action(command=Fsr61Command.SWITCHING, switch_state=SwitchState.ON, learn=True)
else:
raise ValueError("SwitchCommand ({}) not supported!".format(command))
action.sender = self._enocean_sender
action.destination = self._enocean_target or 0xffffffff
props, packet = Fsr61Eep.create_props_and_packet(action)
self._logger.debug("sending '{}' => {}".format(action, props))
self._send_enocean_packet(packet)
def check_cyclic_tasks(self):
self._check_and_send_offline()
self._request_update()
def _request_update(self):
diff_seconds = None
now = self._now()
refresh_rate = self._randomized_refresh_rate
if self._last_status_request is not None:
diff_seconds = (now - self._last_status_request).total_seconds()
if diff_seconds is None or diff_seconds >= refresh_rate:
self._last_status_request = now
self._execute_actor_command(SwitchCommand.UPDATE)
@property
def _randomized_refresh_rate(self) -> int:
        return self.DEFAULT_REFRESH_RATE + random.randint(0, int(self.DEFAULT_REFRESH_RATE * 0.1))
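
# Hedged illustration (added, not in the original module): with
# DEFAULT_REFRESH_RATE = 300 the property above yields a value in [300, 330]
# seconds, which staggers status requests across many devices.
if __name__ == "__main__":
    assert all(
        300 <= 300 + random.randint(0, int(300 * 0.1)) <= 330
        for _ in range(1000)
    )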
| 40.469231
| 117
| 0.698536
| 599
| 5,261
| 5.851419
| 0.250417
| 0.047076
| 0.043652
| 0.056491
| 0.151213
| 0.103281
| 0.063338
| 0.039372
| 0.039372
| 0
| 0
| 0.009248
| 0.21897
| 5,261
| 129
| 118
| 40.782946
| 0.843758
| 0.030603
| 0
| 0.05
| 0
| 0
| 0.051969
| 0.004134
| 0
| 0
| 0.001969
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.18
| 0.01
| 0.31
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f361858524234ea8e385c43bd790d28e9507fd
| 1,960
|
py
|
Python
|
neuroml/arraymorph_load_time_benchmark.py
|
NeuralEnsemble/libNeuroML
|
75d1630a0c6354a3997c4068dc8cdc447491b6f8
|
[
"BSD-3-Clause"
] | 20
|
2015-03-11T11:21:32.000Z
|
2021-10-11T16:03:27.000Z
|
neuroml/arraymorph_load_time_benchmark.py
|
NeuralEnsemble/libNeuroML
|
75d1630a0c6354a3997c4068dc8cdc447491b6f8
|
[
"BSD-3-Clause"
] | 48
|
2015-01-15T18:41:01.000Z
|
2022-01-05T13:53:58.000Z
|
neuroml/arraymorph_load_time_benchmark.py
|
NeuralEnsemble/libNeuroML
|
75d1630a0c6354a3997c4068dc8cdc447491b6f8
|
[
"BSD-3-Clause"
] | 16
|
2015-01-14T21:53:46.000Z
|
2019-09-04T23:05:27.000Z
|
import numpy as np
import neuroml
import neuroml.arraymorph as am
class Benchmark:
def __init__(self, num_segments):
self.num_segments = num_segments
def set_up(self):
num_segments = int(1e4) # Per cell
num_vertices = num_segments + 1
x = np.linspace(0, 10, num_vertices)
y = np.zeros(num_vertices)
z = np.zeros(num_vertices)
d = np.linspace(1, 0.01, num_vertices)
vertices = np.array([x, y, z, d]).T
connectivity = range(-1, num_segments)
big_arraymorph = am.ArrayMorphology(
vertices=vertices, connectivity=connectivity
)
transposed_x = x + 10
transposed_vertices = np.array([transposed_x, y, z, d]).T
transposed_arraymorph = am.ArrayMorphology(
vertices=transposed_vertices, connectivity=connectivity
)
bigger_d = d + 0.5
fatter_vertices = np.array([x, y, z, bigger_d]).T
fatter_arraymorph = am.ArrayMorphology(
vertices=fatter_vertices, connectivity=connectivity
)
neuroml_cell = neuroml.Cell(id="cell_4")
neuroml_morphology = neuroml.Morphology(id="my_morph")
neuroml_cell.morphology = neuroml_morphology
self.transposed_arraymorph = transposed_arraymorph
self.fatter_arraymorph = fatter_arraymorph
self.big_arraymorph = big_arraymorph
self.cell_1 = neuroml.Cell(id="cell_1")
self.cell_2 = neuroml.Cell(id="cell_2")
self.cell_3 = neuroml.Cell(id="cell_3")
self.cell_1.morphology = transposed_arraymorph
self.cell_2.morphology = fatter_arraymorph
self.cell_3.morphology = big_arraymorph
self.test_doc = neuroml.NeuroMLDocument(id="TestDocument")
self.test_doc.cells.append(self.cell_1)
self.test_doc.cells.append(self.cell_2)
self.test_doc.cells.append(self.cell_3)
self.test_doc.cells.append(neuroml_cell)
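
# Hedged driver (added; the original file defines Benchmark but no runner).
# A minimal way to exercise set_up; wall-clock timing is left to the caller,
# e.g. via timeit.
if __name__ == "__main__":
    bench = Benchmark(num_segments=int(1e4))
    bench.set_up()
    print("cells in document:", len(bench.test_doc.cells))  # expect 4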
| 31.612903
| 67
| 0.656122
| 245
| 1,960
| 5.004082
| 0.220408
| 0.058728
| 0.044861
| 0.055465
| 0.126427
| 0.102773
| 0.073409
| 0
| 0
| 0
| 0
| 0.019035
| 0.24949
| 1,960
| 61
| 68
| 32.131148
| 0.814412
| 0.004082
| 0
| 0
| 0
| 0
| 0.022564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.066667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f61ba84ff26314734e24f05cd833da5e3ee801
| 2,813
|
py
|
Python
|
pymtl/tools/translation/verilog_bug_test.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 206
|
2015-01-05T21:53:56.000Z
|
2022-03-14T08:04:49.000Z
|
pymtl/tools/translation/verilog_bug_test.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 84
|
2015-01-25T19:57:33.000Z
|
2021-05-11T15:46:56.000Z
|
pymtl/tools/translation/verilog_bug_test.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 99
|
2015-02-17T17:43:44.000Z
|
2022-02-14T17:58:18.000Z
|
#=======================================================================
# verilog_bug_test.py
#=======================================================================
import pytest
from pymtl import *
from exceptions import VerilatorCompileError
pytestmark = requires_verilator
#-----------------------------------------------------------------------
# Point BitStruct
#-----------------------------------------------------------------------
class Point( BitStructDefinition ):
def __init__( s ):
s.x = BitField(4)
s.y = BitField(4)
#-----------------------------------------------------------------------
# setup_sim
#-----------------------------------------------------------------------
def setup_sim( model ):
model = TranslationTool( model )
model.elaborate()
sim = SimulationTool( model )
return model, sim
#-----------------------------------------------------------------------
# test_bitstruct_tick_reg
#-----------------------------------------------------------------------
@pytest.mark.parametrize(
'config', ['Tick','TickFields','Comb','CombFields']
)
def test_bitstruct_reg( config ):
class AssignBitStruct( Model ):
def __init__( s, config=None ):
s.in_ = InPort ( Point() )
s.out = OutPort( Point() )
if config == 'Tick':
@s.tick_rtl
def block():
s.out.next = s.in_
elif config == 'TickFields':
@s.tick_rtl
def block():
s.out.x.next = s.in_.x
s.out.y.next = s.in_.y
elif config == 'Comb':
@s.combinational
def block():
s.out.value = s.in_
elif config == 'CombFields':
@s.combinational
def block():
s.out.x.value = s.in_.x
s.out.y.value = s.in_.y
else: raise Exception( 'Invalid config =', config )
# verify verilator simulation
model, sim = setup_sim( AssignBitStruct( config ) )
for i in range( 10 ):
input_value = concat( *2*[Bits(4,i)] )
model.in_.value = input_value
sim.cycle()
assert model.out == input_value
# read verilog to verify our output signal is being declared as a reg
# (required by Synopsys design compiler)
with open( model.__class__.__name__+'.v', 'r' ) as fp:
assert 'output reg' in fp.read()
#-----------------------------------------------------------------------
# test_verilator_compile_error
#-----------------------------------------------------------------------
def test_verilator_compile_error( ):
class TestVerilatorCompileError( Model ):
def __init__( s ):
s.in_ = InPort(8)
s.out = OutPort(8)
@s.combinational
def logic():
s.in_.value = s.out
with pytest.raises( VerilatorCompileError ):
model = TestVerilatorCompileError()
model, sim = setup_sim( model )
| 29
| 72
| 0.460363
| 265
| 2,813
| 4.686792
| 0.358491
| 0.021739
| 0.028986
| 0.038647
| 0.090177
| 0.088567
| 0.032206
| 0
| 0
| 0
| 0
| 0.00354
| 0.196587
| 2,813
| 96
| 73
| 29.302083
| 0.546018
| 0.335585
| 0
| 0.186441
| 0
| 0
| 0.049189
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 1
| 0.186441
| false
| 0
| 0.050847
| 0
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f667d55a6083981558407ab139318c270d5ca3
| 436
|
py
|
Python
|
library/TraverseDirectory-M2.py
|
remytanx/python3-created-in-github
|
83b3dd0f36da6fc4df7c1cc37cac12f178f985a3
|
[
"MIT"
] | null | null | null |
library/TraverseDirectory-M2.py
|
remytanx/python3-created-in-github
|
83b3dd0f36da6fc4df7c1cc37cac12f178f985a3
|
[
"MIT"
] | null | null | null |
library/TraverseDirectory-M2.py
|
remytanx/python3-created-in-github
|
83b3dd0f36da6fc4df7c1cc37cac12f178f985a3
|
[
"MIT"
] | null | null | null |
import os
# Get the list of all files with a specific extension
# In this example, we will take a path of a directory and try to
# list all the files, with a specific extension .py here,
# in the directory and its sub-directories recursively.
path = r'C:\Users\10900225\Documents\Witch\BTX\Workspaces\Library'
for root, dirs, files in os.walk(path):
for file in files:
        if file.endswith(".py"):
            print(os.path.join(root, file))
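
# Equivalent sketch with pathlib (added for comparison; reuses the same
# `path` as above). Path.rglob recurses through subdirectories, like os.walk
# combined with the endswith filter.
from pathlib import Path

for py_file in Path(path).rglob("*.py"):
    print(py_file)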
| 33.538462
| 66
| 0.733945
| 76
| 436
| 4.210526
| 0.618421
| 0.05625
| 0.0625
| 0.1125
| 0.16875
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.165138
| 436
| 13
| 67
| 33.538462
| 0.857143
| 0.518349
| 0
| 0
| 0
| 0
| 0.286408
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f71840564fdc1ff2e1787b21b4d5173407d801
| 1,509
|
py
|
Python
|
Modules/carlosma7/wizard/create_appointment.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
Modules/carlosma7/wizard/create_appointment.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
Modules/carlosma7/wizard/create_appointment.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
#-*- coding: utf-8-*-
from odoo import api, fields, models, _
# Wizard class
class CreateAppointmentWizard(models.TransientModel):
_name = "create.appointment.wizard"
_description = "Create Appointment Wizard"
date_appointment = fields.Date(string='Date', required=False)
patient_id = fields.Many2one('hospital.patient', string="Patient", required=True)
# Wizard function
def action_create_appointment(self):
print("Wizard button is clicked")
vals = {
'patient_id': self.patient_id.id,
'date_appointment': self.date_appointment
}
# Create a new record
appointment_rec = self.env['hospital.appointment'].create(vals)
return {
'name': _('Appointment'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'hospital.appointment',
'res_id': appointment_rec.id,
}
# View appointment
def action_view_appointment(self):
# Method 1
# action = self.env.ref('carlosma7.action_hospital_appointment').read()[0]
# action['domain'] = [('patient_id', '=', self.patient_id.id)]
# return action
# Method 2
        # action = self.env['ir.actions.actions']._for_xml_id('carlosma7.action_hospital_appointment')
# action['domain'] = [('patient_id', '=', self.patient_id.id)]
# return action
# Method 3
return {
'type': 'ir.actions.act_window',
'name': 'Appointments',
'res_model': 'hospital.appointment',
'view_type': 'form',
'domain': [('patient_id', '=', self.patient_id.id)],
'view_mode': 'tree,form',
'target': 'current',
}
| 29.019231
| 97
| 0.686547
| 181
| 1,509
| 5.519337
| 0.364641
| 0.081081
| 0.052052
| 0.08008
| 0.206206
| 0.162162
| 0.138138
| 0.108108
| 0.108108
| 0.108108
| 0
| 0.006216
| 0.147117
| 1,509
| 52
| 98
| 29.019231
| 0.770008
| 0.284957
| 0
| 0.2
| 0
| 0
| 0.34334
| 0.062852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0.033333
| 0.333333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f838e818d81e237d9d5d8fa11595a921a6fae3
| 4,731
|
py
|
Python
|
groups.py
|
davidmehren/udm_group_matrix
|
ae71feef4bf299588aa473c95e9073c7d2f5f23e
|
[
"MIT"
] | null | null | null |
groups.py
|
davidmehren/udm_group_matrix
|
ae71feef4bf299588aa473c95e9073c7d2f5f23e
|
[
"MIT"
] | null | null | null |
groups.py
|
davidmehren/udm_group_matrix
|
ae71feef4bf299588aa473c95e9073c7d2f5f23e
|
[
"MIT"
] | 1
|
2019-12-06T14:59:39.000Z
|
2019-12-06T14:59:39.000Z
|
#!/bin/env python3
import re
from typing import List
import numpy as np
import matplotlib.pyplot as plt
filtered_users = ["join-backup", "join-slave", "ucs-sso"]
filtered_groups = ["computers", "dc backup hosts", "dc slave hosts"]
class LDAPUser:
name: str
def __init__(self, name):
self.name = name
def __eq__(self, o: 'LDAPUser') -> bool:
return self.name == o.name
def __lt__(self, o: 'LDAPUser') -> bool:
return self.name < o.name
def __hash__(self) -> int:
return self.name.__hash__()
class LDAPGroupList:
content: List['LDAPGroup']
def __init__(self):
self.content = []
def add(self, group):
if group.name not in filtered_groups:
self.content.append(group)
def get_by_name(self, name):
for _group in self.content:
if _group.name == name:
return _group
return None
def get_user_list(self):
user_list = set()
for group in self.content:
user_list.update(group.members)
return list(user_list)
def tidy(self):
new_content = []
for group in self.content:
if group.samba_rid < 0:
continue
if len(group.members) > 0:
new_content.append(group)
self.content = sorted(new_content)
class LDAPGroup:
name: str
samba_rid: int
subgroups: List[str]
members: List[LDAPUser]
def __str__(self) -> str:
_repr = f"{self.name}\n Mitglieder:\n"
for member in self.members:
_repr = _repr + f" {member.name}\n"
_repr = _repr + " Untergruppen:\n"
for _group in self.subgroups:
_repr = _repr + f" {_group}\n"
return _repr
def __lt__(self, o: 'LDAPGroup') -> bool:
return self.name < o.name
def __init__(self, name: str):
self.name = name.lower()
self.subgroups = []
self.members = []
def add_subgroup(self, group: str):
self.subgroups.append(group.lower())
def parse_subgroups(self, global_groups: LDAPGroupList):
for group_name in self.subgroups:
ldap_group = global_groups.get_by_name(group_name)
if ldap_group is None:
print(f"can't find group '{group_name}'")
else:
for member in ldap_group.members:
if member not in self.members:
self.members.append(member)
def add_member(self, member):
if member.name not in filtered_users:
self.members.append(member)
def read_groupdump():
_group_list = LDAPGroupList()
with open("groupdump.txt", "r") as file:
current_group = None
for line in file:
if line == "\n":
continue
if line.startswith("DN"):
current_group = LDAPGroup(re.findall(r"cn=(.*?),", line)[0])
_group_list.add(current_group)
# print(current_user)
if current_group.name.startswith("dns-") or current_group.name.startswith(
"ucs-") or current_group.name.startswith("join-"):
continue
if line.startswith(" users"):
user = LDAPUser(re.findall(r"uid=(.*?),", line)[0])
# print(" ", group)
current_group.add_member(user)
if line.startswith(" nestedGroup"):
subgroup = re.findall(r"cn=(.*?),", line)[0]
# print(" ", group)
current_group.add_subgroup(subgroup)
if line.startswith(" sambaRID:"):
rid = re.findall(r"([0-9]{1,4})", line)[0]
current_group.samba_rid = int(rid)
return _group_list
def paint_matrix(groups: LDAPGroupList):
user_list = sorted(groups.get_user_list(), reverse=True)
x_count = len(groups.content)
y_count = len(user_list)
matrix = np.zeros((x_count, y_count))
for g_index, group in enumerate(groups.content):
for user in group.members:
matrix[g_index][user_list.index(user)] = 1
plt.pcolor(matrix.T, edgecolors='k', cmap="Greys", vmin=0, vmax=1)
x_locations = [x + 0.5 for x in range(x_count)]
y_locations = [x + 0.5 for x in range(y_count)]
plt.xticks(x_locations, [group.name for group in groups.content], rotation=45, fontsize=4, ha="right")
plt.yticks(y_locations, [user.name for user in user_list], fontsize=2)
plt.tight_layout()
plt.savefig("groups.png", dpi=600)
if __name__ == '__main__':
groups = read_groupdump()
for group in groups.content:
group.parse_subgroups(groups)
groups.tidy()
paint_matrix(groups)
| 31.125
| 106
| 0.581484
| 595
| 4,731
| 4.406723
| 0.233613
| 0.030511
| 0.022883
| 0.021358
| 0.179252
| 0.11251
| 0.101449
| 0.047292
| 0.029748
| 0.029748
| 0
| 0.007541
| 0.299302
| 4,731
| 151
| 107
| 31.331126
| 0.783409
| 0.015853
| 0
| 0.09322
| 0
| 0
| 0.073087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144068
| false
| 0
| 0.033898
| 0.033898
| 0.330508
| 0.008475
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f8f117d6dace7d4b6c578a60f491f9e6393f0d
| 1,836
|
py
|
Python
|
common_tools/report_dialog.py
|
jamiecook/AequilibraE
|
b1013d59cbeaf6fc4e1a944cf31f20460a2a4156
|
[
"MIT"
] | null | null | null |
common_tools/report_dialog.py
|
jamiecook/AequilibraE
|
b1013d59cbeaf6fc4e1a944cf31f20460a2a4156
|
[
"MIT"
] | null | null | null |
common_tools/report_dialog.py
|
jamiecook/AequilibraE
|
b1013d59cbeaf6fc4e1a944cf31f20460a2a4156
|
[
"MIT"
] | null | null | null |
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Report dialog
Purpose: Dialog for showing the report from algorithm runs
Original Author: Pedro Camargo (c@margo.co)
Contributors:
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2014-03-19
Updated: 30/09/2016
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from qgis.core import *
from PyQt4 import QtGui, uic
from PyQt4.QtGui import *
import sys
import os
from auxiliary_functions import standard_path
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_report.ui'))
class ReportDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, iface, reporting):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.path = standard_path()
self.reporting = reporting
for t in reporting:
self.all_data.append(t)
self.but_save_log.clicked.connect(self.save_log)
self.but_close.clicked.connect(self.exit_procedure)
def save_log(self):
file_types = "Text files(*.txt)"
new_name = QFileDialog.getSaveFileName(None, 'Save log', self.path, file_types)
if len(new_name) > 0:
            if new_name[-3:].upper() != 'TXT':
new_name = new_name + '.txt'
outp = open(new_name, 'w')
for t in self.reporting:
                outp.write('%s\n' % t)  # portable replacement for the Python 2 "print >> outp, t"
outp.flush()
outp.close()
self.exit_procedure()
def exit_procedure(self):
self.close()
| 30.6
| 108
| 0.56427
| 206
| 1,836
| 4.854369
| 0.490291
| 0.042
| 0.033
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014114
| 0.228214
| 1,836
| 59
| 109
| 31.118644
| 0.691602
| 0.337691
| 0
| 0
| 0
| 0
| 0.042749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.1875
| 0
| 0.3125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8f9b47386e455dd9e70d1f591e4c141b1b8e828
| 21,580
|
py
|
Python
|
gui/robot_data_visualizer.py
|
wh1210/robot-data-visualizer
|
ebb59687233a8d09c8ed327c66ed1d69c4623136
|
[
"MIT"
] | null | null | null |
gui/robot_data_visualizer.py
|
wh1210/robot-data-visualizer
|
ebb59687233a8d09c8ed327c66ed1d69c4623136
|
[
"MIT"
] | 13
|
2018-11-20T22:55:39.000Z
|
2022-03-11T23:36:18.000Z
|
gui/robot_data_visualizer.py
|
wh1210/robot-data-visualizer
|
ebb59687233a8d09c8ed327c66ed1d69c4623136
|
[
"MIT"
] | 2
|
2018-11-09T01:48:07.000Z
|
2018-12-29T23:10:53.000Z
|
import os
import sys
sys.path.append('.')
sys.path.append('..')
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.lines as lines
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
from tools.get_dates_umich import get_dates_umich
from tools.staticmap_for_gps import map_for_gps
from tools.data_manager import DataManager
from tools.view_lidar import hokuyo_plot
from tools.view_lidar import threshold_lidar_pts
class VisualizerFrame(tk.Frame):
"""
This is the main window where the robot data is seen by the user.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.label = None
self.ax_map = None
self.ax_gps = None
self.ax_lidar = None
self.map_plot = None
self.gps_plot = None
self.lidar_plot = None
self.canvas = None
self.data_manager = None
self.gps_data = None
self.lidar_data = None
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.map_image = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.label = tk.Label(self, text="Viewer")
self.label.pack(side=tk.TOP)
self.fig = Figure(figsize=(5, 4), dpi=100)
self.ax_map = self.fig.add_subplot(111)
self.ax_gps = self.fig.add_subplot(111)
self.ax_lidar = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, master=self.master)
self.canvas.draw()
self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)
def callback_initialize_data_manager(self):
"""
This callback responds to the *Load Data* button.
:return: None
"""
date = self.parent.toolbar.date.get()
if self.data_manager is None:
self.setup_data(date)
else:
if self.data_manager.date is not date:
os.chdir('../..') # TODO patched here - add this to end of load_gps() / load_lidar() functions
self.setup_data(date)
else:
pass
def setup_data(self, date):
"""
This function sets up all of the data (except lidar) needed by the application.
:param date: Determines which date from the robotics dataset to use.
:type date: str.
:return: None
"""
if self.data_manager is not None:
os.chdir(self.data_manager.owd)
self.ax_gps.clear()
self.ax_map.clear()
self.ax_lidar.clear()
self.canvas.draw()
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.parent.set_status('DM_START', hold=True)
self.data_manager = DataManager(date)
self.data_manager.setup_data_files('sensor_data')
self.data_manager.load_gps()
x_coords, y_coords = map_for_gps(self.data_manager.data_dict, self.data_manager.data_dir)
self.lidar_data = None
self.gps_data = [x_coords, y_coords] # in image coords
self.map_image = mpimg.imread(os.path.join(self.data_manager.data_dir, 'map.png'))
self.label.config(text='Viewer')
self.parent.set_status('DM_READY')
def callback_gps_on(self):
"""
This callback responds to the *On* button under the *GPS Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.gps_on:
self.gps_on = True
self.parent.set_status('GPS_START')
idx = self.get_idx_for_gps_update()
self.update_timestamp(idx)
self.gps_plot = self.ax_gps.plot(self.gps_data[0][:idx], self.gps_data[1][:idx], 'r')[0]
                self.canvas.draw()  # draw() replaces the deprecated canvas.show()
self.parent.set_status('GPS_READY')
else:
pass
else:
self.callback_lidar_off()
self.callback_gps_on()
def callback_gps_off(self):
"""
This callback responds to the *Off* button under the *GPS Control* menu.
:return: None
"""
if self.gps_on:
self.gps_on = False
self.update_gps(0)
self.label.config(text='Viewer')
self.parent.set_status('GPS_REMOVE')
else:
pass
def callback_gps_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *GPS Control* menu.
:return: None
"""
self.gps_on = True
idx = self.get_idx_for_gps_update()
self.update_gps(idx)
self.update_timestamp(idx)
self.parent.set_status('GPS_UPDATE')
def update_gps(self, idx):
"""
This function updates the GPS data that is displayed in the main viewing window.
:param idx: Index into the array of GPS data that is to be displayed.
:type idx: int.
:return: None
"""
if self.gps_data is not None:
self.gps_plot.set_xdata(self.gps_data[0][:idx])
self.gps_plot.set_ydata(self.gps_data[1][:idx])
self.canvas.draw()
else:
pass
def update_timestamp(self, idx):
"""
This function updates the timestamp in the main viewing window.
:param idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:type idx: int.
:return: None
"""
curr_tstamp = self.get_timestamp_for_gps_update(idx)
self.label.config(text=str('time stamp: ' + curr_tstamp))
def get_idx_for_gps_update(self):
"""
This function returns the index to be used for updating the GPS data.
:return: int -- the index to be used for the GPS update
"""
slider_val = self.parent.control.gps_control.selection_scale.get()
idx_ratio = len(self.gps_data[0]) / 100
return int(slider_val * idx_ratio)
def get_timestamp_for_gps_update(self, gps_data_idx):
"""
This function returns the timestamp in a readable format for the given GPS data index.
:param gps_data_idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:return: str -- the timestamp
"""
idx_ratio = len(self.data_manager.data_dict['gps']['tstamp']) / len(self.gps_data[0])
idx = int(gps_data_idx * idx_ratio) - 1
ts = int(self.data_manager.data_dict['gps']['tstamp'][idx] / 1000000)
return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def callback_map_on(self):
"""
This callback responds to the *On* button under the *Map Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.map_on:
self.map_on = True
if self.map_image is not None:
self.ax_map.imshow(self.map_image)
# draw scale on the map
map_scale = self.get_map_scale()
line = lines.Line2D([0, 200], [0, 0], linewidth=4, color='b')
self.ax_map.add_line(line)
distance = map_scale * 200
if distance > 1000:
scale_str = "scale = " + str(float("%.2f" % (distance / 1000))) + " kilometers"
else:
scale_str = "scale = " + str(float("%.2f" % (distance))) + " meters"
self.ax_map.text(0, -10, scale_str, fontsize=8)
self.canvas.draw()
self.parent.set_status('MAP_READY')
else:
self.parent.set_status('MAP_ERROR')
else:
pass
else:
self.callback_lidar_off()
self.callback_map_on()
def callback_map_off(self):
"""
This callback responds to the *Off* button under the *Map Control* menu.
:return: None
"""
if self.map_on:
self.map_on = False
self.ax_map.clear()
if self.gps_on:
self.gps_on = False
self.callback_gps_on() # because the previous line clears both map and gps
self.canvas.draw()
else:
pass
def callback_date_changed(self):
"""
This callback responds to a change in the date selection menu in the toolbar.
:return: None
"""
new_date = self.parent.toolbar.date.get() # Need to call get() because this is a StringVar object
if self.parent.toolbar.date is not new_date:
self.parent.toolbar.date.set(new_date)
else:
pass
def get_map_scale(self):
"""
This function calculates the map scale in units of meters per pixel.
:return: float64 -- map scale (m/px)
"""
k = 111000 # meters per degree of latitude (approx.)
lat_range = self.data_manager.data_dict['gps_range'][0]
d_lat_range = abs(lat_range[0] - lat_range[1])
d_x_pixels = abs(max(self.gps_data[0]) - min(self.gps_data[0]))
map_scale = d_lat_range * k / d_x_pixels
return map_scale # units of meters per pixel
def callback_lidar_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *Lidar Control* menu.
:return: None
"""
self.lidar_on = True
idx = self.get_idx_for_lidar_update()
self.update_lidar(idx)
# self.update_timestamp(idx)
self.parent.set_status('Lidar updated')
def get_idx_for_lidar_update(self):
"""
This function returns the index to be used for updating the Lidar data.
:return: int -- the index to be used for the Lidar update
"""
slider_val = self.parent.control.lidar_control.selection_scale.get()
idx_ratio = len(self.lidar_data) / 100
return max(int(slider_val * idx_ratio) - 1, 0)
def update_lidar(self, idx):
"""
This function updates the Lidar data that is displayed in the main viewing window.
:param idx: Index into the array of Lidar data that is to be displayed.
:type idx: int.
:return: None
"""
if self.lidar_data is not None:
yt, xt, _ = threshold_lidar_pts(self.lidar_data[idx])
self.lidar_plot.set_xdata(xt)
self.lidar_plot.set_ydata(yt)
self.canvas.draw()
else:
pass
def callback_lidar_on(self):
"""
This callback responds to the *On* button under the *Lidar Control* menu.
:return: None
"""
if not self.lidar_on:
self.lidar_on = True
self.callback_map_off()
self.callback_gps_off()
if self.data_manager is None:
self.callback_initialize_data_manager()
if not 'lidar' in self.data_manager.data_dict.keys():
self.data_manager.setup_data_files('hokuyo')
pickled = True
delete_pickle = False
self.data_manager.load_lidar(4000, pickled, delete_pickle) # TODO - global constant for lidar samples
self.lidar_data = self.data_manager.data_dict['lidar']
xlimits, ylimits = [-32, 32], [-32, 32]
self.ax_lidar.set_xlim(xlimits)
self.ax_lidar.set_ylim(ylimits)
hokuyo_plot(self.ax_lidar)
yt, xt, _ = threshold_lidar_pts(self.lidar_data[0])
self.lidar_plot = self.ax_lidar.plot(xt, yt, 'r.')[0]
            self.canvas.draw()  # draw() replaces the deprecated canvas.show()
else:
pass
def callback_lidar_off(self):
"""
This callback responds to the *Off* button under the *Lidar Control* menu.
:return: None
"""
if self.lidar_on:
self.lidar_on = False
self.ax_lidar.clear()
self.canvas.draw()
else:
pass
class ToolbarFrame(tk.Frame):
"""
This class represents the toolbar at the top of the window.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.date = None
self.dates = get_dates_umich()
self.load_button = None
self.option_menu = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.dates = get_dates_umich()
self.load_button = tk.Button(self, text="Load Data")
self.load_button.pack(side=tk.LEFT, padx=2, pady=2)
self.date = tk.StringVar(self)
self.date.set(self.dates[24])
self.option_menu = tk.OptionMenu(self, self.date, *self.dates, command=self.callback_date_changed)
self.option_menu.pack(side=tk.LEFT, padx=2, pady=2)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.load_button.config(command=self.parent.window.callback_initialize_data_manager)
def callback_date_changed(self, event):
self.parent.window.callback_date_changed()
class ControlFrame(tk.Frame):
"""
This class represents the controls on the right hand side of the main
window. There are two nested classes for the slam and map controls.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = parent
self.slam_control = None
self.map_control = None
self.lidar_control = None
self.widgets()
class GpsControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.selection_scale = None
self.scale_val = None
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="GPS Control", bg="blue", fg="white")
label.pack(side=tk.TOP, fill=tk.X)
self.selection_scale = tk.Scale(self, orient=tk.HORIZONTAL, to=100, variable=self.scale_val)
self.selection_scale.set(100)
self.selection_scale.pack(side=tk.TOP)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_gps_on)
self.off_button.config(command=self.root.window.callback_gps_off)
self.selection_scale.bind("<ButtonRelease-1>", self.root.window.callback_gps_slider_changed)
class MapControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="Map Control", bg="blue", fg="white")
label.pack(fill=tk.X)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_map_on)
self.off_button.config(command=self.root.window.callback_map_off)
class LidarControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.scale_val = None
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="Lidar Control", bg="blue", fg="white")
label.pack(side=tk.TOP, fill=tk.X)
self.selection_scale = tk.Scale(self, orient=tk.HORIZONTAL, to=100, variable=self.scale_val)
self.selection_scale.set(100)
self.selection_scale.pack(side=tk.TOP)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_lidar_on)
self.off_button.config(command=self.root.window.callback_lidar_off)
self.selection_scale.bind("<ButtonRelease-1>", self.root.window.callback_lidar_slider_changed)
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.gps_control = self.GpsControlFrame(self, self.root)
self.gps_control.pack(fill=tk.X)
self.map_control = self.MapControlFrame(self, self.root)
self.map_control.pack(fill=tk.X)
self.lidar_control = self.LidarControlFrame(self, self.root)
self.lidar_control.pack(fill=tk.X)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.gps_control.bind_widgets()
self.map_control.bind_widgets()
self.lidar_control.bind_widgets()
class MainWindow(tk.Tk):
"""
This is the main window for the application. Here the main layout is
established using a combination of the above classes and individual
tkinter widgets.
"""
def __init__(self, parent):
tk.Tk.__init__(self, parent)
self.parent = parent
self.status_text = dict(READY="Ready",
DM_START="Initializing data manager ...",
DM_READY="Data is ready",
DM_NOT_READY="Data not loaded",
GPS_START="GPS loading ...",
GPS_READY="GPS is ready",
GPS_REMOVE="GPS removed",
GPS_UPDATE="GPS updated",
MAP_START="Map loading ...",
MAP_READY="Map is ready",
MAP_REMOVE="Map removed",
MAP_ERROR="Must load data before map can be displayed")
self.STATUS_DELAY = 2000 # (ms) delay between status changes
self.title("Robot Data Visualizer")
self.mainWidgets()
def mainWidgets(self):
"""
Set up widgets for the main window frame.
:return: None
"""
# Toolbar
self.toolbar = ToolbarFrame(self)
self.toolbar.pack(side=tk.TOP, fill=tk.X)
# Status bar
self.status = tk.Label(self, text=self.status_text['READY'], bd=1, relief=tk.SUNKEN, anchor=tk.W)
self.status.pack(side=tk.BOTTOM, fill=tk.X)
# Controls - GPS and Map
self.control = ControlFrame(self)
self.control.pack(side=tk.RIGHT, fill=tk.Y)
# Main viewing window
self.window = VisualizerFrame(self)
self.window.pack(side=tk.LEFT, padx=2, pady=2)
# Bind widgets to their callback functions
self.toolbar.bind_widgets()
self.control.bind_widgets()
def set_status(self, status, hold=False):
"""
This function sets the status bar at the bottom of the window (with a time delay).
:param status: Key to look up status message in the status_text dictionary.
:type status: str.
:param hold: When *hold=True*, the status update will not time out.
:type hold: bool.
:return: None
"""
if status in self.status_text.keys():
self.status.config(text=self.status_text[status])
if not hold:
self.status.after(self.STATUS_DELAY, lambda: self.status.config(text=self.status_text['READY']))
else:
self.status.config(text=str(status))
if not hold:
self.status.after(self.STATUS_DELAY, lambda: self.status.config(text=self.status_text['READY']))
if __name__ == '__main__':
app = MainWindow(None)
app.mainloop()
| 34.091627
| 117
| 0.580445
| 2,749
| 21,580
| 4.381593
| 0.121863
| 0.027895
| 0.023661
| 0.018265
| 0.575924
| 0.500789
| 0.441013
| 0.391449
| 0.371523
| 0.321129
| 0
| 0.008758
| 0.317424
| 21,580
| 632
| 118
| 34.14557
| 0.808961
| 0.185218
| 0
| 0.402174
| 0
| 0
| 0.040773
| 0
| 0
| 0
| 0
| 0.003165
| 0
| 1
| 0.105978
| false
| 0.027174
| 0.040761
| 0
| 0.17663
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8fa3bb594a67f398ad5e9f8e305ca9da2fda5ed
| 1,780
|
py
|
Python
|
day10/day10.py
|
BroderickCarlin/AdventOfCode
|
52d12d16f3d291a51984e6d85dbe97e604abc005
|
[
"MIT"
] | null | null | null |
day10/day10.py
|
BroderickCarlin/AdventOfCode
|
52d12d16f3d291a51984e6d85dbe97e604abc005
|
[
"MIT"
] | null | null | null |
day10/day10.py
|
BroderickCarlin/AdventOfCode
|
52d12d16f3d291a51984e6d85dbe97e604abc005
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
lengths = "187,254,0,81,169,219,1,190,19,102,255,56,46,32,2,216"
suffix = [17, 31, 73, 47, 23]
num_rounds = 64
def puzzle1():
    knot = list(range(256))  # list() so the slice assignments below work on Python 3
skip_size = 0
idx1 = 0
for l in [int(a) for a in lengths.split(",")]:
idx2 = idx1 + l
k = []
if idx2 >= len(knot):
k = knot[idx1:] + knot[:idx2 - len(knot)]
else:
k = knot[idx1:idx2]
k = list(reversed(k))
if idx2 >= len(knot):
knot[idx1:] = k[:len(knot) - idx1]
knot[:idx2 - len(knot)] = k[len(knot) - idx1:]
else:
knot[idx1:idx2] = k
idx1 += skip_size + l
while idx1 >= len(knot): idx1 -= len(knot)
skip_size += 1
return knot[0] * knot[1]
def puzzle2():
    knot = list(range(256))  # as in puzzle1: a mutable list, not a range
hash_knot = ""
skip_size = 0
idx1 = 0
for _ in range(num_rounds):
        for l in list(lengths.encode()) + suffix:  # .encode() yields the ASCII byte values on Python 3
idx2 = idx1 + l
k = []
if idx2 >= len(knot):
k = knot[idx1:] + knot[:idx2 - len(knot)]
else:
k = knot[idx1:idx2]
k = list(reversed(k))
if idx2 >= len(knot):
knot[idx1:] = k[:len(knot) - idx1]
knot[:idx2 - len(knot)] = k[len(knot) - idx1:]
else:
knot[idx1:idx2] = k
idx1 += skip_size + l
while idx1 >= len(knot): idx1 -= len(knot)
skip_size += 1
for x in range(16):
s = 0
for y in range(16):
s ^= knot[x * 16 + y]
hash_knot += "%0.2X" % s
return hash_knot
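
def twist(knot, idx1, length):
    # Hedged refactor sketch (added, not in the original solution): puzzle1
    # and puzzle2 duplicate this reverse-a-wrapped-slice step; hoisting it
    # into a helper like this would remove the duplication.
    n = len(knot)
    idx2 = idx1 + length
    if idx2 >= n:
        k = list(reversed(knot[idx1:] + knot[:idx2 - n]))
        knot[idx1:] = k[:n - idx1]
        knot[:idx2 - n] = k[n - idx1:]
    else:
        knot[idx1:idx2] = reversed(knot[idx1:idx2])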
if __name__ == "__main__":
print("1: {}".format(puzzle1()))
print("2: {}".format(puzzle2()))
| 24.383562
| 64
| 0.455056
| 240
| 1,780
| 3.291667
| 0.283333
| 0.141772
| 0.111392
| 0.050633
| 0.559494
| 0.559494
| 0.516456
| 0.516456
| 0.516456
| 0.516456
| 0
| 0.104641
| 0.382584
| 1,780
| 72
| 65
| 24.722222
| 0.614195
| 0.023596
| 0
| 0.654545
| 0
| 0.018182
| 0.043779
| 0.029954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0
| 0
| 0.072727
| 0.036364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8fc7cc35ebc665797970c840fc5d039b1988b5c
| 1,914
|
py
|
Python
|
17tensorflow/tf2/2my_model.py
|
cheerfulwang/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2
|
2021-01-04T10:44:44.000Z
|
2022-02-13T07:53:41.000Z
|
17tensorflow/tf2/2my_model.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | null | null | null |
17tensorflow/tf2/2my_model.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2
|
2020-11-23T08:58:51.000Z
|
2022-02-13T07:53:42.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
# 超参
num_words = 2000
num_tags = 12
num_departments = 4
# 输入
body_input = keras.Input(shape=(None,), name='body')
title_input = keras.Input(shape=(None,), name='title')
tag_input = keras.Input(shape=(num_tags,), name='tag')
# 嵌入层
body_feat = layers.Embedding(num_words, 64)(body_input)
title_feat = layers.Embedding(num_words, 64)(title_input)
# 特征提取层
body_feat = layers.LSTM(32)(body_feat)
title_feat = layers.LSTM(128)(title_feat)
features = layers.concatenate([title_feat,body_feat, tag_input])
# 分类层
priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(features)
department_pred = layers.Dense(num_departments, activation='softmax', name='department')(features)
# 构建模型
model = keras.Model(inputs=[body_input, title_input, tag_input],
outputs=[priority_pred, department_pred])
model.summary()
keras.utils.plot_model(model, 'multi_model.png', show_shapes=True)
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss={'priority': 'binary_crossentropy',
'department': 'categorical_crossentropy'},
loss_weights=[1., 0.2])
import numpy as np
# 载入输入数据
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tag_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')
# 标签
priority_label = np.random.random(size=(1280, 1))
department_label = np.random.randint(2, size=(1280, num_departments))
# 训练
history = model.fit(
{'title': title_data, 'body':body_data, 'tag':tag_data},
{'priority':priority_label, 'department':department_label},
batch_size=32,
epochs=5
)
model.save('model_save.h5')
del model
model = keras.models.load_model('model_save.h5')
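
# Hedged usage sketch (added): after reloading, the model scores new samples
# shaped like the training inputs above.
sample = {'title': title_data[:1], 'body': body_data[:1], 'tag': tag_data[:1]}
priority, department = model.predict(sample)
print(priority.shape, department.shape)  # (1, 1) and (1, num_departments)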
| 29.446154
| 98
| 0.719436
| 269
| 1,914
| 4.925651
| 0.371747
| 0.030189
| 0.045283
| 0.045283
| 0.179623
| 0.179623
| 0.093585
| 0.05283
| 0
| 0
| 0
| 0.036527
| 0.127482
| 1,914
| 65
| 99
| 29.446154
| 0.756886
| 0.056426
| 0
| 0
| 0
| 0
| 0.102235
| 0.013408
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102564
| 0
| 0.102564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ffacba13563fc63e94eff5bc851a3e548d81b6
| 4,566
|
py
|
Python
|
rain/cloud/system/system.py
|
SuPerCxyz/rain
|
578b6d125f535414d3ea3fcfee4015b70fed560c
|
[
"Apache-2.0"
] | 2
|
2018-12-20T01:38:56.000Z
|
2018-12-29T14:49:36.000Z
|
rain/cloud/system/system.py
|
SuPerCxyz/rain
|
578b6d125f535414d3ea3fcfee4015b70fed560c
|
[
"Apache-2.0"
] | null | null | null |
rain/cloud/system/system.py
|
SuPerCxyz/rain
|
578b6d125f535414d3ea3fcfee4015b70fed560c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import platform
import time
from getdevinfo import getdevinfo
import psutil
from rain.common import rain_log
from rain.common import utils
from rain.common.utils import async_call
logger = rain_log.logg(__name__)
class SystemInfo(object):
"""system information.
Collect system information, including cpu, memory, hostname, boot time,
login information...
"""
def __init__(self):
self.thread = {}
def _load_stat(self):
"""Collecting system load.
"""
cpu_count = psutil.cpu_count()
with open("/proc/loadavg") as f:
con = f.read().split()
load_1 = con[0]
load_5 = con[1]
load_15 = con[2]
sys_load_1 = round(float(load_1)/cpu_count * 100, 2)
sys_load_5 = round(float(load_5)/cpu_count * 100, 2)
sys_load_15 = round(float(load_15)/cpu_count * 100, 2)
system_load = {
'sys_load_1': sys_load_1,
'sys_load_5': sys_load_5,
'sys_load_15': sys_load_15,
'load_1': load_1,
'load_5': load_5,
'load_15': load_15
}
logger.info('Collect system load.')
return system_load
@async_call
def _cpu_percent(self):
tmp = psutil.cpu_percent(interval=1, percpu=True)
self.thread['cpu_percent'] = tmp
@async_call
def _cpus_times_percent(self):
tmp = psutil.cpu_times_percent(interval=1, percpu=True)
self.thread['cpus_times_percent'] = tmp
def get_cpuinfo_info(self):
"""Collect the number of cpu and usage information and
return the dictionary type.
"""
cpu_count = psutil.cpu_count()
self._cpu_percent()
self._cpus_times_percent()
while True:
if len(self.thread.keys()) == 2:
break
time.sleep(0.1)
cpu_percent_info = []
for cpu in self.thread['cpus_times_percent']:
percent_info = {
'user': cpu.user,
'system': cpu.system,
'idle': cpu.idle,
'iowait': cpu.iowait
}
cpu_percent_info.append(percent_info)
system_load = self._load_stat()
cpu_info_dict = {
'cpu_count': cpu_count,
'cpu_percent': self.thread['cpu_percent'],
'cpu_percent_info': cpu_percent_info,
'system_load': system_load
}
logger.info('Collect cpu related information.')
return cpu_info_dict
def get_memcache_info(self):
"""Collect memory and swap information and return dictionary type.
"""
memcache_info = psutil.virtual_memory()
memcache_total = memcache_info.total / 1024 ** 2
memcache_used = memcache_info.used / 1024 ** 2
memcache_available = memcache_info.available / 1024 ** 2
memcache_buff = memcache_info.cached / 1024 ** 2
memcache_cached = memcache_info.cached / 1024 ** 2
memcache_percent = memcache_info.percent
memcache_info_dict = {
'memcache_total_MB': memcache_total,
'memcache_used_MB': memcache_used,
'memcache_available_MB': memcache_available,
'memcache_buff_MB': memcache_buff,
'memcache_cached_MB': memcache_cached,
'memcache_percent': memcache_percent
}
logger.info('Collect memory related information.')
return memcache_info_dict
def _get_user(self):
"""Collect login user information.
"""
user_info_list = []
user_list = psutil.users()
for user in user_list:
user_dict = {}
user_dict['name'] = user.name
user_dict['host'] = user.host
user_dict['conn_time'] = utils.str_time(user.started)
user_info_list.append(user_dict)
return user_info_list
def get_system_info(self):
"""Collect system information.
"""
system_info = {}
system_info['python_version'] = platform.python_version()
system_info['hostname'] = platform.node()
system_info['system_info'] = platform.platform()
system_info['boot_time'] = utils.str_time(psutil.boot_time())
system_info['time'] = time.asctime(time.localtime(time.time()))
system_info['user'] = self._get_user()
logger.info('Collect user login information.')
return system_info
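
# Hedged usage sketch (added, not in the original module):
if __name__ == "__main__":
    info = SystemInfo()
    print(json.dumps(info.get_system_info(), indent=2))
    print(json.dumps(info.get_memcache_info(), indent=2))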
| 33.328467
| 75
| 0.592641
| 537
| 4,566
| 4.724395
| 0.210428
| 0.039417
| 0.025621
| 0.01419
| 0.135199
| 0.067797
| 0.02838
| 0
| 0
| 0
| 0
| 0.023307
| 0.304643
| 4,566
| 136
| 76
| 33.573529
| 0.775748
| 0.091765
| 0
| 0.038462
| 0
| 0
| 0.116769
| 0.005141
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8ffe69de767e55075d5f9e090d7f69a2c93dd80
| 7,517
|
py
|
Python
|
models.py
|
rudrasohan/Trust-Region-Policy-Optimization
|
bbaadf37aa3ea4ccc35907038eea4add9e5e050c
|
[
"MIT"
] | 3
|
2019-11-16T15:40:14.000Z
|
2021-12-28T14:26:36.000Z
|
models.py
|
rudrasohan/Trust-Region-Policy-Optimization
|
bbaadf37aa3ea4ccc35907038eea4add9e5e050c
|
[
"MIT"
] | null | null | null |
models.py
|
rudrasohan/Trust-Region-Policy-Optimization
|
bbaadf37aa3ea4ccc35907038eea4add9e5e050c
|
[
"MIT"
] | null | null | null |
"""Model Definations for trpo."""
import gym
import numpy as np
import torch
import time
import scipy.optimize
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from distributions import DiagonalGaussian
from helpers import get_flat_params, set_flat_params, get_flat_grads
from helpers import sample_trajectories  # uncommented: test_policy_value below depends on it
class Model(object):
"""Generic Model Template"""
def __init__(self,
observation_space,
action_space,
**kwargs):
#super(Model).__init__(**kwargs)
self.observation_space = observation_space
self.action_space = action_space
self.obs_dim = None
self.act_dim = None
if isinstance(self.observation_space, gym.spaces.Box):
self.obs_dim = np.prod(self.observation_space.shape)
else:
self.obs_dim = self.observation_space.n
if isinstance(self.action_space, gym.spaces.Box):
self.act_dim = np.prod(self.action_space.shape)
else:
self.act_dim = self.action_space.n
class MLP_Policy(nn.Module):
"""MLP model fo the network"""
def __init__(self, input_dim, output_dim, name, **kwargs):
super(MLP_Policy, self).__init__()
self.name = name
self.use_new_head = False
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, output_dim)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
if bool(kwargs):
self.use_new_head = kwargs["use_new_head"]
self.fc4 = nn.Linear(64, output_dim)
else:
self.log_std = nn.Parameter(torch.zeros(output_dim))
#print(self.log_std.size())
#self.bn1 = nn.BatchNorm1d(64)
#self.bn2 = nn.BatchNorm1d(64)
def forward(self, x):
#print(self.fc1(x))
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
mean = self.fc3(x)
if self.use_new_head:
std = self.fc4(x)
else:
std = self.log_std.expand(mean.size())
#print(mean)
return mean, std
class MLP_Value(nn.Module):
"""MLP model fo the network"""
def __init__(self, input_dim, output_dim, name, **kwargs):
super(MLP_Value, self).__init__()
self.name = name
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, output_dim)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
#print(self.fc1(x))
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
out = self.fc3(x)
return out
class GaussianMLPPolicy(Model):
"""Gaussian MLP Policy"""
def __init__(self, observation_space, action_space, **kwargs):
Model.__init__(self, observation_space, action_space, **kwargs)
#self.mean_network = MLP(self.obs_dim, self.act_dim, "mean").type(torch.float64)
self.std_net = None
#self.std_network = None
#print(kwargs)
if bool(kwargs):
self.std_net = kwargs["use_std_net"]
if self.std_net:
self.network = MLP_Policy(self.obs_dim, self.act_dim, "MLP_policy", use_new_head=True)#.type(torch.float64)
else:
self.network = MLP_Policy(self.obs_dim, self.act_dim, "MLP_policy")#.type(torch.float64)
def actions(self, obs):
obs = torch.from_numpy(obs)
mean, log_std = self.network(obs)
dist = DiagonalGaussian(mean, log_std)
sample = dist.sample()
return sample, dist.logli(sample)
def get_dists(self, obs):
obs = torch.from_numpy(obs)
mean, log_std = self.network(obs)
dist = DiagonalGaussian(mean, log_std)
return dist
def clear_grads(self):
self.network.zero_grad()
class MLPBaseline(Model):
""""MLP Baseline"""
def __init__(self, observation_space, action_space, **kwargs):
Model.__init__(self, observation_space, action_space, **kwargs)
self.value = MLP_Value(self.obs_dim, 1, "MLP_baseline")
#self.criterion = nn.MSELoss()
#self.optimizer = torch.optim.LBFGS(self.value.parameters())
def predict(self, obs):
obs = torch.tensor(obs)
with torch.no_grad():
val = self.value(obs)
return val
def compute_baseline(self, obs):
obs = Variable(torch.tensor(obs))
return self.value(obs)
def clear_grads(self):
self.value.zero_grad()
def update(self, trajs):
obs = np.asarray(trajs["state"])
#obs = torch.from_numpy(obs)
returns = trajs["returns"]
baselines = trajs["baselines"]
targets = returns * 0.9 + 0.1 * baselines
'''
def closure():
self.clear_grads()
values = self.value(torch.from_numpy(obs))
self.optimizer.zero_grad()
loss = self.criterion(values, targets)
print("LBFGS_LOSS:{}".format(loss))
loss.backward()
return loss
'''
#self.optimizer.step(closure)
#curr_params = get_flat_params(self.value.parameters()).data.detach().double().numpy()
curr_flat_params = get_flat_params(self.value).detach().double().numpy()
def val_loss_grad(x):
set_flat_params(self.value, torch.tensor(x))
self.clear_grads()
values_ = self.compute_baseline(obs)
#print("VALUES",values_.size())
#print("TARGETS",targets.size())
#print((values_-targets).size())
#time1 = time.time()
vf_loss = (values_ - targets).pow(2).mean()
#print("LBFGS_LOSS:{}".format(vf_loss))
#time2 = time.time()
#print("TIME:{}".format(time2-time1))
#for param in self.value.parameters():
# vf_loss += param.pow(2).sum() * 1e-2
vf_loss.backward()
flat_grad = get_flat_grads(self.value)
return (vf_loss.data.double().numpy(), flat_grad.data.double().numpy())
new_params, _, opt_info = scipy.optimize.fmin_l_bfgs_b(val_loss_grad, curr_flat_params, maxiter=25)
set_flat_params(self.value, torch.tensor(new_params))
print(opt_info)
def test_policy_value():
env = gym.make("MountainCarContinuous-v0")
policy = GaussianMLPPolicy(env.observation_space, env.action_space, use_std_net=True)
paths = sample_trajectories(env, policy, 1000)
print(len(paths["rewards"]))
baseline = MLPBaseline(env.observation_space, env.action_space)
compute_advantage_returns(paths, baseline, 0.9, 0.1)
print(paths.keys())
baseline.update(paths)
print(paths['dist'].keys())
    flat_params = get_flat_params(policy.network)
    print(flat_params)
#test_policy_value()
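# A minimal usage sketch of the policy class above (not from the original file;
# assumes a Box-action gym environment and that the DiagonalGaussian module is
# importable, as in this file's imports):
#   import gym
#   import numpy as np
#   env = gym.make("Pendulum-v0")
#   policy = GaussianMLPPolicy(env.observation_space, env.action_space)
#   obs = env.reset().astype(np.float32)
#   action, log_prob = policy.actions(obs)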
| 33.261062
| 119
| 0.600905
| 947
| 7,517
| 4.543823
| 0.182682
| 0.032535
| 0.041831
| 0.027888
| 0.346967
| 0.318383
| 0.261678
| 0.231466
| 0.221241
| 0.221241
| 0
| 0.016339
| 0.275376
| 7,517
| 226
| 120
| 33.261062
| 0.773637
| 0.168285
| 0
| 0.295455
| 0
| 0
| 0.018916
| 0.00409
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.075758
| 0
| 0.287879
| 0.037879
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7405313149ad1d453f1faa1ff9ea0b0aec012d46
| 3,572
|
py
|
Python
|
keeper/v2api/projects.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 5
|
2016-05-16T18:46:26.000Z
|
2019-07-08T15:16:41.000Z
|
keeper/v2api/projects.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 46
|
2016-02-18T16:54:36.000Z
|
2022-03-25T19:43:45.000Z
|
keeper/v2api/projects.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 4
|
2016-08-20T23:10:07.000Z
|
2022-03-25T19:52:09.000Z
|
"""Handlers for project-related APIs."""
from __future__ import annotations
from typing import Dict, Tuple
from flask import request
from flask_accept import accept_fallback
from keeper.auth import token_auth
from keeper.logutils import log_route
from keeper.models import Organization, Product, db
from keeper.services.createproduct import create_product
from keeper.services.updateproduct import update_product
from keeper.taskrunner import launch_tasks
from keeper.v2api import v2api
from ._models import (
ProjectPatchRequest,
ProjectPostRequest,
ProjectResponse,
ProjectsResponse,
)
from ._urls import url_for_project
__all__ = ["get_projects", "get_project", "create_project", "update_project"]
@v2api.route("/orgs/<org>/projects", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_projects(org: str) -> str:
products = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.all()
)
response = ProjectsResponse.from_products(products)
return response.json()
@v2api.route("/orgs/<org>/projects/<slug>", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_project(org: str, slug: str) -> str:
product = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.filter(Product.slug == slug)
.first_or_404()
)
response = ProjectResponse.from_product(product)
return response.json()
@v2api.route("/orgs/<org>/projects", methods=["POST"])
@accept_fallback
@log_route()
@token_auth.login_required
def create_project(org: str) -> Tuple[str, int, Dict[str, str]]:
request_data = ProjectPostRequest.parse_obj(request.json)
organization = Organization.query.filter(
Organization.slug == org
).first_or_404()
try:
product, default_edition = create_product(
org=organization,
slug=request_data.slug,
doc_repo=request_data.source_repo_url,
title=request_data.title,
            default_edition_mode=request_data.default_edition_mode,
)
except Exception:
db.session.rollback()
raise
task = launch_tasks()
response = ProjectResponse.from_product(product, task=task)
project_url = url_for_project(product)
return response.json(), 201, {"Location": project_url}
@v2api.route("/orgs/<org>/projects/<slug>", methods=["PATCH"])
@accept_fallback
@log_route()
@token_auth.login_required
def update_project(org: str, slug: str) -> Tuple[str, int, Dict[str, str]]:
request_data = ProjectPatchRequest.parse_obj(request.json)
product = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.filter(Product.slug == slug)
.first_or_404()
)
try:
product = update_product(
product=product,
new_doc_repo=request_data.source_repo_url,
new_title=request_data.title,
)
except Exception:
db.session.rollback()
raise
task = launch_tasks()
response = ProjectResponse.from_product(product, task=task)
project_url = url_for_project(product)
return response.json(), 200, {"Location": project_url}
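# A hypothetical client-side call against the POST endpoint above. The host,
# org slug, and token are illustrative; the JSON field names follow
# ProjectPostRequest as used in create_project():
#
#   import requests
#
#   resp = requests.post(
#       "https://keeper.example.org/v2/orgs/example-org/projects",
#       json={
#           "slug": "demo-docs",
#           "title": "Demo Docs",
#           "source_repo_url": "https://github.com/example/demo-docs",
#       },
#       headers={"Authorization": "Bearer <token>"},
#   )
#   assert resp.status_code == 201
#   print(resp.headers["Location"])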
| 28.349206
| 77
| 0.676932
| 406
| 3,572
| 5.724138
| 0.226601
| 0.042599
| 0.024096
| 0.02926
| 0.562823
| 0.491394
| 0.477625
| 0.433735
| 0.39673
| 0.326162
| 0
| 0.007505
| 0.216685
| 3,572
| 125
| 78
| 28.576
| 0.823088
| 0.009518
| 0
| 0.417476
| 0
| 0
| 0.04983
| 0.015289
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038835
| false
| 0
| 0.126214
| 0
| 0.203884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7405685566287cf4e859fe85e98cb0c021c50b86
| 2,237
|
py
|
Python
|
plugins/markdown_extensions/katex.py
|
raabrp/rraabblog
|
a1d47ede918f4838ac3bbcff9ef4e7c67f851c32
|
[
"MIT"
] | null | null | null |
plugins/markdown_extensions/katex.py
|
raabrp/rraabblog
|
a1d47ede918f4838ac3bbcff9ef4e7c67f851c32
|
[
"MIT"
] | null | null | null |
plugins/markdown_extensions/katex.py
|
raabrp/rraabblog
|
a1d47ede918f4838ac3bbcff9ef4e7c67f851c32
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Allow server-side KaTeX rendering for Markdown through node.js
The markdown extension adds regex patterns for `$` and `$$` in the source `.md`
file, and applies KaTeX to the intermediate text with a `python-bond` call to
node.js
requires
* node
* npm
* katex (npm install katex)
* python-bond (pip3 install --user python-bond)
KaTeX: https://github.com/Khan/KaTeX
"""
import markdown
from markdown.util import etree
import bond
JS = bond.make_bond('JavaScript')
JS.eval_block(
r'''
katex = require('katex');
function render(s, is_block) {
return katex.renderToString(s, {
displayMode: is_block,
throwOnError: false
});
}
'''
)
katex = JS.callable('render')
memoise = {}
###############################################################################
class MathPattern(markdown.inlinepatterns.Pattern):
def __init__(self, tag, pattern):
super().__init__(pattern)
self.tag = tag
def handleMatch(self, m):
global memoise
        node = etree.Element(self.tag)
node.set('class', 'math')
orig = m.group('math')
entry = (orig, self.tag == 'div')
if entry in memoise:
result = memoise[entry]
else:
result = katex(orig, self.tag == 'div')
memoise[entry] = result
node.text = result
return node
class Katex(markdown.Extension):
def extendMarkdown(self, md, md_globals):
# Regex to detect math delimiters
math_inline_regex = \
r'(?P<prefix>\$)(?P<math>.+?)(?P<suffix>(?<!\s)\2)'
math_block_regex = \
r'(?P<prefix>\$\$|\\begin\{(.+?)\}|\\\[)(?P<math>.+?)(?P<suffix>\2|\\end\{\3\}|\\\])'
# Process math before escapes are processed since escape processing
# will interfere. The order in which the displayed and inlined math
# is registered below matters
md.inlinePatterns.add(
'math_block',
MathPattern('div', math_block_regex),
'<escape'
)
md.inlinePatterns.add(
'math_inline',
MathPattern('span', math_inline_regex),
'<escape'
)
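# A minimal usage sketch (the input text is illustrative; assumes node, npm,
# katex, and python-bond are installed as noted in the module docstring):
#   import markdown
#   html = markdown.markdown(r"Euler: $e^{i\pi} + 1 = 0$", extensions=[Katex()])
#   print(html)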
| 24.053763
| 97
| 0.565042
| 252
| 2,237
| 4.924603
| 0.456349
| 0.028203
| 0.017728
| 0.022562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003041
| 0.265087
| 2,237
| 92
| 98
| 24.315217
| 0.751825
| 0.259723
| 0
| 0.095238
| 0
| 0.02381
| 0.150765
| 0.094683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7408452dfdbed6f56d0e2243de45d1e90b286cdf
| 1,490
|
py
|
Python
|
simpleclassroom/urls.py
|
cbetheridge/simpleclassroom
|
9e99262ffdb4efc0e27566855866dfc26244bf26
|
[
"MIT"
] | null | null | null |
simpleclassroom/urls.py
|
cbetheridge/simpleclassroom
|
9e99262ffdb4efc0e27566855866dfc26244bf26
|
[
"MIT"
] | null | null | null |
simpleclassroom/urls.py
|
cbetheridge/simpleclassroom
|
9e99262ffdb4efc0e27566855866dfc26244bf26
|
[
"MIT"
] | null | null | null |
"""simpleclassroom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from views import views
from views import io
urlpatterns = [
url(r'^$', views.display_classrooms, name='index'),
url(r'^classrooms/', views.display_classrooms, name='classrooms'),
url(r'^student_list/', views.display_students, name='student list'),
url(r'^student_details/', views.display_student_details, name='student view'),
url(r'^io/add_class/', io.add_classroom, name='add class'),
url(r'^io/del_class/', io.delete_classroom, name='delete class'),
url(r'^io/add_student/', io.add_student, name='add student'),
url(r'^io/del_student/', io.delete_student, name='delete student'),
url(r'^io/enroll/', io.enroll_student, name='enroll student'),
url(r'^io/unenroll/', io.unenroll_student, name='unenroll student'),
url(r'^admin/', admin.site.urls),
]
| 42.571429
| 80
| 0.713423
| 228
| 1,490
| 4.574561
| 0.280702
| 0.053691
| 0.034516
| 0.023011
| 0.16395
| 0.16395
| 0.071908
| 0.071908
| 0
| 0
| 0
| 0.006178
| 0.130872
| 1,490
| 34
| 81
| 43.823529
| 0.799228
| 0.42953
| 0
| 0
| 0
| 0
| 0.297746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd9ec9af338573f552a9119ee09d53bff7f7cebd
| 4,939
|
py
|
Python
|
simplereg/data_writer.py
|
gift-surg/SimpleReg
|
9d9a774f5b7823c2256844c9d0260395604fb396
|
[
"BSD-3-Clause"
] | 18
|
2017-11-10T15:09:41.000Z
|
2021-01-12T07:48:46.000Z
|
simplereg/data_writer.py
|
gift-surg/SimpleReg
|
9d9a774f5b7823c2256844c9d0260395604fb396
|
[
"BSD-3-Clause"
] | null | null | null |
simplereg/data_writer.py
|
gift-surg/SimpleReg
|
9d9a774f5b7823c2256844c9d0260395604fb396
|
[
"BSD-3-Clause"
] | 3
|
2019-03-20T14:13:03.000Z
|
2020-01-15T01:32:51.000Z
|
# \file DataWriter.py
# \brief Class to write data
#
# \author Michael Ebner (michael.ebner.14@ucl.ac.uk)
# \date June 2018
import os
import sys
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
from simplereg.definitions import ALLOWED_IMAGES
from simplereg.definitions import ALLOWED_LANDMARKS
from simplereg.definitions import ALLOWED_TRANSFORMS
from simplereg.definitions import ALLOWED_TRANSFORMS_DISPLACEMENTS
class DataWriter(object):
@staticmethod
def write_image(image_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_IMAGES:
raise IOError("Image file extension must be of type %s " %
", or ".join(ALLOWED_IMAGES))
if isinstance(image_sitk, sitk.Image):
sitkh.write_nifti_image_sitk(
image_sitk=image_sitk,
path_to_file=path_to_file,
verbose=verbose)
else:
sitkh.write_nifti_image_itk(
image_itk=image_sitk,
path_to_file=path_to_file,
verbose=verbose)
@staticmethod
def write_vector_image(vector_image_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_IMAGES:
raise IOError("Image file extension must be of type %s " %
", or ".join(ALLOWED_IMAGES))
if isinstance(vector_image_sitk, sitk.Image):
sitkh.write_sitk_vector_image(
vector_image_sitk,
path_to_file,
verbose=verbose,
)
else:
raise ValueError("Only implemented for SimpleITK images")
@staticmethod
def write_landmarks(landmarks_nda, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_LANDMARKS:
raise IOError("Landmark file extension must be of type %s " %
", or ".join(ALLOWED_LANDMARKS))
ph.write_array_to_file(
path_to_file, landmarks_nda, delimiter=" ", access_mode="w",
verbose=verbose)
@staticmethod
def write_transform(transform_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_TRANSFORMS and \
extension not in ALLOWED_TRANSFORMS_DISPLACEMENTS:
raise IOError("Transform file extension must be of type "
"%s (transformation) or %s (displacements)" % (
", ".join(ALLOWED_TRANSFORMS),
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS)))
if extension in ALLOWED_TRANSFORMS:
if isinstance(transform_sitk, sitk.Image):
raise IOError("Cannot convert displacement field (%s) to "
"transform (%s)" % (
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
", ".join(ALLOWED_TRANSFORMS),
))
if isinstance(transform_sitk, sitk.Transform):
ph.create_directory(os.path.dirname(path_to_file))
sitk.WriteTransform(transform_sitk, path_to_file)
if verbose:
ph.print_info("Transform written to '%s'" % path_to_file)
elif isinstance(transform_sitk, np.ndarray):
ph.write_array_to_file(
path_to_file,
transform_sitk,
delimiter=" ",
access_mode="w",
verbose=verbose)
else:
raise IOError("Transform must be of type "
"sitk.Transform or np.ndarray")
else:
if isinstance(transform_sitk, sitk.Transform):
raise IOError("Cannot convert transform (%s) to "
"displacement field (%s)" % (
", ".join(ALLOWED_TRANSFORMS),
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
))
elif isinstance(transform_sitk, sitk.Image):
sitkh.write_nifti_image_sitk(
image_sitk=transform_sitk,
path_to_file=path_to_file,
verbose=verbose)
elif isinstance(transform_sitk, nib.nifti1.Nifti1Image):
ph.create_directory(os.path.dirname(path_to_file))
nib.save(transform_sitk, path_to_file)
else:
raise IOError("Transform must be of type "
"sitk.Image or nibabel.nifti1.Nifti1Image")
| 40.154472
| 78
| 0.578255
| 519
| 4,939
| 5.246628
| 0.194605
| 0.052883
| 0.080793
| 0.046272
| 0.654793
| 0.565553
| 0.471172
| 0.38744
| 0.366875
| 0.271392
| 0
| 0.00559
| 0.348046
| 4,939
| 122
| 79
| 40.483607
| 0.840062
| 0.023284
| 0
| 0.46
| 0
| 0
| 0.110004
| 0.005396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.11
| 0
| 0.16
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd9f005c2266883ac0727dd4f11b65c0cc61acbf
| 3,881
|
py
|
Python
|
configman/datetime_util.py
|
peterbe/configman
|
724d80b25a0ebbb2e75ad69e92a6611494cd68b4
|
[
"BSD-3-Clause"
] | null | null | null |
configman/datetime_util.py
|
peterbe/configman
|
724d80b25a0ebbb2e75ad69e92a6611494cd68b4
|
[
"BSD-3-Clause"
] | null | null | null |
configman/datetime_util.py
|
peterbe/configman
|
724d80b25a0ebbb2e75ad69e92a6611494cd68b4
|
[
"BSD-3-Clause"
] | null | null | null |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, lars@mozilla.com
# Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
def datetime_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DDTHH:MM:SS.S
and convert it into an instance of datetime.datetime
"""
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
return datetime.datetime.strptime(s, '%Y-%m-%d')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
def date_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DD
and convert it into an instance of datetime.date
"""
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
def datetime_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DDTHH:MM:SS.S
"""
return aDate.isoformat()
def date_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DD
"""
return aDate.strftime('%Y-%m-%d')
def hours_str_to_timedelta(hoursAsString):
return datetime.timedelta(hours=int(hoursAsString))
def timedelta_to_seconds(td):
return td.days * 24 * 60 * 60 + td.seconds
def str_to_timedelta(input_str):
""" a string conversion function for timedelta for strings in the format
DD:HH:MM:SS
"""
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
def timedelta_to_str(aTimedelta):
""" a conversion function for time deltas to string in the form
DD:HH:MM:SS
"""
days = aTimedelta.days
temp_seconds = aTimedelta.seconds
    hours = temp_seconds // 3600
    minutes = (temp_seconds - hours * 3600) // 60
seconds = temp_seconds - hours * 3600 - minutes * 60
return '%d:%d:%d:%d' % (days, hours, minutes, seconds)
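# Round-trip example for the two timedelta helpers above:
#   >>> str_to_timedelta('1:02:03:04')
#   datetime.timedelta(days=1, seconds=7384)
#   >>> timedelta_to_str(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
#   '1:2:3:4'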
| 34.651786
| 79
| 0.67328
| 597
| 3,881
| 4.331658
| 0.291457
| 0.017401
| 0.019335
| 0.021268
| 0.306265
| 0.228925
| 0.210364
| 0.208817
| 0.152359
| 0.152359
| 0
| 0.016322
| 0.226488
| 3,881
| 111
| 80
| 34.963964
| 0.845103
| 0.551662
| 0
| 0.097561
| 0
| 0
| 0.044758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195122
| false
| 0
| 0.02439
| 0.04878
| 0.463415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cda678a982b6a913bc586a56ae657d42e29745b5
| 508
|
py
|
Python
|
main.py
|
ki-ljl/Scaffold-Federated-Learning
|
12e04217df3af2c326ea90fef6cff47beaaec485
|
[
"MIT"
] | 9
|
2022-03-02T13:58:29.000Z
|
2022-03-31T06:45:40.000Z
|
main.py
|
ki-ljl/Scaffold-Federated-Learning
|
12e04217df3af2c326ea90fef6cff47beaaec485
|
[
"MIT"
] | null | null | null |
main.py
|
ki-ljl/Scaffold-Federated-Learning
|
12e04217df3af2c326ea90fef6cff47beaaec485
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
@Time:2022/05/05 12:57
@Author:KI
@File:main.py
@Motto:Hungry And Humble
"""
from data_process import clients_wind
from server import Scaffold
def main():
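    # The hyperparameter names are undocumented here; by federated-learning
    # convention they are likely K clients, participation fraction C, local
    # epochs E, batch size B, and r communication rounds (an assumption).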
K, C, E, B, r = 10, 0.5, 30, 50, 10
input_dim = 28
lr = 0.08
options = {'K': K, 'C': C, 'E': E, 'B': B, 'r': r, 'clients': clients_wind,
'input_dim': input_dim, 'lr': lr}
scaffold = Scaffold(options)
scaffold.server()
scaffold.global_test()
if __name__ == '__main__':
main()
| 20.32
| 79
| 0.582677
| 79
| 508
| 3.556962
| 0.582278
| 0.085409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071979
| 0.234252
| 508
| 24
| 80
| 21.166667
| 0.650386
| 0.185039
| 0
| 0
| 0
| 0
| 0.076355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cda9eb07b967369dac4f17bb21af05cd80acf296
| 1,472
|
py
|
Python
|
Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py
|
vipmunot/Data-Analysis-using-Python
|
34586d8cbbc336508c4a7a68abe14944f1096252
|
[
"MIT"
] | null | null | null |
Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py
|
vipmunot/Data-Analysis-using-Python
|
34586d8cbbc336508c4a7a68abe14944f1096252
|
[
"MIT"
] | null | null | null |
Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py
|
vipmunot/Data-Analysis-using-Python
|
34586d8cbbc336508c4a7a68abe14944f1096252
|
[
"MIT"
] | null | null | null |
## 1. Data Structures ##
import pandas as pd
fandango = pd.read_csv('fandango_score_comparison.csv')
print(fandango.head(2))
## 2. Integer Indexes ##
fandango = pd.read_csv('fandango_score_comparison.csv')
series_film = fandango['FILM']
series_rt = fandango['RottenTomatoes']
print(series_film[:5])
print(series_rt[:5])
## 3. Custom Indexes ##
# Import the Series object from pandas
from pandas import Series
film_names = series_film.values
rt_scores = series_rt.values
series_custom = pd.Series(index=film_names, data=rt_scores)
## 4. Integer Index Preservation ##
series_custom = Series(rt_scores, index=film_names)
series_custom[['Minions (2015)', 'Leviathan (2014)']]
fiveten = series_custom[5:10]
print(fiveten)
## 5. Reindexing ##
original_index = series_custom.index.tolist()
sorted_by_index = series_custom.reindex(index = sorted(original_index))
## 6. Sorting ##
sc2 = series_custom.sort_index()
sc3 = series_custom.sort_values()
print(sc2.head(10))
print(sc3.head(10))
## 7. Transforming Columns With Vectorized Operations ##
series_normalized = series_custom/20
## 8. Comparing and Filtering ##
criteria_one = series_custom > 50
criteria_two = series_custom < 75
both_criteria = series_custom[criteria_one & criteria_two]
## 9. Alignment ##
rt_critics = Series(fandango['RottenTomatoes'].values, index=fandango['FILM'])
rt_users = Series(fandango['RottenTomatoes_User'].values, index=fandango['FILM'])
rt_mean = (rt_users + rt_critics) / 2
| 25.37931
| 81
| 0.754076
| 203
| 1,472
| 5.231527
| 0.364532
| 0.135593
| 0.026365
| 0.032015
| 0.12806
| 0.080979
| 0.080979
| 0.080979
| 0
| 0
| 0
| 0.029163
| 0.11481
| 1,472
| 58
| 82
| 25.37931
| 0.785879
| 0.16712
| 0
| 0.068966
| 0
| 0
| 0.123737
| 0.048822
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.206897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdae861a30ba2bb3bd941147a704995ddbb3e7b8
| 4,894
|
py
|
Python
|
pytest_ipynb/plugin.py
|
kevingerman/pytest-ipynb
|
04b5fed4f280983f64254b01e3b24b7733e99224
|
[
"BSD-3-Clause"
] | 104
|
2015-01-21T16:10:46.000Z
|
2021-05-31T06:53:35.000Z
|
pytest_ipynb/plugin.py
|
kevingerman/pytest-ipynb
|
04b5fed4f280983f64254b01e3b24b7733e99224
|
[
"BSD-3-Clause"
] | 26
|
2015-04-09T04:12:48.000Z
|
2018-12-22T18:41:33.000Z
|
pytest_ipynb/plugin.py
|
kevingerman/pytest-ipynb
|
04b5fed4f280983f64254b01e3b24b7733e99224
|
[
"BSD-3-Clause"
] | 21
|
2015-02-06T10:07:28.000Z
|
2021-04-19T21:31:48.000Z
|
import pytest
import os
import sys
import warnings
try:
from exceptions import Exception, TypeError, ImportError
except ImportError:
pass
from runipy.notebook_runner import NotebookRunner
wrapped_stdin = sys.stdin
sys.stdin = sys.__stdin__
sys.stdin = wrapped_stdin
try:
from Queue import Empty
except ImportError:
from queue import Empty
# code copied from runipy main.py
with warnings.catch_warnings():
try:
from IPython.utils.shimmodule import ShimWarning
warnings.filterwarnings('error', '', ShimWarning)
except ImportError:
class ShimWarning(Warning):
"""Warning issued by iPython 4.x regarding deprecated API."""
pass
try:
# IPython 3
from IPython.nbformat import reads, NBFormatError
except ShimWarning:
# IPython 4
from nbformat import reads, NBFormatError
except ImportError:
# IPython 2
from IPython.nbformat.current import reads, NBFormatError
finally:
warnings.resetwarnings()
class IPyNbException(Exception):
""" custom exception for error reporting. """
def pytest_collect_file(path, parent):
if path.fnmatch("test*.ipynb"):
return IPyNbFile(path, parent)
def get_cell_description(cell_input):
"""Gets cell description
Cell description is the first line of a cell,
in one of this formats:
* single line docstring
* single line comment
* function definition
"""
try:
first_line = cell_input.split("\n")[0]
if first_line.startswith(('"', '#', 'def')):
return first_line.replace('"','').replace("#",'').replace('def ', '').replace("_", " ").strip()
    except Exception:
pass
return "no description"
class IPyNbFile(pytest.File):
def collect(self):
with self.fspath.open() as f:
payload = f.read()
self.notebook_folder = self.fspath.dirname
try:
# Ipython 3
self.nb = reads(payload, 3)
except (TypeError, NBFormatError):
# Ipython 2
self.nb = reads(payload, 'json')
self.runner = NotebookRunner(self.nb)
cell_num = 1
for cell in self.runner.iter_code_cells():
yield IPyNbCell(self.name, self, cell_num, cell)
cell_num += 1
def setup(self):
self.fixture_cell = None
def teardown(self):
self.runner.shutdown_kernel()
class IPyNbCell(pytest.Item):
def __init__(self, name, parent, cell_num, cell):
super(IPyNbCell, self).__init__(name, parent)
self.cell_num = cell_num
self.cell = cell
self.cell_description = get_cell_description(self.cell.input)
def runtest(self):
self.parent.runner.km.restart_kernel()
if self.parent.notebook_folder:
self.parent.runner.kc.execute(
"""import os
os.chdir("%s")""" % self.parent.notebook_folder)
if ("SKIPCI" in self.cell_description) and ("CI" in os.environ):
pass
else:
if self.parent.fixture_cell:
self.parent.runner.kc.execute(self.parent.fixture_cell.input, allow_stdin=False)
msg_id = self.parent.runner.kc.execute(self.cell.input, allow_stdin=False)
if self.cell_description.lower().startswith("fixture") or self.cell_description.lower().startswith("setup"):
self.parent.fixture_cell = self.cell
timeout = 20
while True:
try:
msg = self.parent.runner.kc.get_shell_msg(block=True, timeout=timeout)
if msg.get("parent_header", None) and msg["parent_header"].get("msg_id", None) == msg_id:
break
except Empty:
raise IPyNbException(self.cell_num, self.cell_description,
self.cell.input,
"Timeout of %d seconds exceeded executing cell: %s" % (timeout, self.cell.input))
reply = msg['content']
if reply['status'] == 'error':
raise IPyNbException(self.cell_num, self.cell_description, self.cell.input, '\n'.join(reply['traceback']))
def repr_failure(self, excinfo):
""" called when self.runtest() raises an exception. """
if isinstance(excinfo.value, IPyNbException):
return "\n".join([
"Notebook execution failed",
"Cell %d: %s\n\n"
"Input:\n%s\n\n"
"Traceback:\n%s\n" % excinfo.value.args,
])
else:
return "pytest plugin exception: %s" % str(excinfo.value)
def _makeid(self):
description = self.parent.nodeid + "::" + self.name
description += "::" + "cell %d" % self.cell_num
if self.cell_description:
description += ", " + self.cell_description
return description
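# Usage sketch: with this plugin installed, pytest collects notebooks matching
# "test*.ipynb" and turns each code cell into one test item, e.g.
#   pytest test_analysis.ipynb
# (the filename is illustrative). A cell whose first line starts with
# "fixture" or "setup" is re-executed before each subsequent cell.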
| 32.845638
| 122
| 0.599918
| 556
| 4,894
| 5.160072
| 0.307554
| 0.055769
| 0.05298
| 0.025096
| 0.177762
| 0.076333
| 0.056117
| 0.043221
| 0.043221
| 0.043221
| 0
| 0.003452
| 0.289743
| 4,894
| 148
| 123
| 33.067568
| 0.821922
| 0.079281
| 0
| 0.166667
| 0
| 0
| 0.065792
| 0
| 0.009259
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.037037
| 0.12037
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdb4d928fe81a97440ce0c56dea2317a5512f228
| 2,258
|
py
|
Python
|
setup.py
|
vbrinnel/ztflc
|
b1ccab67e5e0e385d8406f179c1ad0c346afa129
|
[
"Apache-2.0"
] | 1
|
2020-04-07T14:36:49.000Z
|
2020-04-07T14:36:49.000Z
|
setup.py
|
vbrinnel/ztflc
|
b1ccab67e5e0e385d8406f179c1ad0c346afa129
|
[
"Apache-2.0"
] | 3
|
2020-01-16T18:25:46.000Z
|
2021-05-19T20:51:52.000Z
|
setup.py
|
vbrinnel/ztflc
|
b1ccab67e5e0e385d8406f179c1ad0c346afa129
|
[
"Apache-2.0"
] | 1
|
2021-03-31T19:47:33.000Z
|
2021-03-31T19:47:33.000Z
|
#! /usr/bin/env python
#
DESCRIPTION = "ztflc: Force photometry lc fitter"
LONG_DESCRIPTION = """ Force photometry lc fitter"""
DISTNAME = "ztflc"
AUTHOR = "Mickael Rigault"
MAINTAINER = "Mickael Rigault"
MAINTAINER_EMAIL = "m.rigault@ipnl.in2p3.fr"
URL = "https://github.com/MickaelRigault/ztflc/"
LICENSE = "BSD (3-clause)"
DOWNLOAD_URL = "https://github.com/MickaelRigault/ztflc/tarball/0.2"
VERSION = "0.2.3"
try:
from setuptools import setup, find_packages
_has_setuptools = True
except ImportError:
from distutils.core import setup
_has_setuptools = False
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import ztfquery
except ImportError:
install_requires.append("ztfquery")
try:
import pandas
except ImportError:
install_requires.append("pandas")
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
if _has_setuptools:
packages = find_packages()
print(packages)
else:
# This should be updated if new submodules are added
packages = ["ztflc"]
setup(
name=DISTNAME,
author=AUTHOR,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
scripts=["bin/forcephoto.py"],
packages=packages,
include_package_data=True,
# package_data={'pysedm': ['data/*.*']},
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Astronomy",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
],
)
| 26.880952
| 68
| 0.637733
| 234
| 2,258
| 5.987179
| 0.504274
| 0.074946
| 0.024268
| 0.032834
| 0.105639
| 0.051392
| 0
| 0
| 0
| 0
| 0
| 0.007186
| 0.260407
| 2,258
| 83
| 69
| 27.204819
| 0.831737
| 0.11116
| 0
| 0.098361
| 0
| 0
| 0.2695
| 0.0225
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0
| 0.114754
| 0
| 0.147541
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdb6e8d6090040ad0dc31239d89e99153192bd44
| 1,927
|
py
|
Python
|
wordfinds/raw.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | 2
|
2021-05-22T19:19:56.000Z
|
2021-08-16T11:34:11.000Z
|
wordfinds/raw.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | null | null | null |
wordfinds/raw.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | 1
|
2021-11-09T13:55:43.000Z
|
2021-11-09T13:55:43.000Z
|
import random
from .utils import filler
from .array import RawWordFindArray, WordArray
class RawWordFind(RawWordFindArray):
def __init__(self, size, wordbank):
super().__init__(size, wordbank)
for word in wordbank.words:
if not self.valid_word_length(word):
                raise ValueError(
                    'The word "{}" cannot fit into a {}x{} array. '
                    'Try using fewer words or shorter ones.'.format(word, *self.size))
total = sum([len(word) for word in wordbank.words])
w,h = size
if total > w * h:
            raise ValueError(f'Cannot fit {total} characters in a {w}x{h} array. Try using fewer words or shorter ones.')
self.letter_array = self.generate()
def directions(self, x, y, word):
return [
(x-len(word), y-len(word)),
(x-len(word), y),
(x-len(word),y+len(word)),
(x, y-len(word)),
(x, y),
(x,y+len(word)),
(x+len(word), y-len(word)),
(x+len(word), y),
(x+len(word),y+len(word)),
]
def find_spots(self, grid, word):
w, h = self.size
for x in range(w):
for y in range(h):
for end in self.directions(x,y,word):
try:
grid.place_word(word,(x,y),end,True)
yield (x,y), end
except (ValueError, IndexError):
pass
def generate(self):
w, h = self.size
grid = WordArray([['.' for _ in range(w)] for _ in range(h)])
for word in self.wordbank.words:
start, end = random.choice(list(self.find_spots(grid, word)))
grid.place_word(word, start, end)
return WordArray([[x if x != '.' else filler() for x in row] for row in grid])
class WordFind(RawWordFind):
pass
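# A minimal usage sketch. The real WordBank class is not shown in this file;
# this module only relies on a `.words` attribute, so a simple namespace is
# assumed to stand in for it:
#   from types import SimpleNamespace
#   wf = WordFind((10, 10), SimpleNamespace(words=['python', 'grid']))
#   for row in wf.letter_array:
#       print(''.join(row))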
| 32.661017
| 120
| 0.511676
| 249
| 1,927
| 3.891566
| 0.273092
| 0.093911
| 0.049536
| 0.055728
| 0.21259
| 0.155831
| 0.155831
| 0.084623
| 0.084623
| 0.084623
| 0
| 0
| 0.359107
| 1,927
| 58
| 121
| 33.224138
| 0.784615
| 0
| 0
| 0.085106
| 0
| 0.021277
| 0.088266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0.042553
| 0.06383
| 0.021277
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdb7047c417fa314c5e02129e1672265cc3318ba
| 2,969
|
py
|
Python
|
src/neon/frontend/aeon_shim.py
|
MUTTERSCHIFF/ngraph-neon
|
762e8ea639cdc671311ee4929bd1ee8cdf83e8bb
|
[
"Apache-2.0"
] | 13
|
2018-03-17T00:27:18.000Z
|
2020-06-18T01:36:34.000Z
|
src/neon/frontend/aeon_shim.py
|
MUTTERSCHIFF/ngraph-neon
|
762e8ea639cdc671311ee4929bd1ee8cdf83e8bb
|
[
"Apache-2.0"
] | 20
|
2018-03-17T14:49:04.000Z
|
2018-04-19T17:47:38.000Z
|
src/neon/frontend/aeon_shim.py
|
NervanaSystems/ngraph-neon
|
8988ab90ee81c8b219ea5c374702e56d7f383302
|
[
"Apache-2.0"
] | 5
|
2018-03-23T22:47:17.000Z
|
2020-10-21T16:15:02.000Z
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, absolute_import
import logging
from builtins import object
import neon as ng
logger = logging.getLogger(__name__)
try:
from aeon import DataLoader
except ImportError:
msg = "\n".join(["",
"Unable to import Aeon module.",
"Please see installation instructions at:",
"*****************",
"https://github.com/NervanaSystems/aeon/blob/rc1-master/README.md",
"*****************",
""])
logger.error(msg)
raise ImportError(msg)
NAME_MAP = {"channels": "C",
"height": "H",
"width": "W",
"frames": "D"}
"""Converts aeon axis names to canonical ngraph axis types."""
class AeonDataLoader(object):
def __init__(self, config, *args, **kwargs):
self.config = config
self._dataloader = DataLoader(config)
self.ndata = self._dataloader.ndata
if self.ndata < self._dataloader.batch_size:
raise ValueError('Number of examples is smaller than the batch size')
def __next__(self):
bufs = next(self._dataloader)
bufs_dict = dict((key, val) for key, val in bufs)
if 'label' in bufs_dict:
bufs_dict['label'] = bufs_dict['label'].flatten()
return bufs_dict
def __iter__(self):
return self
def make_placeholders(self, include_iteration=False):
placeholders = {}
batch_axis = ng.make_axis(self._dataloader.batch_size, name="N")
for placeholder_name, axis_info in self._dataloader.axes_info:
p_axes = ng.make_axes([batch_axis])
for nm, sz in axis_info:
if placeholder_name == 'label':
continue
if nm in NAME_MAP:
nm = NAME_MAP[nm]
p_axes += ng.make_axis(name=nm, length=sz)
placeholders[placeholder_name] = ng.placeholder(p_axes)
if include_iteration:
placeholders['iteration'] = ng.placeholder(axes=())
return placeholders
def reset(self):
self._dataloader.reset()
    def ndata(self):
        return self._dataloader.ndata
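# Usage sketch (the aeon config dict depends on the dataset manifest and is
# illustrative only):
#   loader = AeonDataLoader(config)
#   placeholders = loader.make_placeholders(include_iteration=True)
#   for batch in loader:
#       ...  # batch is a dict keyed by buffer name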
| 35.345238
| 88
| 0.583025
| 337
| 2,969
| 4.967359
| 0.462908
| 0.066906
| 0.015532
| 0.019116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005942
| 0.263052
| 2,969
| 83
| 89
| 35.771084
| 0.759141
| 0.241495
| 0
| 0.036364
| 0
| 0
| 0.127415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.145455
| 0.018182
| 0.327273
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdb91795db8c176b9e6d1d2b0ffc0bc2b063adbd
| 857
|
py
|
Python
|
Lessons/Chapter9Exercise1.py
|
Luderio/Scientific-Computing-with-Python
|
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
|
[
"MIT"
] | null | null | null |
Lessons/Chapter9Exercise1.py
|
Luderio/Scientific-Computing-with-Python
|
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
|
[
"MIT"
] | null | null | null |
Lessons/Chapter9Exercise1.py
|
Luderio/Scientific-Computing-with-Python
|
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
|
[
"MIT"
] | null | null | null |
wordCounter = dict()
while True :
inputFile = input('Enter a file: ')
try :
fileName = open(inputFile)
    except OSError :
fileName = 'invalid'
if fileName == 'invalid' :
if inputFile == 'done' :
break
else :
print('Invalid Input')
continue
for lines in fileName :
lines = lines.rstrip()
words = lines.split()
for wordItems in words :
wordCounter[wordItems] = wordCounter.get(wordItems, 0) + 1
largestWordCount = None
largestWord = None
for word,count in wordCounter.items() :
if largestWordCount is None or count > largestWordCount :
largestWord = word
largestWordCount = count
print('Largest Word:', largestWord, 'Count:', largestWordCount)
print(wordCounter)
continue
| 25.969697
| 70
| 0.574096
| 79
| 857
| 6.227848
| 0.493671
| 0.060976
| 0.069106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003527
| 0.33839
| 857
| 32
| 71
| 26.78125
| 0.864198
| 0
| 0
| 0.074074
| 0
| 0
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdb9f5699b06eaa0f164fb54a701bb1fdb951c1f
| 3,321
|
py
|
Python
|
src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py
|
Bhaskers-Blu-Org2/FeaturizersLibrary
|
229ae38ea233bfb02a6ff92ec3a67c1751c58005
|
[
"MIT"
] | 15
|
2019-12-14T07:54:18.000Z
|
2021-03-14T14:53:28.000Z
|
src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py
|
Lisiczka27/FeaturizersLibrary
|
dc7b42abd39589af0668c896666affb4abe8a622
|
[
"MIT"
] | 30
|
2019-12-03T20:58:56.000Z
|
2020-04-21T23:34:39.000Z
|
src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py
|
Lisiczka27/FeaturizersLibrary
|
dc7b42abd39589af0668c896666affb4abe8a622
|
[
"MIT"
] | 13
|
2020-01-23T00:18:47.000Z
|
2021-10-04T17:46:45.000Z
|
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Generates JSON files based on data previously pickled"""
import lzma
import os
import pickle
import sys
import json
# Note that this isn't used directly, but is required by the picked python content
import pandas as pd
import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment import FileSystem
from CommonEnvironment.StreamDecorator import StreamDecorator
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
@CommandLine.EntryPoint(
)
@CommandLine.Constraints(
zipped_input_filename=CommandLine.FilenameTypeInfo(),
output_stream=None,
)
def EntryPoint(
zipped_input_filename,
output_stream=sys.stdout,
):
"""Generates JSON files based on data previously pickled"""
with StreamDecorator(output_stream).DoneManager(
line_prefix="",
prefix="\nResults: ",
suffix="\n",
) as dm:
output_dir = os.path.join(_script_dir, "..", "GeneratedCode")
FileSystem.RemoveTree(output_dir)
FileSystem.MakeDirs(output_dir)
df = _holiday_data_loader(zipped_input_filename)
#with open('holidays.json', 'w') as f:
#f.write(df.to_json(orient='records', lines=True))
allCountryNames = list(set((df['countryOrRegion'])))
for countryName in allCountryNames:
dfByCountry = df.loc[df['countryOrRegion'] == countryName]
date = [int(x.timestamp()) for x in list(dfByCountry['date'])]
name = list(dfByCountry['normalizeHolidayName'])
date_dict = {"Date" : date}
name_dict = {"Holiday" : name}
out = {}
out.update(date_dict)
out.update(name_dict)
jsonPath = os.path.join(output_dir, "{}.json".format(countryName))
with open(jsonPath, 'w') as f:
json.dump(out, f)
return dm.result
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _holiday_data_loader(_path):
"""Load holiday data as a static initializer."""
with lzma.open(_path, "rb") as fr:
df = pickle.loads(fr.read())
df['countryRegionCode'] = df['countryRegionCode'] \
.apply(lambda x: x if type(x) == str else None)
df['isPaidTimeOff'] = df['isPaidTimeOff'] \
.apply(lambda x: x if type(x) == bool else None)
return df
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try:
sys.exit(CommandLine.Main())
except KeyboardInterrupt:
pass
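# Invoked as a CommonEnvironment command-line script; roughly (the pickle
# filename is illustrative, and the exact invocation depends on
# CommandLine.Main's argument handling):
#   python JsonGenerator.py holidays.pkl.xz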
| 34.957895
| 82
| 0.504065
| 291
| 3,321
| 5.597938
| 0.463918
| 0.022099
| 0.034991
| 0.028238
| 0.081031
| 0.081031
| 0.081031
| 0.056476
| 0
| 0
| 0
| 0
| 0.197531
| 3,321
| 94
| 83
| 35.329787
| 0.611257
| 0.336344
| 0
| 0
| 0
| 0
| 0.078657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0.017241
| 0.172414
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdba82790169d516d43e4d1c83b7c0a26c10e1fe
| 7,152
|
py
|
Python
|
fer.py
|
MahmoudSabra1/Emotion-recognition-song-recommendation
|
5cad8413b6c98cee12798334009fe8942a420527
|
[
"MIT"
] | 11
|
2020-11-11T14:52:05.000Z
|
2022-03-11T11:37:42.000Z
|
fer.py
|
MahmoudSabra1/Emotion-recognition-song-recommendation
|
5cad8413b6c98cee12798334009fe8942a420527
|
[
"MIT"
] | 1
|
2021-06-21T06:42:59.000Z
|
2021-06-21T06:42:59.000Z
|
fer.py
|
MahmoudSabra1/Emotion-recognition-song-recommendation
|
5cad8413b6c98cee12798334009fe8942a420527
|
[
"MIT"
] | 7
|
2021-01-26T03:40:12.000Z
|
2021-12-20T12:24:34.000Z
|
# Two lines that remove tensorflow GPU logs
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from keras.preprocessing.image import ImageDataGenerator
from sklearn import model_selection
from math import ceil
# Loads csv files and appends pixels to X and labels to y
def preprocess_data():
data = pd.read_csv('fer2013.csv')
labels = pd.read_csv('fer2013new.csv')
orig_class_names = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt',
'unknown', 'NF']
n_samples = len(data)
w = 48
h = 48
y = np.array(labels[orig_class_names])
X = np.zeros((n_samples, w, h, 1))
for i in range(n_samples):
X[i] = np.fromstring(data['pixels'][i], dtype=int, sep=' ').reshape((h, w, 1))
return X, y
def clean_data_and_normalize(X, y):
orig_class_names = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt',
'unknown', 'NF']
# Using mask to remove unknown or NF images
y_mask = y.argmax(axis=-1)
mask = y_mask < orig_class_names.index('unknown')
X = X[mask]
y = y[mask]
# Convert to probabilities between 0 and 1
y = y[:, :-2] * 0.1
# Add contempt to neutral and remove it
y[:, 0] += y[:, 7]
y = y[:, :7]
# Normalize image vectors
X = X / 255.0
return X, y
def split_data(X, y):
test_size = ceil(len(X) * 0.1)
# Split Data
x_train, x_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=test_size, random_state=42)
x_train, x_val, y_train, y_val = model_selection.train_test_split(x_train, y_train, test_size=test_size,
random_state=42)
return x_train, y_train, x_val, y_val, x_test, y_test
def data_augmentation(x_train):
shift = 0.1
datagen = ImageDataGenerator(
rotation_range=20,
horizontal_flip=True,
height_shift_range=shift,
width_shift_range=shift)
datagen.fit(x_train)
return datagen
def show_augmented_images(datagen, x_train, y_train):
it = datagen.flow(x_train, y_train, batch_size=1)
plt.figure(figsize=(10, 7))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(it.next()[0][0], cmap='gray')
# plt.xlabel(class_names[y_train[i]])
plt.show()
def define_model(input_shape=(48, 48, 1), classes=7):
num_features = 64
model = Sequential()
# 1st stage
model.add(Conv2D(num_features, kernel_size=(3, 3), input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(num_features, kernel_size=(3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Dropout(0.5))
# 2nd stage
model.add(Conv2D(num_features, (3, 3), activation='relu'))
model.add(Conv2D(num_features, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# 3rd stage
model.add(Conv2D(2 * num_features, kernel_size=(3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(2 * num_features, kernel_size=(3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
# 4th stage
model.add(Conv2D(2 * num_features, (3, 3), activation='relu'))
model.add(Conv2D(2 * num_features, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# 5th stage
model.add(Conv2D(4 * num_features, kernel_size=(3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(4 * num_features, kernel_size=(3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Flatten())
# Fully connected neural networks
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(classes, activation='softmax'))
return model
def plot_acc_loss(history):
# Plot accuracy graph
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('accuracy')
plt.ylim([0, 1.0])
plt.legend(loc='upper left')
plt.show()
# Plot loss graph
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
# plt.ylim([0, 3.5])
plt.legend(loc='upper right')
plt.show()
def save_model_and_weights(model, test_acc):
# Serialize and save model to JSON
test_acc = int(test_acc * 10000)
model_json = model.to_json()
with open('Saved-Models\\model' + str(test_acc) + '.json', 'w') as json_file:
json_file.write(model_json)
# Serialize and save weights to JSON
model.save_weights('Saved-Models\\model' + str(test_acc) + '.h5')
print('Model and weights are saved in separate files.')
def load_model_and_weights(model_path, weights_path):
# Loading JSON model
json_file = open(model_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# Loading weights
model.load_weights(weights_path)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print('Model and weights are loaded and compiled.')
    return model
def run_model():
fer_classes = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear']
X, y = preprocess_data()
X, y = clean_data_and_normalize(X, y)
x_train, y_train, x_val, y_val, x_test, y_test = split_data(X, y)
datagen = data_augmentation(x_train)
epochs = 100
batch_size = 64
print("X_train shape: " + str(x_train.shape))
print("Y_train shape: " + str(y_train.shape))
print("X_test shape: " + str(x_test.shape))
print("Y_test shape: " + str(y_test.shape))
print("X_val shape: " + str(x_val.shape))
print("Y_val shape: " + str(y_val.shape))
# Training model from scratch
model = define_model(input_shape=x_train[0].shape, classes=len(fer_classes))
model.summary()
model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs,
steps_per_epoch=len(x_train) // batch_size,
validation_data=(x_val, y_val), verbose=2)
test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size)
plot_acc_loss(history)
save_model_and_weights(model, test_acc)
run_model()
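# Reloading a previously saved model for inference (file names are
# illustrative; load_model_and_weights above returns the compiled model):
#   model = load_model_and_weights('Saved-Models\\model9123.json',
#                                  'Saved-Models\\model9123.h5')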
| 32.216216
| 115
| 0.65646
| 1,028
| 7,152
| 4.38035
| 0.224708
| 0.055074
| 0.046414
| 0.053742
| 0.408616
| 0.349545
| 0.310904
| 0.271597
| 0.257384
| 0.249167
| 0
| 0.025919
| 0.201622
| 7,152
| 221
| 116
| 32.361991
| 0.762697
| 0.085011
| 0
| 0.251701
| 0
| 0
| 0.100936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068027
| false
| 0
| 0.061224
| 0
| 0.163265
| 0.054422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdbd2bded66eee36ec46ada4de75a010512f317b
| 2,962
|
py
|
Python
|
app/requests.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
app/requests.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
app/requests.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
import urllib.request
import json
from .models import Source,Article
from . import main
# Getting Api Key
api_Key = None
#Getting the base urls
sources_base_url = None
articles_base_url = None
def configure_request(app):
'''
Function to acquire the api key and base urls
'''
global api_Key,sources_base_url,articles_base_url
api_Key = app.config['NEWS_API_KEY']
sources_base_url = app.config['NEWS_SOURCES_BASE_URL']
articles_base_url = app.config['NEWS_ARTICLES_BASE_URL']
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = sources_base_url.format(category)
with urllib.request.urlopen(get_sources_url,data=None) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
sources_results = None
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_sources(sources_results_list)
# print(sources_results)
return sources_results
def process_sources(sources_results):
'''
Function that processes the sources result and transform them to a list of Objects
Args:
sources_results: A list of dictionaries that contain sources details
Returns :
sources_list: A list of sources objects
'''
sources_list = []
for source_item in sources_results:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
source_object = Source(id,name,description,url,category)
sources_list.append(source_object)
return sources_list
def get_articles(source):
'''
Function that gets the json response to our url request
'''
get_articles_url = articles_base_url.format(source,api_Key)
with urllib.request.urlopen(get_articles_url,data=None) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
articles_results = None
if get_articles_response['articles']:
articles_results_list = get_articles_response['articles']
articles_results = process_articles(articles_results_list)
return articles_results
def process_articles(articles_results):
'''
Function that processes the articles result and transform them to a list of Objects
Args:
articles_results: A list of dictionaries that contain articles details
Returns :
articles_list: A list of articles objects
'''
articles_list = []
for article_item in articles_results:
name = article_item.get('name')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
if publishedAt and author and urlToImage:
article_object = Article(name,author,title,description,url,urlToImage,publishedAt)
articles_list.append(article_object)
return articles_list
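# Hedged usage sketch (config values hypothetical, not taken from this repo):
# configure_request expects the Flask config to hold format-string endpoints,
# along the lines of
#   NEWS_SOURCES_BASE_URL = 'https://newsapi.org/v2/sources?category={}&apiKey=...'
#   NEWS_ARTICLES_BASE_URL = 'https://newsapi.org/v2/everything?sources={}&apiKey={}'
# after which get_sources('business') returns a list of Source objects and
# get_articles('bbc-news') a list of Article objects.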
| 30.854167
| 85
| 0.778528
| 416
| 2,962
| 5.278846
| 0.173077
| 0.038251
| 0.044627
| 0.02459
| 0.310565
| 0.234062
| 0.116576
| 0.082878
| 0.082878
| 0.082878
| 0
| 0
| 0.135044
| 2,962
| 96
| 86
| 30.854167
| 0.857143
| 0.219109
| 0
| 0
| 0
| 0
| 0.072188
| 0.019043
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.053571
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdc3ceae4eb0b0fc7a29f9482fb7047dcfef58b4
| 727
|
py
|
Python
|
main.py
|
csmyth93/solo_scoring
|
6c1a32a3430058aa7d51be604dcc02d11ce85edd
|
[
"MIT"
] | null | null | null |
main.py
|
csmyth93/solo_scoring
|
6c1a32a3430058aa7d51be604dcc02d11ce85edd
|
[
"MIT"
] | null | null | null |
main.py
|
csmyth93/solo_scoring
|
6c1a32a3430058aa7d51be604dcc02d11ce85edd
|
[
"MIT"
] | null | null | null |
def get_names():
    names = []
    while True:
        name = input("Enter players name: ")
        if name != 'done':
            print(f'{name} added to the list of players')
            names.append(name)
            continue
        else:
            break
    return names

def get_player_scores(players):
    # Returning a single `scores` list from inside the loop (as the original
    # did) drops every player's cards but one, so accumulate per player.
    all_scores = {}
    for player in players:
        scores = []
        while True:
            score = input(f"What are {player}'s final cards? ")
            if score != 'end':
                scores.append(score)
                continue
            else:
                break
        all_scores[player] = scores
    return all_scores

if __name__ == '__main__':
    players = get_names()
    print(players)
    scores = get_player_scores(players)
    print(scores)
| 22.71875
| 63
| 0.515818
| 78
| 727
| 4.628205
| 0.448718
| 0.033241
| 0.094183
| 0.127424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.389271
| 727
| 31
| 64
| 23.451613
| 0.813063
| 0
| 0
| 0.296296
| 0
| 0
| 0.141678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.148148
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdc442d6b9ce4b9876165256e71bc1dbffd0f620
| 760
|
py
|
Python
|
python/twisted/web_echo.py
|
y2ghost/work
|
b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3
|
[
"MIT"
] | null | null | null |
python/twisted/web_echo.py
|
y2ghost/work
|
b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3
|
[
"MIT"
] | null | null | null |
python/twisted/web_echo.py
|
y2ghost/work
|
b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3
|
[
"MIT"
] | null | null | null |
from twisted.protocols import basic
from twisted.internet import protocol, reactor

class HTTPEchoProtocol(basic.LineReceiver):
    def __init__(self):
        self.lines = []

    def lineReceived(self, line):
        self.lines.append(line.decode())
        if not line:
            self.sendResponse()

    def sendResponse(self):
        self.sendLine(b"HTTP/1.1 200 OK")
        self.sendLine(b"")
        responseBody = "You said:\r\n\r\n" + "\r\n".join(self.lines)
        data = responseBody.encode()
        self.transport.write(data)
        self.transport.loseConnection()

class HTTPEchoFactory(protocol.ServerFactory):
    def buildProtocol(self, addr):
        return HTTPEchoProtocol()

reactor.listenTCP(8000, HTTPEchoFactory())
reactor.run()
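# Hedged usage note: once the reactor is running, any HTTP client gets its own
# request header lines echoed back, e.g.
#   curl -i http://localhost:8000/
# (invocation illustrative; any client that sends CRLF-terminated lines ending
# with a blank line triggers sendResponse).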
| 27.142857
| 68
| 0.661842
| 86
| 760
| 5.802326
| 0.546512
| 0.054108
| 0.052104
| 0.016032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015126
| 0.217105
| 760
| 27
| 69
| 28.148148
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.047431
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.095238
| 0.047619
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdc5fa09b3e8bd5d035d3ebb8b21feb4b7b64279
| 2,183
|
py
|
Python
|
core/thirdparty/load_openpose.py
|
jshuhnow/OddEyeCam
|
ed76cd1c29701b7b49f20bcd61e7e72d3140fda8
|
[
"MIT"
] | 8
|
2020-10-08T13:32:33.000Z
|
2021-12-08T10:59:03.000Z
|
core/thirdparty/load_openpose.py
|
jshuhnow/OddEyeCam
|
ed76cd1c29701b7b49f20bcd61e7e72d3140fda8
|
[
"MIT"
] | null | null | null |
core/thirdparty/load_openpose.py
|
jshuhnow/OddEyeCam
|
ed76cd1c29701b7b49f20bcd61e7e72d3140fda8
|
[
"MIT"
] | 1
|
2021-04-15T23:50:13.000Z
|
2021-04-15T23:50:13.000Z
|
import os
import sys
this_dir = os.path.dirname(__file__)
import numpy as np

openpose_path = os.path.join(this_dir, 'openpose')
op_release_path = os.path.join(openpose_path, 'Release')
model_path = os.path.join(openpose_path, 'models')
print(op_release_path)
sys.path.append(op_release_path)
os.environ['PATH'] = os.environ['PATH'] + ';' + openpose_path + '/x64/Release;' + openpose_path + '/bin;'
import pyopenpose as op

opWrapper = op.WrapperPython()
params = dict()
params["model_folder"] = model_path
params["number_people_max"] = 1
params["net_resolution"] = "-1x160"
params["body"] = 1
params["output_resolution"] = "-1x-1"
params["disable_multi_thread"] = True
opWrapper.configure(params)
opWrapper.start()

class PoseEstimator():
    def __init__(self):
        self.RShColor = (0, 140, 255)
        self.LShColor = (0, 255, 215)
        self.NeckColor = (0, 0, 215)
        self.NoseColor = (215, 0, 215)

    def _keypoint_to_index(self, keypoints):
        v = keypoints[:, 1]
        u = keypoints[:, 0]
        # np.int was removed in NumPy 1.24; plain int keeps the same behaviour
        idx = np.array([v, u]).astype(int).transpose()
        return idx

    def find_body_on_2D(self, src_img, verts):
        datum = op.Datum()
        datum.cvInputData = src_img
        opWrapper.emplaceAndPop([datum])
        self.op_img = datum.cvOutputData
        # print(datum.poseKeypoints)

        # Check validity
        if not str(datum.poseKeypoints.shape) == '(1, 25, 3)':
            return np.zeros((25, 3)).astype(int)

        data = datum.poseKeypoints
        # self.RShoulder2D = np.array([data[0,2,0], data[0,2,1]])
        # self.LShoulder2D = np.array([data[0,5,0], data[0,5,1]])
        # self.Neck2D = np.array([data[0,1,0], data[0,1,1]])
        # keypoint = np.array([self.RShoulder2D, self.LShoulder2D, self.Neck2D]).astype(np.int)
        # return keypoint
        keypoints = data[0]
        # switch (u,v) -> (v,u)
        idx = self._keypoint_to_index(keypoints)
        return idx

    def just_find_body_on_2D(self, src_img):
        datum = op.Datum()
        datum.cvInputData = src_img
        opWrapper.emplaceAndPop([datum])
        self.op_img = datum.cvOutputData
        return datum.cvOutputData, datum.poseKeypoints
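# Note: with params["number_people_max"] = 1 and OpenPose's default BODY_25
# model, datum.poseKeypoints is shaped (1, 25, 3): one person, 25 body
# keypoints, and (x, y, confidence) per keypoint. That is exactly the shape
# the validity check in find_body_on_2D tests for.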
| 35.209677
| 107
| 0.639487
| 295
| 2,183
| 4.566102
| 0.328814
| 0.025984
| 0.022272
| 0.03118
| 0.200445
| 0.200445
| 0.161841
| 0.129176
| 0.129176
| 0.129176
| 0
| 0.041812
| 0.211177
| 2,183
| 62
| 108
| 35.209677
| 0.740418
| 0.150252
| 0
| 0.204082
| 0
| 0
| 0.082792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.081633
| 0
| 0.265306
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdc72216af29eaceb6c114484063fc2831f99596
| 420
|
py
|
Python
|
ABC127C/resolve.py
|
staguchi0703/problems_easy
|
82804b99b3ce8104762c3f6f5cc60b009a17bdc8
|
[
"MIT"
] | null | null | null |
ABC127C/resolve.py
|
staguchi0703/problems_easy
|
82804b99b3ce8104762c3f6f5cc60b009a17bdc8
|
[
"MIT"
] | null | null | null |
ABC127C/resolve.py
|
staguchi0703/problems_easy
|
82804b99b3ce8104762c3f6f5cc60b009a17bdc8
|
[
"MIT"
] | null | null | null |
def resolve():
    '''
    code here
    '''
    N, M = [int(item) for item in input().split()]
    LRs = [[int(item) for item in input().split()] for _ in range(M)]
    L_max = 0
    R_min = N
    for L, R in LRs:
        L_max = max(L_max, L)
        R_min = min(R_min, R)
    delta = R_min - L_max
    if delta >= 0:
        print(delta + 1)
    else:
        print(0)

if __name__ == "__main__":
    resolve()
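# Worked example (AtCoder ABC127C, counting IDs accepted by every gate):
# with N=4, M=2 and gates [1,3] and [2,4], the surviving IDs run from
# L_max=2 to R_min=3, so delta = 3 - 2 = 1 >= 0 and the program prints 2.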
| 16.8
| 69
| 0.490476
| 66
| 420
| 2.863636
| 0.393939
| 0.084656
| 0.10582
| 0.148148
| 0.275132
| 0.275132
| 0.275132
| 0
| 0
| 0
| 0
| 0.014815
| 0.357143
| 420
| 24
| 70
| 17.5
| 0.685185
| 0.021429
| 0
| 0
| 0
| 0
| 0.020253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.066667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdc9ffbc19062cc077e25fb215d33c0447db75e0
| 7,109
|
py
|
Python
|
om10/plotting.py
|
drphilmarshall/OM10
|
009c16f0ef4e1c5f8f78c78df3c7711b7be24938
|
[
"MIT"
] | 5
|
2017-02-17T19:43:54.000Z
|
2021-05-19T09:30:53.000Z
|
om10/plotting.py
|
drphilmarshall/OM10
|
009c16f0ef4e1c5f8f78c78df3c7711b7be24938
|
[
"MIT"
] | 55
|
2015-02-06T19:25:58.000Z
|
2021-03-09T07:57:04.000Z
|
om10/plotting.py
|
drphilmarshall/OM10
|
009c16f0ef4e1c5f8f78c78df3c7711b7be24938
|
[
"MIT"
] | 16
|
2015-01-29T23:55:45.000Z
|
2021-04-16T03:06:38.000Z
|
# ======================================================================
# Globally useful modules, imported here and then accessible by all
# functions in this file:

from __future__ import print_function

# Fonts, latex:
import matplotlib
matplotlib.rc('font', **{'family':'serif', 'serif':['TimesNewRoman']})
matplotlib.rc('text', usetex=True)

import corner
import pylab, sys, numpy as np

# ======================================================================

def plot_sample(sample, saveImg=False, fig=None, color='black',
                parameters=('MAGI','IMSEP','VELDISP','ZLENS','ZSRC')):
    """
    Given an OM10 sample, make a corner plot of the required quantities.

    Parameters
    ----------
    parameters : str, tuple
        Names of the lens parameters to plot
    saveImg : bool
        If true, save image with standardized name.
    IQ : float
        Image quality, for reference.
    fig : matplotlib figure object
        Overlay plot on an existing figure

    Returns
    -------
    fig : matplotlib figure object
        New or updated figure
    """
    features, labels = extract_features(sample, parameters)
    if fig is None:
        fig = corner.corner(features, labels=labels, color=color, smooth=1.0)
    else:
        _ = corner.corner(features, labels=labels, color=color, smooth=1.0, fig=fig)
    for ax in fig.axes:
        for item in ([ax.xaxis.label, ax.yaxis.label]):
            item.set_fontsize(20)
        for item in (ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(16)
    if saveImg:
        pngfile = "om10_sample.png"
        pylab.savefig(pngfile)
        print("OM10: Sample plot saved to file:", pngfile)
    return fig

# ======================================================================

def extract_features(x, names):
    """
    Given an OM10 table of lenses, extract the required parameters and
    provide labels for them.

    Parameters
    ----------
    x : Table
        OM10 lens sample.
    names : str, tuple
        Names of features required.

    Returns
    -------
    features : float, ndarray
        Values of requested features, for each lens in the Table
    labels : str, list
        Corresponding axis labels
    """
    features = np.array([])
    labels = []
    p = len(names)
    n = len(x)
    for name in names:
        features = np.append(features, x[name])
        labels.append(axis_labels[name])
    return features.reshape(p, n).transpose(), labels

# ======================================================================

def plot_lens(lens, saveImg=False, IQ=0.7):
    """
    Given an OM10 lens, compute some basic quantities
    and use them to plot a cartoon visualization of the lens.

    Parameters
    ----------
    saveImg : bool
        If true, save image with standardized name.
    IQ : float
        Image quality, for reference.
    """
    # # Force matplotlib to not use any Xwindows backend:
    # if saveImg:
    #     try: matplotlib.use('Agg')
    #     except: pass
    # else:
    #     try: matplotlib.use('TkAgg')
    #     except: pass

    # Pull out data for ease of use:
    id = lens['LENSID'][0]
    xi = lens['XIMG'][0]
    yi = lens['YIMG'][0]
    nim = lens['NIMG'][0]
    mui = lens['MAG'][0]
    md = lens['APMAG_I'][0]
    ms = lens['MAGI_IN'][0]
    xs = lens['XSRC'][0]
    ys = lens['YSRC'][0]
    xd = 0.0
    yd = 0.0
    zd = lens['ZLENS'][0]
    zs = lens['ZSRC'][0]
    q = 1.0 - lens['ELLIP'][0]
    phi = lens['PHIE'][0]
    print("OM10: Plotting image configuration of lens ID ", id)

    # Compute image magnitudes:
    mi = np.zeros(nim)
    lfi = np.zeros(nim)
    for i in range(nim):
        mi[i] = ms - 2.5*np.log10(np.abs(mui[i]))
        lfi[i] = 0.4*(24 - mi[i])
    print("OM10: lens, image magnitudes:", md, mi)
    lfd = 0.4*(24 - md)
    # print("om10.plot_lens: lens, image log fluxes:", lfd, lfi)

    # ------------------------------------------------------------------
    # Compute caustics and critical curves:
    # ------------------------------------------------------------------

    # Start figure:
    fig = pylab.figure(figsize=(8,8))
    # ,aspect='equal')

    # Axes limits, useful sizes:
    xmax = 1.99
    dm = 1.0/10

    # Plot command sets its own axes. 'bp' = blue pentagons
    # pylab.plot(xi, yi, 'bp')
    pylab.plot(xi, yi, color='blue', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')
    pylab.plot(xs, ys, color='lightblue', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')
    pylab.plot(xd, yd, color='orange', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')

    # Ellipse to represent lens brightness:
    ell = matplotlib.patches.Ellipse((xd,yd), width=2*dm*lfd, height=2*q*dm*lfd, angle=phi, alpha=0.2, fc='orange')
    pylab.gca().add_patch(ell)

    # Circles to represent image brightness:
    for i in range(nim):
        cir = pylab.Circle((xi[i],yi[i]), radius=dm*lfi[i], alpha=0.2, fc='blue')
        pylab.gca().add_patch(cir)

    # Circle to represent seeing:
    cir = pylab.Circle((1.5,-1.5), radius=IQ/2.0, alpha=0.1, fc='grey')
    pylab.gca().add_patch(cir)
    text = '{:3.1f}" seeing'.format(IQ)
    pylab.annotate(text, (370,5), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')

    # Legend giving lens, source redshift:
    text1 = "$z_d$ = %5.2f" % zd
    text2 = "$z_s$ = %5.2f" % zs
    pylab.annotate(text1, (10,430), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')
    pylab.annotate(text2, (10,410), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')

    # Plot title:
    title = "OM10 lensed QSO, ID="+str(id)
    pylab.title(title, fontsize=20)

    # Set axes labels:
    pylab.xlabel("x / arcsec", fontsize=20)
    pylab.ylabel("y / arcsec", fontsize=20)
    # Set axis limits:
    pylab.axis([-xmax,xmax,-xmax,xmax])
    # Add a grid:
    pylab.grid(color='grey', linestyle='--', linewidth=0.5)

    # Plot graph to file:
    if saveImg:
        pngfile = "om10_qso_ID="+str(id)+".png"
        pylab.savefig(pngfile)
        print("OM10: Lens plot saved to file:", pngfile)

# ======================================================================

axis_labels = {}
axis_labels['ZLENS'] = '$z_{\\rm d}$'
axis_labels['VELDISP'] = '$\sigma_{\\rm d}$ / km/s'
axis_labels['ELLIP'] = '$\epsilon_{\\rm d}$'
axis_labels['PHIE'] = '$\phi_{\\rm d}$ / km/s'
axis_labels['GAMMA'] = '$\gamma$'
axis_labels['PHIG'] = '$\phi_{\gamma}$'
axis_labels['ZSRC'] = '$z_{\\rm s}$'
axis_labels['MAGI'] = '$i_3$'
axis_labels['MAGI_IN'] = '$i_{\\rm s}$'
axis_labels['IMSEP'] = '$\Delta \\theta$ / arcsec'
axis_labels['i_SDSS_lens'] = '$i_{\\rm d}$ (AB mag)'
axis_labels['i_SDSS_quasar'] = '$i_{\\rm s}$ (AB mag)'
axis_labels['ug'] = '$u-g$ color'
axis_labels['gr'] = '$g-r$ color'
axis_labels['ri'] = '$r-i$ color'
axis_labels['iz'] = '$i-z$ color'
axis_labels['ug'] = '$u-g$ color'
| 30.251064
| 115
| 0.549163
| 904
| 7,109
| 4.253319
| 0.320796
| 0.052016
| 0.011443
| 0.025748
| 0.212484
| 0.183875
| 0.138362
| 0.138362
| 0.111313
| 0.111313
| 0
| 0.024881
| 0.231115
| 7,109
| 234
| 116
| 30.380342
| 0.678558
| 0.319032
| 0
| 0.169643
| 0
| 0
| 0.177336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.035714
| 0
| 0.080357
| 0.044643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdccadfab450a4e9a57ce9f5439e430bde2038d3
| 527
|
py
|
Python
|
tfutils/losses/losses.py
|
njchiang/tf-keras-utils
|
6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd
|
[
"MIT"
] | null | null | null |
tfutils/losses/losses.py
|
njchiang/tf-keras-utils
|
6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd
|
[
"MIT"
] | null | null | null |
tfutils/losses/losses.py
|
njchiang/tf-keras-utils
|
6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd
|
[
"MIT"
] | null | null | null |
# this actually won't work with keras... not exactly a keras utility
import tensorflow as tf

def ae_loss_fn(model, x, y, training=None):
    pred = model(x, training)
    mse = tf.keras.losses.MSE(y, pred)
    return tf.reduce_mean(mse), pred

# function is untested
def vae_loss_fn(model, x, y, training=None):
    z, m, v = model.encoder(x, training)
    pred = model.decoder(z)
    mse = tf.reduce_sum(tf.keras.losses.MSE(y, pred))
    kld = -0.5 * tf.reduce_sum(1 + v - tf.pow(m, 2) - tf.exp(v))
    return mse + kld, pred
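# Note: the KL term above is the closed form
#   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# for a diagonal-Gaussian posterior, so the encoder is assumed to return
# (z, mu, log-variance): `m` is the mean and `v` the log-variance.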
| 35.133333
| 68
| 0.660342
| 94
| 527
| 3.62766
| 0.5
| 0.052786
| 0.064516
| 0.070381
| 0.269795
| 0.269795
| 0.146628
| 0
| 0
| 0
| 0
| 0.009456
| 0.197343
| 527
| 15
| 69
| 35.133333
| 0.79669
| 0.165085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdd5f8ad7b2f42d4bfe80a22a6bf9fc481e565ca
| 2,750
|
py
|
Python
|
U-NET/utils.py
|
HarshZ26/Object-Detection
|
1d73f6aeb7452b0b26bd2713e69f340d129a5ba5
|
[
"MIT"
] | 1
|
2022-03-23T15:49:02.000Z
|
2022-03-23T15:49:02.000Z
|
U-NET/utils.py
|
HarshZ26/Object-Detection
|
1d73f6aeb7452b0b26bd2713e69f340d129a5ba5
|
[
"MIT"
] | null | null | null |
U-NET/utils.py
|
HarshZ26/Object-Detection
|
1d73f6aeb7452b0b26bd2713e69f340d129a5ba5
|
[
"MIT"
] | null | null | null |
from init import *

VOC_CLASSES = [
    "background",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "potted plant",
    "sheep",
    "sofa",
    "train",
    "tv/monitor",
]
VOC_COLORMAP = [
    [0, 0, 0],
    [128, 0, 0],
    [0, 128, 0],
    [128, 128, 0],
    [0, 0, 128],
    [128, 0, 128],
    [0, 128, 128],
    [128, 128, 128],
    [64, 0, 0],
    [192, 0, 0],
    [64, 128, 0],
    [192, 128, 0],
    [64, 0, 128],
    [192, 0, 128],
    [64, 128, 128],
    [192, 128, 128],
    [0, 64, 0],
    [128, 64, 0],
    [0, 192, 0],
    [128, 192, 0],
    [0, 64, 128],
]
palette = np.array(VOC_COLORMAP)

custom_transforms = [transforms.Normalize(mean=[-0.485, -0.456, -0.406], std=[1/0.229, 1/0.224, 1/0.225])]
inv_trans = torchvision.transforms.Compose(custom_transforms)
transform = A.Compose([A.Resize(512, 512),
                       A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ToTensorV2()
                       ])

def calculate_weight(loader):
    weight_map = torch.zeros(21)
    for i, (_, mask) in enumerate(loader):
        mask = mask.permute(0, 3, 1, 2)
        index, counts = torch.unique(torch.argmax(mask, axis=1), sorted=True, return_counts=True)
        for i in range(len(index)):
            weight_map[index[i]] = counts[i]
    weight_map = (mask.size(2)*mask.size(3)*len(loader))/(weight_map)
    return weight_map/21

def calculate_acc(grnd, predicted):
    grnd = torch.argmax(grnd, axis=1)
    predicted = torch.argmax(predicted, axis=1)
    x = torch.eq(grnd, predicted).int()
    # assumes square inputs (H == W)
    acc = torch.sum(x)/(grnd.size(1)*grnd.size(1))
    return acc

def collate_fn(batch):
    data = []  # filled with 64 elements through for loops
    target = []
    for item in batch:  # batch = 64 items list, one item = [image, label]
        im = item[0]
        open_cv_image = np.array(im)
        open_cv_image = open_cv_image.copy()
        transformed = transform(image=open_cv_image, mask=item[1])
        im = transformed['image']
        mask = transformed['mask']
        data.append(im)
        target.append(mask)
    target = torch.stack(target, dim=0)
    data = torch.stack(data, dim=0)
    return [data, target]

def test_img(loader):
    tes_img = iter(loader)
    # `.next()` is the Python 2 iterator API; use the built-in next() instead
    images, masks = next(tes_img)
    print("images", images.size())
    print("labels", masks.size())
    print(np.shape(images))
    img = images[0].squeeze()
    img = inv_trans(img)
    img = img.numpy()
    im2display = img.transpose((1, 2, 0))
    # the original indexed the bound method (`.transpose[0]`), which raises a
    # TypeError; the masks are already (B, H, W, C), so take the first one
    grnd_mask = masks.numpy()[0]
    a1 = np.argmax(grnd_mask, axis=2)
    g_mask = palette[a1]
    plt.imshow(im2display, interpolation='nearest')
    plt.imshow(g_mask)
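# Note: `palette[a1]` relies on NumPy fancy indexing: indexing the (21, 3)
# colormap with an (H, W) array of class indices yields an (H, W, 3) RGB
# image, since VOC_COLORMAP[i] is the display colour of VOC_CLASSES[i].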
| 25.229358
| 105
| 0.577818
| 394
| 2,750
| 3.951777
| 0.34264
| 0.012845
| 0.028259
| 0.011561
| 0.094412
| 0.050096
| 0.035967
| 0.035967
| 0.035967
| 0
| 0
| 0.105036
| 0.241818
| 2,750
| 108
| 106
| 25.462963
| 0.641727
| 0.032
| 0
| 0
| 0
| 0
| 0.060737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.010204
| 0
| 0.081633
| 0.030612
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cdd78b4b371ac658a03d1638d8afdbda0805a759
| 24,528
|
py
|
Python
|
datawinners/accountmanagement/admin.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | 1
|
2015-11-02T09:11:12.000Z
|
2015-11-02T09:11:12.000Z
|
datawinners/accountmanagement/admin.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
datawinners/accountmanagement/admin.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import datetime
import logging

from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.core.exceptions import ValidationError
from django.forms import CharField
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.contrib.auth.models import User, Group
from django_digest.models import PartialDigest
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.contrib.admin.views.main import ChangeList

from datawinners.common.admin.utils import get_text_search_filter, get_admin_panel_filter
from datawinners.project.submission.export import create_excel_response
from datawinners.search.index_utils import get_elasticsearch_handle
from forms import forms
from datawinners.accountmanagement.models import OrganizationSetting, SMSC, PaymentDetails, MessageTracker, Organization, NGOUserProfile, OutgoingNumberSetting
from mangrove.form_model.field import ExcelDate
from mangrove.utils.types import is_empty, is_not_empty
from datawinners.countrytotrialnumbermapping.models import Country, Network
from datawinners.utils import get_database_manager_for_org
from datawinners.feeds.database import feeds_db_for
from django.db.models import Q

admin.site.disable_action('delete_selected')


class DatawinnerAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return False

    def has_add_permission(self, request):
        return False


class OrganizationSettingAdmin(DatawinnerAdmin):
    list_display = ('organization_name', 'organization_id', 'type', 'payment_details', 'activation_date', 'admin_email')
    fields = ('sms_tel_number', 'outgoing_number')
    search_fields = ['organization__name', 'organization__org_id']
    ordering = ('-organization__active_date',)

    def organization_name(self, obj):
        return obj.organization.name
    organization_name.admin_order_field = "organization__name"

    def _get_ngo_admin(self, organization_setting):
        user_profiles = NGOUserProfile.objects.filter(org_id=organization_setting.organization.org_id)
        admin_users = [x.user for x in user_profiles if x.user.groups.filter(name="NGO Admins")]
        # right now there is only one ngo admin
        return admin_users[0] if is_not_empty(admin_users) else NullAdmin()

    def admin_email(self, obj):
        return self._get_ngo_admin(obj).email

    def organization_id(self, obj):
        return obj.organization.org_id
    organization_id.admin_order_field = "organization__org_id"

    def payment_details(self, obj):
        organization = obj.organization
        payment_details = PaymentDetails.objects.filter(organization=organization)
        if not is_empty(payment_details):
            return payment_details[0].preferred_payment
        return "--"

    def type(self, obj):
        return obj.organization.account_type
    type.admin_order_field = 'organization__account_type'

    def activation_date(self, obj):
        return obj.organization.active_date if obj.organization.active_date is not None else '--'
    activation_date.admin_order_field = "organization__active_date"
    activation_date.short_description = "Created on"


class MessageTrackerAdmin(DatawinnerAdmin):
    list_display = ("organization_name", "organization_id", "type", "month", "combined_total_incoming",
                    "total_incoming_per_month", "total_messages", "total_outgoing_messages", "outgoing_sms_count", "outgoing_sms_charged_count",
                    "sent_reminders_count", "sent_reminders_charged_count", "send_message_count", "send_message_charged_count", "sms_api_usage_count", "sms_api_usage_charged_count", "sms_submission", "incoming_sp_count",
                    "incoming_web_count", "sms_registration_count")
    search_fields = ['organization__name', 'organization__org_id', 'month']
    ordering = ('-month',)

    def __init__(self, *args, **kwargs):
        super(MessageTrackerAdmin, self).__init__(*args, **kwargs)
        self.list_display_links = (None,)

    def organization_name(self, obj):
        return obj.organization.name
    organization_name.short_description = mark_safe('Organisation<br/>name')

    def type(self, obj):
        return obj.organization.account_type

    def organization_id(self, obj):
        return obj.organization.org_id
    organization_id.short_description = mark_safe('Organisation<br/>ID')

    def combined_total_incoming(self, obj):
        return obj.total_incoming_in_total()
    combined_total_incoming.short_description = mark_safe('Total<br/>incoming<br/>Submissions<br/>(In total)')

    def total_incoming_per_month(self, obj):
        return obj.total_monthly_incoming_messages()
    total_incoming_per_month.short_description = mark_safe('Total<br/>Incoming<br/>Submissions<br/>')

    def current_month(self, obj):
        return datetime.datetime.strftime(obj.month, "%m-%Y")
    current_month.short_description = "Month"

    def total_outgoing_messages(self, obj):
        return obj.outgoing_message_count()
    total_outgoing_messages.short_description = mark_safe('Outgoing Charged SMS:<br/>Total')

    def total_messages(self, obj):
        return obj.total_messages()
    total_messages.short_description = mark_safe('Total SMS<br/>(incoming<br/>and<br/>outgoing)')

    def combined_total_messages(self, obj):
        return obj.combined_total_messages()
    combined_total_messages.short_description = mark_safe('Total SMS<br/>(in total)')

    def sms_submission(self, obj):
        return obj.incoming_sms_count - obj.sms_registration_count
    sms_submission.short_description = mark_safe('SMS<br/>Submissions')

    def export_message_tracker_details_to_excel(modeladmin, request, query_set):
        headers = ["Organization Name", "Organization Id", "Type", "Month", "Total Incoming Submissions (In total)", "Total Incoming Submissions",
                   "Total SMS (incoming and outgoing)", "Outgoing Charged SMS: Total", "Outgoing SMS: Auto Reply", "Outgoing Charged SMS: Auto Reply",
                   "Outgoing SMS: Reminders", "Outgoing Charged SMS: Reminders", "Outgoing SMS: Send Message", "Outgoing Charged SMS: Send Message",
                   "Outgoing SMS: API", "Outgoing Charged SMS: API", "SMS Submissions", "SP Submissions", "Web Submissions", "SMS Subject Registration"]
        list = []
        textSearchFilter = get_text_search_filter(request.GET, MessageTrackerAdmin.search_fields)
        adminPanelFilter = get_admin_panel_filter(request.GET)
        filteredSms = MessageTracker.objects.all().filter(Q(**adminPanelFilter) & (textSearchFilter))
        for messageTracker in filteredSms:
            sms_tracker_month = ExcelDate(datetime.datetime.combine(messageTracker.month, datetime.datetime.min.time()),
                                          'dd.mm.yyyy') if messageTracker.month else None
            list.append([modeladmin.organization_name(messageTracker), modeladmin.organization_id(messageTracker),
                         modeladmin.type(messageTracker),
                         sms_tracker_month, messageTracker.total_incoming_in_total(),
                         messageTracker.total_monthly_incoming_messages(),
                         messageTracker.total_messages(), messageTracker.outgoing_message_count(),
                         messageTracker.outgoing_sms_count, messageTracker.outgoing_sms_charged_count,
                         messageTracker.sent_reminders_count, messageTracker.sent_reminders_charged_count,
                         messageTracker.send_message_count,
                         messageTracker.send_message_charged_count, messageTracker.sms_api_usage_count,
                         messageTracker.sms_api_usage_charged_count,
                         modeladmin.sms_submission(messageTracker), messageTracker.incoming_sp_count,
                         messageTracker.incoming_web_count, messageTracker.sms_registration_count])
        response = create_excel_response(headers, list, 'tracker_list')
        return response
    actions = [export_message_tracker_details_to_excel]


class OrganizationChangeList(ChangeList):
    def get_query_set(self):
        if not self.params.get("q", ""):
            return super(OrganizationChangeList, self).get_query_set()
        from django.db import connection
        cursor = connection.cursor()
        query = """Select array_agg(DISTINCT o.org_id) from accountmanagement_organization o
                   inner join accountmanagement_ngouserprofile p on p.org_id = o.org_id
                   inner join auth_user u on u.id = p.user_id inner join auth_user_groups ug on ug.user_id = u.id
                   inner join auth_group g on ug.group_id = g.id and g.name = %s """
        params = ["NGO Admins"]
        for index, keyword in enumerate(self.params.get("q").split()):
            from django_countries.countries import COUNTRIES
            codes = ["'" + code + "'" for code, name in COUNTRIES if unicode(name).lower().find(keyword.lower()) != -1]
            country_codes = ', '.join(codes) if len(codes) else "''"
            query += "and " if index else "where"
            query += " (o.country in (%s) " % country_codes
            query += """OR u.email ilike %s OR u.first_name||u.last_name ilike %s OR o.name ilike %s
                        OR p.mobile_phone ilike %s OR o.address||o.addressline2||o.city||o.zipcode||o.state ilike %s
                        OR o.office_phone ilike %s OR o.website ilike %s OR o.org_id ilike %s
                        OR to_char(o.active_date, 'YYYY-MM-DD HH:MI:SS') ilike %s) """
            params.extend(["%" + keyword + "%"] * 9)
        cursor.execute(query, params)
        org_ids = cursor.fetchone()[0]
        qs = Organization.objects.filter(org_id__in=org_ids or [])
        if self.order_field:
            qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
        else:
            qs = qs.order_by('-active_date')
        return qs
class OrganizationAdmin(DatawinnerAdmin):
    list_display = (
        'name', 'org_id', 'complete_address', 'office_phone', 'website', 'paid', 'active_date', 'admin_name',
        'admin_email', 'admin_mobile_number', 'sms_api_users', 'status')
    actions = ['deactivate_organizations', 'activate_organizations', 'delete_organizations']
    search_fields = ['name', 'address', 'addressline2', 'city', 'zipcode', 'state', 'office_phone', 'website']
    ordering = ('-active_date',)

    def get_changelist(self, request, **kwargs):
        return OrganizationChangeList

    def get_query_set(self, request, queryset, search_term):
        queryset, use_distinct = super(OrganizationAdmin, self).get_search_results(request, queryset, search_term)
        if search_term:
            queryset = queryset.filter(ngouserprofile__title__icontains=search_term)
        return queryset, use_distinct

    def deactivate_organizations(modeladmin, request, queryset):
        queryset.exclude(status__in=['Deactivated', 'Pending Activation']).update(status='Deactivated',
                                                                                  status_changed_datetime=datetime.datetime.now())
        messages.success(request, _('The accounts selected have been deactivated successfully.'))
        selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
        orgs_id = Organization.objects.filter(org_id__in=selected).exclude(status='Pending Activation').\
            values_list('org_id', flat=True)
        User.objects.filter(ngouserprofile__org_id__in=orgs_id).update(is_active=False)
    deactivate_organizations.short_description = "Deactivate accounts"

    def activate_organizations(modeladmin, request, queryset):
        queryset.exclude(status__in=['Activated', 'Pending Activation']).update(status='Activated', status_changed_datetime=datetime.datetime.now())
        messages.success(request, _('The accounts selected have been activated successfully.'))
        selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
        orgs_id = Organization.objects.filter(org_id__in=selected).exclude(status='Pending Activation').\
            values_list('org_id', flat=True)
        User.objects.filter(ngouserprofile__org_id__in=orgs_id).update(is_active=True)
    activate_organizations.short_description = "Activate accounts"

    def delete_organizations(modeladmin, request, queryset):
        orgs = queryset.filter(status__in=['Deactivated', "Pending Activation"])
        for organization in orgs:
            dbm = get_database_manager_for_org(organization)
            organization.purge_all_data()
            del dbm.server[dbm.database_name]
            feed_database_name = "feed_" + dbm.database_name
            feed_dbm = feeds_db_for(feed_database_name)
            del feed_dbm.server[feed_database_name]
            es = get_elasticsearch_handle()
            try:
                es.delete_index(dbm.database_name)
            except Exception as e:
                logging.info("Could not delete index " + str(e.message))
    delete_organizations.short_description = "Delete accounts"

    class Media:
        css = {"all": ("/media/css/plugins/jqueryUI/jquery-ui-1.8.13.custom.css",)}
        js = ("/media/javascript/jquery.js", "/media/javascript/jqueryUI/jquery-ui-1.8.13.custom.min.js",)

    def sms_api_users(self, organization):
        user_profiles = NGOUserProfile.objects.filter(org_id=organization.org_id)
        return " , ".join([x.user.username for x in user_profiles if x.user.groups.filter(name="SMS API Users")])

    def paid(self, obj):
        return "No" if obj.in_trial_mode else "Yes"

    def _get_ngo_admin(self, organization):
        user_profiles = NGOUserProfile.objects.filter(org_id=organization.org_id)
        admin_users = [x.user for x in user_profiles if x.user.groups.filter(name="NGO Admins")]
        # right now there is only one ngo admin
        return admin_users[0] if is_not_empty(admin_users) else NullAdmin()

    def admin_email(self, obj):
        return self._get_ngo_admin(obj).email

    def admin_office_phone(self, obj):
        admin_user = self._get_ngo_admin(obj)
        return admin_user.get_profile().office_phone

    def admin_mobile_number(self, obj):
        admin_user = self._get_ngo_admin(obj)
        return admin_user.get_profile().mobile_phone

    def admin_name(self, obj):
        admin_user = self._get_ngo_admin(obj)
        return admin_user.first_name

    def complete_address(self, obj):
        complete_address = [obj.address, obj.addressline2, obj.city, obj.zipcode, obj.state, obj.country_name()]
        return ", ".join([element for element in complete_address if is_not_empty(element)])

    def get_readonly_fields(self, request, obj=None):
        if obj:
            return self.readonly_fields + ('status',)
        return self.readonly_fields


class NullAdmin:
    def __init__(self):
        self.email = ''
        self.mobile_phone = ''
        self.office_phone = ''
        self.first_name = ''

    def get_profile(self):
        return self


class CountryAdmin(admin.ModelAdmin):
    ordering = ('country_name_en',)
    list_display = ('country_name_en', 'country_code')


class NetworkAdmin(admin.ModelAdmin):
    ordering = ('network_name',)
    list_display = ('network_name', 'trial_sms_number', 'country_name')
    filter_horizontal = ['country']

    def country_name(self, obj):
        return ' ,'.join([country.country_name for country in obj.country.all()])


class UserAdminForm(forms.ModelForm):
    class Meta:
        model = User

    def clean(self):
        cleaned_data = self.cleaned_data
        if 'email' in cleaned_data:
            username = cleaned_data.get('email').strip()
            if not len(username):
                raise forms.ValidationError("This email address is required")
            existing_users_with_username = User.objects.filter(username=username)
            if existing_users_with_username.count() > 0 and existing_users_with_username[0] != self.instance:
                raise forms.ValidationError(
                    "This email address is already in use. Please supply a different email address")
            cleaned_data['email'] = username
        return cleaned_data


class NgoUserAdmin(DatawinnerAdmin):
    list_display = ('organization_name', 'country', 'organization_id', 'admin_name', 'admin_email')
    fields = ('email', )
    form = UserAdminForm

    def organization_name(self, obj):
        profile = obj.get_profile()
        return Organization.objects.get(org_id=profile.org_id).name

    def country(self, obj):
        return (Organization.objects.get(org_id=obj.get_profile().org_id)).country_name()

    def organization_id(self, obj):
        return obj.get_profile().org_id

    def admin_name(self, obj):
        return obj.first_name

    def admin_email(self, obj):
        return obj.email

    def queryset(self, request):
        qs = super(NgoUserAdmin, self).queryset(request)
        return qs.filter(groups=Group.objects.filter(name="NGO Admins"))

    def save_model(self, request, obj, form, change):
        username = form.cleaned_data['email']
        obj.username = username
        obj.email = username
        obj.save()


class DWUserChangeForm(UserChangeForm):
    organization_id = CharField(label="Organization ID")

    def __init__(self, *args, **kwargs):
        super(DWUserChangeForm, self).__init__(*args, **kwargs)
        self.fields['organization_id'] = CharField(label="Organization ID")
        if self.instance:
            self.organization_id_field()
        self.fields['password'].widget.attrs['readonly'] = 'readonly'
        self.fields['first_name'].label = "Name"

    class Meta:
        model = User

    def organization_id_field(self):
        org_id = ''
        try:
            user_profile = NGOUserProfile.objects.get(user=self.instance)
            org_id = user_profile.org_id
        except:
            pass
        self.fields['organization_id'] = CharField(label="Organization ID", initial=org_id)

    def clean_organization_id(self):
        org_id = self.cleaned_data.get('organization_id', '')
        try:
            org = Organization.objects.get(org_id__iexact=org_id)
            return org.org_id
        except Organization.DoesNotExist:
            raise ValidationError('Organization with id : %s does not exist. Please enter a valid id' % org_id)


def _remove_default_name_fields():
    user_display_fields = list(UserAdmin.list_display)
    user_display_fields.remove('first_name')
    user_display_fields.remove('last_name')
    return tuple(user_display_fields)


def export_user_list_to_excel(a, b, c):
    # Custom Method to export user details.
    def is_required(user):
        return True if user.groups.filter(name="NGO Admins").count() or user.groups.filter(name="Project Managers").count() else False

    def user_role(user):
        if user.groups.filter(name='NGO Admins').count():
            return 'Admin'
        elif user.groups.filter(name='Project Managers').count():
            return 'User'

    list = []
    for ngo_user in NGOUserProfile.objects.all():
        try:
            user = User.objects.get(id=ngo_user.user_id)
            if is_required(user) and not user.is_superuser:
                details = []
                details.append(user.first_name + ' ' + user.last_name)
                details.append(user.username)
                org_id = ngo_user.org_id
                organization = Organization.objects.get(org_id=org_id)
                details.append(organization.name)
                details.append(organization.status)
                details.append(organization.language)
                details.append(user_role(user))
                list.append(details)
        except Exception:
            continue
    headers = ['Name', 'email', 'Organization Name', 'Status', 'Account language', 'User Role']
    response = create_excel_response(headers, list, 'user_list')
    return response


class DWUserAdmin(UserAdmin):
    list_filter = ('groups__name',)
    UserAdmin.fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('Membership'), {'fields': ('groups', 'organization_id')}),
    )
    readonly_fields = ('last_login', 'date_joined')
    list_display = _remove_default_name_fields() + ('name', 'organization_name', 'organization_id')
    form = DWUserChangeForm
    actions = [export_user_list_to_excel]

    def name(self, obj):
        return obj.first_name

    def organization_name(self, obj):
        org_id = NGOUserProfile.objects.get(user=obj).org_id
        return Organization.objects.get(org_id=org_id).name

    def organization_id(self, obj):
        return NGOUserProfile.objects.get(user=obj).org_id

    def save_model(self, request, obj, form, change):
        super(DWUserAdmin, self).save_model(request, obj, form, change)
        if change:
            if 'email' in form.changed_data or 'username' in form.changed_data:
                try:
                    existing_digests = PartialDigest.objects.filter(user=obj)
                    if existing_digests:
                        for existing_digest in existing_digests:
                            existing_digest.delete()
                except PartialDigest.DoesNotExist:
                    pass
        if form.cleaned_data.get('organization_id') is not None:
            try:
                user_profile = NGOUserProfile.objects.get(user=obj)
                user_profile.org_id = form.cleaned_data['organization_id']
                user_profile.save()
            except NGOUserProfile.DoesNotExist:
                user_profile = NGOUserProfile()
                user_profile.org_id = form.cleaned_data['organization_id']
                user_profile.title = 'Title'
                user_profile.user = obj
                user_profile.save()


admin.site.unregister(Group)
admin.site.unregister(User)
admin.site.register(OrganizationSetting, OrganizationSettingAdmin)
admin.site.register(OutgoingNumberSetting, admin.ModelAdmin)
admin.site.register(SMSC, admin.ModelAdmin)
admin.site.register(MessageTracker, MessageTrackerAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(User, DWUserAdmin)
| 44.194595
| 218
| 0.677471
| 2,949
| 24,528
| 5.391319
| 0.127501
| 0.016668
| 0.019624
| 0.017108
| 0.428392
| 0.376565
| 0.341657
| 0.303981
| 0.265614
| 0.253915
| 0
| 0.001405
| 0.216569
| 24,528
| 554
| 219
| 44.274368
| 0.825978
| 0.006197
| 0
| 0.294393
| 0
| 0.028037
| 0.192647
| 0.037996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.13785
| false
| 0.009346
| 0.067757
| 0.067757
| 0.432243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cddc0485c396754b68315d1f0f82db760ff25dc5
| 2,580
|
py
|
Python
|
floodfill_pathfinding.py
|
mnursey/Battlesnake-2021
|
884b9cf1b40c9b03cc49bd1594135e7caf41ee82
|
[
"MIT"
] | null | null | null |
floodfill_pathfinding.py
|
mnursey/Battlesnake-2021
|
884b9cf1b40c9b03cc49bd1594135e7caf41ee82
|
[
"MIT"
] | null | null | null |
floodfill_pathfinding.py
|
mnursey/Battlesnake-2021
|
884b9cf1b40c9b03cc49bd1594135e7caf41ee82
|
[
"MIT"
] | null | null | null |
import board

class Floodfill:
    frontier = []
    grid = None
    board = None

    def __init__(self, game_board, start_cord):
        self.board = game_board
        self.grid = [[None for i in range(self.board.width)] for j in range(self.board.width)]
        start_node = self.create_node(start_cord["x"], start_cord["y"], False, None)
        self.frontier_add(start_node)
        self.grid[start_cord['x']][start_cord['y']] = start_node
        self.solve()
        return

    def solve(self):
        while len(self.frontier) > 0:
            current = self.frontier_pop()
            if not current["blocked"]:
                for n in self.board.neighbours(current["x"], current["y"]):
                    # Add to frontier if we haven't seen it
                    if self.grid[n['x']][n['y']] == None:
                        unseen_node = self.create_node(n['x'], n['y'], self.board.isBlocked(n['x'], n['y']), current)
                        self.grid[n['x']][n['y']] = unseen_node
                        self.frontier_add(unseen_node)
        return

    def path(self, target_cord):
        node = self.grid[target_cord['x']][target_cord['y']]
        path = []
        while node:
            path.append({"x": node["x"], "y": node["y"]})
            node = node["from"]
        path.reverse()
        return path

    def frontier_add(self, node):
        self.frontier.append(node)
        return

    def frontier_pop(self):
        return self.frontier.pop(0)

    def create_node(self, x, y, blocked, prev):
        return {"x": x, "y": y, "blocked": blocked, "from": prev}

    def print(self):
        output = "Grid:\n"
        for y in range(self.board.width):
            line = "\n"
            for x in range(self.board.width):
                node = self.grid[x][self.board.width - y - 1]
                value = "-"
                if node:
                    if node["from"] == None:
                        value = "s"
                    elif node["blocked"]:
                        value = "x"
                    else:
                        if node["from"]["x"] < node["x"]:
                            value = "<"
                        if node["from"]["x"] > node["x"]:
                            value = ">"
                        if node["from"]["y"] < node["y"]:
                            value = "v"
                        if node["from"]["y"] > node["y"]:
                            value = "^"
                line = line + value
            output += line
        print(output)
        return
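# Hedged usage sketch: the project's `board.Board` is not shown here, so this
# hypothetical stand-in implements only the interface Floodfill relies on
# (width, neighbours(x, y), isBlocked(x, y)).
class _ToyBoard:
    width = 3

    def neighbours(self, x, y):
        steps = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        return [{"x": x + dx, "y": y + dy} for dx, dy in steps
                if 0 <= x + dx < self.width and 0 <= y + dy < self.width]

    def isBlocked(self, x, y):
        return (x, y) == (1, 1)  # a single wall cell in the middle

ff = Floodfill(_ToyBoard(), {"x": 0, "y": 0})
print(ff.path({"x": 2, "y": 2}))  # BFS route from (0,0) to (2,2) around (1,1)
ff.print()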
| 27.446809
| 117
| 0.445736
| 293
| 2,580
| 3.829352
| 0.204778
| 0.064171
| 0.062389
| 0.057041
| 0.206774
| 0.131907
| 0.074866
| 0.046346
| 0.046346
| 0.046346
| 0
| 0.001963
| 0.407752
| 2,580
| 93
| 118
| 27.741935
| 0.73233
| 0.014341
| 0
| 0.063492
| 0
| 0
| 0.037387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.015873
| 0.031746
| 0.301587
| 0.031746
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cddc0ce80665ce382edeabc67713697083130041
| 3,736
|
py
|
Python
|
Gobot-Omni/robot.py
|
FRC1076/2019-Parade
|
3824449ed10e33b401efb646fd2e6470c3941c8b
|
[
"MIT"
] | null | null | null |
Gobot-Omni/robot.py
|
FRC1076/2019-Parade
|
3824449ed10e33b401efb646fd2e6470c3941c8b
|
[
"MIT"
] | 2
|
2019-06-17T23:38:23.000Z
|
2019-06-17T23:39:43.000Z
|
Gobot-Omni/robot.py
|
FRC1076/2019-Parade
|
3824449ed10e33b401efb646fd2e6470c3941c8b
|
[
"MIT"
] | null | null | null |
import wpilib
import ctre
from wpilib.drive import DifferentialDrive
from wpilib.interfaces import GenericHID

# MOTOR PORTS
LEFT = 1
RIGHT = 3
CENTER1 = 2
CENTER2 = 4

# BALL MANIPULATOR
BALL_MANIP_ID = 5
GATHER_SPEED = 1.0
SPIT_SPEED = -1.0
STOP_SPEED = 0.0

LEFT_HAND = GenericHID.Hand.kLeft
RIGHT_HAND = GenericHID.Hand.kRight

class MyRobot(wpilib.TimedRobot):
    def robotInit(self):
        """Robot initialization function"""
        # object that handles basic drive operations
        self.leftVictor = ctre.WPI_VictorSPX(LEFT)
        self.rightVictor = ctre.WPI_VictorSPX(RIGHT)
        self.centerVictor1 = ctre.WPI_VictorSPX(CENTER1)
        self.centerVictor2 = ctre.WPI_VictorSPX(CENTER2)

        self.left = wpilib.SpeedControllerGroup(self.leftVictor)
        self.right = wpilib.SpeedControllerGroup(self.rightVictor)
        self.center1 = wpilib.SpeedControllerGroup(self.centerVictor1)
        self.center2 = wpilib.SpeedControllerGroup(self.centerVictor2)
        self.myRobot = DifferentialDrive(self.left, self.right)
        self.myRobot.setExpiration(0.1)

        # joysticks 1 & 2 on the driver station
        # self.leftStick = wpilib.Joystick(0)
        # self.rightStick = wpilib.Joystick(1)

        self.DEADZONE = 0.4
        self.LEFT = GenericHID.Hand.kLeft
        self.RIGHT = GenericHID.Hand.kRight
        self.driver = wpilib.XboxController(0)
        self.ballManipulator = BallManipulator(ctre.WPI_VictorSPX(BALL_MANIP_ID))

    def autonomousInit(self):
        self.myRobot.tankDrive(0.8, 0.8)

    def autonomousPeriodic(self):
        self.myRobot.tankDrive(1, 0.5)

    def teleopInit(self):
        """Executed at the start of teleop mode"""
        self.myRobot.setSafetyEnabled(True)

    def setCenters(self, speed_value):
        self.center1.set(-speed_value)
        self.center2.set(speed_value)

    def deadzone(self, val, deadzone):
        if abs(val) < deadzone:
            return 0
        return val

    def teleopPeriodic(self):
        ballMotorSetPoint = 0
        if self.driver.getBumper(self.LEFT):
            ballMotorSetPoint = 1.0
        elif self.driver.getBumper(self.RIGHT):
            ballMotorSetPoint = -1.0
        else:
            ballMotorSetPoint = 0.0
        self.ballManipulator.set(ballMotorSetPoint)

        """Runs the motors with tank steering"""
        # right = self.driver.getY(self.RIGHT)
        # left = self.driver.getY(self.LEFT)
        # self.myRobot.tankDrive(right, left)
        forward = -self.driver.getRawAxis(5)
        # the original doubled this assignment ("rotation_value = rotation_value = ...")
        rotation_value = self.driver.getX(LEFT_HAND)
        forward = deadzone(forward, 0.2)
        self.myRobot.arcadeDrive(forward, rotation_value)

        center_speed = self.driver.getX(self.RIGHT)
        self.setCenters(self.deadzone(center_speed, self.DEADZONE))

class BallManipulator:
    """
    Manipulator wraps a motor controller that gathers and spits
    out the cargo balls.
    """
    def __init__(self, motor):
        self.motor = motor

    def gather(self, speed = GATHER_SPEED):
        self.motor.set(speed)

    def spit(self, speed = SPIT_SPEED):
        self.motor.set(speed)

    def stop(self):
        self.motor.set(STOP_SPEED)

    def set(self, setValue):
        """
        Direct control to be used with a controller
        that puts out f, 0, and -f for gather, stop,
        and spit, respectively.
        """
        self.motor.set(setValue)

def deadzone(val, deadzone):
    if abs(val) < deadzone:
        return 0
    elif val < (0):
        x = ((abs(val) - deadzone)/(1-deadzone))
        return (-x)
    else:
        x = ((val - deadzone)/(1-deadzone))
        return (x)

if __name__ == "__main__":
    wpilib.run(MyRobot)
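# Note: the module-level deadzone above remaps the surviving input range
# linearly, (|val| - dz) / (1 - dz), so the output ramps smoothly from 0 at
# the threshold to +/-1 at full stick deflection, whereas MyRobot.deadzone
# simply zeroes small inputs and passes larger ones through unchanged.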
| 27.470588
| 81
| 0.646413
| 442
| 3,736
| 5.377828
| 0.282805
| 0.033656
| 0.033656
| 0.020194
| 0.07236
| 0.07236
| 0.028607
| 0.028607
| 0
| 0
| 0
| 0.019671
| 0.251606
| 3,736
| 136
| 82
| 27.470588
| 0.830472
| 0.146413
| 0
| 0.098765
| 0
| 0
| 0.002603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160494
| false
| 0
| 0.049383
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cddea9a721eee8e3cc13555afb08ee013159480b
| 2,158
|
py
|
Python
|
integration/emulator/device.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
integration/emulator/device.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
integration/emulator/device.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
import re, operator, array
from collections import namedtuple

class Argument(object):
    def __init__(self, viewtype, begin, end=None):
        self.type = viewtype
        self.begin = int(begin)
        self.end = None
        if end is not None:
            self.end = int(end)

class Lram(bytearray):
    pass

class Pram(Lram):
    def __init__(self):
        # grammar: [<output>]<operation>(<input0>[, <input1>, ...])
        # <output>, <input0>, ... - <type>:<begin>[:<end>] (begin, end - bytes offsets)
        # <type> - u8, i8, u16, i16, ...
        # <operation> - add, sub, div, mod, mul, ...
        # <begin>, <end> - int
        # example: [s16:200:400]add(u8:0, u8:100)
        self.instruction = re.compile(r'\[(?P<out>[^\]]+)\](?P<op>\w+)\((?P<in>[^\)]+)\)')
        self.operation = dict(add=operator.add, mul=operator.mul, mod=operator.mod, sub=operator.sub, div=operator.truediv)
        self.type = dict(i8='b', u8='B', i16='h', u16='H', i32='l', u32='L', f32='f')

    def _parse_arguments(self, op, lram):
        arguments = [Argument(*x.split(':')) for x in op.split(',')]
        return [memoryview(lram)[x.begin:x.end].cast(self.type[x.type]) for x in arguments]

    def _vectorize(self, op, output, inputs):
        for x in zip(range(len(output)), *inputs):
            output[x[0]] = op(*x[1:])

    def run(self, lram):
        operations = self.instruction.findall(str(self).replace(' ', ''))
        for op in operations:
            outputs = self._parse_arguments(op[0], lram)
            inputs = self._parse_arguments(op[2], lram)
            self._vectorize(self.operation[op[1]], outputs[0], inputs)

class Unit(object):
    def __init__(self):
        self.lram = Lram()
        self.pram = Pram()

class Ctrl(list):
    def __init__(self, units):
        self.units = units

    def wait(self):
        if len(self) == 0:
            return []
        number = self.pop(0)
        unit = self.units[number]
        unit.pram.run(unit.lram)
        return [number]

class Device(object):
    def __init__(self, units):
        self.units = [Unit() for _ in range(units)]
        self.ctrl = Ctrl(self.units)
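# Worked example of the instruction grammar documented in Pram.__init__
# (note that the docstring's `s16` is spelled `i16` in the type table): the
# program
#   [i16:200:400]add(u8:0, u8:100)
# reads lram[0:100] and lram[100:200] as unsigned bytes, adds them
# element-wise, and writes the 100 sums into lram[200:400] viewed as 16-bit
# signed integers via memoryview.cast('h').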
| 33.2
| 123
| 0.561631
| 284
| 2,158
| 4.165493
| 0.330986
| 0.045647
| 0.046492
| 0.043111
| 0.042265
| 0.042265
| 0
| 0
| 0
| 0
| 0
| 0.026758
| 0.255329
| 2,158
| 64
| 124
| 33.71875
| 0.709396
| 0.125116
| 0
| 0.086957
| 0
| 0
| 0.030835
| 0.025518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195652
| false
| 0.021739
| 0.043478
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cde6ca9c0b5b99aea51fe8a0efe3ed98163008e0
| 17,570
|
py
|
Python
|
win/pywinauto/findbestmatch.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 191
|
2015-01-02T12:17:07.000Z
|
2021-05-26T09:26:05.000Z
|
win/pywinauto/findbestmatch.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 48
|
2015-01-14T00:57:36.000Z
|
2021-04-06T21:45:42.000Z
|
win/pywinauto/findbestmatch.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 36
|
2015-01-14T18:54:25.000Z
|
2021-07-18T10:54:42.000Z
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA

"Module to find the closest match of a string in a list"

__revision__ = "$Revision: 679 $"

import re
import difflib

from . import fuzzydict

#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance

# need to use sets.Set for python 2.3 compatibility
# but 2.6 raises a deprecation warning about sets module
try:
    set
except NameError:
    import sets
    set = sets.Set

find_best_control_match_cutoff = .6

#====================================================================
class MatchError(IndexError):
    "A suitable match could not be found"
    def __init__(self, items = None, tofind = ''):
        "Init the parent with the message"
        self.tofind = tofind
        self.items = items
        if self.items is None:
            self.items = []
        IndexError.__init__(self,
            "Could not find '%s' in '%s'"% (tofind, self.items))

_cache = {}

# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
    "Get the match ratio of how each item in texts compared to match_against"

    # now time to figure out the matching
    ratio_calc = difflib.SequenceMatcher()
    ratio_calc.set_seq1(match_against)

    ratios = {}
    best_ratio = 0
    best_text = ''

    global _cache

    for text in texts:
        if 0:
            pass
        if (text, match_against) in _cache:
            ratios[text] = _cache[(text, match_against)]
        elif (match_against, text) in _cache:
            ratios[text] = _cache[(match_against, text)]
        else:
            # set up the SequenceMatcher with other text
            ratio_calc.set_seq2(text)

            # try using the levenshtein distance instead
            #lev_dist = levenshtein_distance(unicode(match_against), unicode(text))
            #ratio = 1 - lev_dist / 10.0
            #ratios[text] = ratio

            # calculate ratio and store it
            ratios[text] = ratio_calc.ratio()
            _cache[(match_against, text)] = ratios[text]

        # if this is the best so far then update best stats
        if ratios[text] > best_ratio:
            best_ratio = ratios[text]
            best_text = text

    return ratios, best_ratio, best_text

#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
    """Return the item that best matches the search_text

    * **search_text** The text to search for
    * **item_texts** The list of texts to search through
    * **items** The list of items corresponding (1 to 1)
      to the list of texts to search through.
    * **limit_ratio** How well the text has to match the best match.
      If the best match matches lower than this then it is not
      considered a match and a MatchError is raised, (default = .5)
    """
    search_text = _cut_at_tab(search_text)

    text_item_map = UniqueDict()
    # Clean each item, make it unique and map to
    # to the item index
    for text, item in zip(item_texts, items):
        text_item_map[_cut_at_tab(text)] = item

    ratios, best_ratio, best_text = \
        _get_match_ratios(list(text_item_map.keys()), search_text)

    if best_ratio < limit_ratio:
        raise MatchError(items = list(text_item_map.keys()), tofind = search_text)

    return text_item_map[best_text]
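# Hedged usage sketch (control names illustrative): given button wrappers
# btn_ok and btn_cancel whose window texts are "OK" and "&Cancel",
#   find_best_match("Cancel", ["OK", "&Cancel"], [btn_ok, btn_cancel])
# returns btn_cancel, since SequenceMatcher scores "Cancel" against "&Cancel"
# at roughly 0.92 (2*6/13), well above the default limit_ratio of .5.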
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"Clean out non characters from the string and return it"
# remove anything after the first tab
return _after_tab.sub("", text)
def _clean_non_chars(text):
"Remove non word characters"
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def IsAboveOrToLeft(ref_control, other_ctrl):
"Return true if the other_ctrl is above or to the left of ref_control"
text_r = other_ctrl.Rectangle()
ctrl_r = ref_control.Rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
return False
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
return False
# the text control's top left corner is below and to the right of the
# control's top left corner - so it is neither above nor to the left :)
if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
return False
return True
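# A minimal sketch of the check above (FakeCtrl/Rect are hypothetical
# stand-ins for the real wrapper objects, used only for illustration):
#
#   >>> from collections import namedtuple
#   >>> Rect = namedtuple("Rect", "left top right bottom")
#   >>> class FakeCtrl:
#   ...     def __init__(self, r): self._r = r
#   ...     def Rectangle(self): return self._r
#   >>> label = FakeCtrl(Rect(10, 10, 60, 30))
#   >>> edit = FakeCtrl(Rect(70, 40, 200, 60))
#   >>> IsAboveOrToLeft(edit, label)
#   True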
#====================================================================
distance_cuttoff = 999
def GetNonTextControlName(ctrl, controls):
"""return the name for this control by finding the closest
text control above and to its left"""
names = []
ctrl_index = controls.index(ctrl)
if ctrl_index != 0:
prev_ctrl = controls[ctrl_index-1]
if prev_ctrl.FriendlyClassName() == "Static" and \
prev_ctrl.IsVisible() and prev_ctrl.WindowText() and \
IsAboveOrToLeft(ctrl, prev_ctrl):
names.append(
prev_ctrl.WindowText() +
ctrl.FriendlyClassName())
# get the visible text controls so that we can get
# the closest text if the control has no text
text_ctrls = [ctrl_ for ctrl_ in controls
if ctrl_.IsVisible() and ctrl_.WindowText() and ctrl_.can_be_label]
best_name = ''
closest = distance_cuttoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.Rectangle()
ctrl_r = ctrl.Rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls
# at first I just calculated the distance from the top left
# corner of one control to the top left corner of the other control
# but this was not ideal. As a text control should be either above
# or to the left of the control, I take the distance from the
# top left of the non-text control to the:
#   Top-Right of the text control (text control to the left)
#   Bottom-Left of the text control (text control above)
# and use the smaller of the two.
# We do not need the true Euclidean distance (x^2 + y^2)^.5 here,
# only a comparative number - as long as we find the closest control
# the actual distance is not important. Using the Manhattan distance
# instead reduced the unit test run time on my machine by about
# 1 second (from 61 s to 60 s).
#distance = (
# (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.bottom - ctrl_r.top) ** 2) \
# ** .5 # ^.5
#distance2 = (
# (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.top - ctrl_r.top) ** 2) \
# ** .5 # ^.5
distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
distance = min(distance, distance2)
# if this distance was closer than the last one
if distance < closest:
closest = distance
best_name = text_ctrl.WindowText() + ctrl.FriendlyClassName()
names.append(best_name)
return names
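# A worked example (hypothetical coordinates) of the distance used above:
# for a text control at (left=10, top=10, right=60, bottom=30) and a
# non-text control at (left=70, top=25, ...):
#   distance  = |10 - 70| + |30 - 25| = 65   (text treated as above)
#   distance2 = |60 - 70| + |10 - 25| = 25   (text treated as to the left)
# min(65, 25) = 25, so this text control competes as a label to the left.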
#====================================================================
def get_control_names(control, allcontrols):
"Returns a list of names for this control"
names = []
# if it has a reference control - then use that
#if hasattr(control, 'ref') and control.ref:
# control = control.ref
# Add the control based on its friendly class name
names.append(control.FriendlyClassName())
# if it has some visible text then add a name based on that
# and based on that text with the friendly class name appended
cleaned = control.WindowText()
# Todo - I don't like the hardcoded classnames here!
if cleaned and control.has_title:
names.append(cleaned)
names.append(cleaned + control.FriendlyClassName())
# it didn't have visible text
else:
# so find the text of the nearest visible text control
non_text_names = GetNonTextControlName(control, allcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# return the names - and make sure there are no duplicates
return set(names)
#====================================================================
class UniqueDict(dict):
"A dictionary subclass that handles making it's keys unique"
def __setitem__(self, text, item):
"Set an item of the dictionary"
# this text is already in the map
# so we need to make it unique
if text in self:
# find next unique text after text1
unique_text = text
counter = 2
while unique_text in self:
unique_text = text + str(counter)
counter += 1
# now we also need to make sure the original item
# is under text0 and text1 also!
if text + '0' not in self:
dict.__setitem__(self, text+'0', self[text])
dict.__setitem__(self, text+'1', self[text])
# now that we don't need the original 'text' anymore
# replace it with the unique text
text = unique_text
# add our current item
dict.__setitem__(self, text, item)
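# A minimal sketch (not part of the module) of the disambiguation above -
# the first "Edit" also becomes reachable as "Edit0" and "Edit1", and the
# second becomes "Edit2":
#
#   >>> d = UniqueDict()
#   >>> d["Edit"] = "first"
#   >>> d["Edit"] = "second"
#   >>> sorted(d.items())
#   [('Edit', 'first'), ('Edit0', 'first'), ('Edit1', 'first'), ('Edit2', 'second')]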
def FindBestMatches(
self,
search_text,
clean = False,
ignore_case = False):
"""Return the best matches for search_text in the items
* **search_text** the text to look for
* **clean** whether to clean non-word characters out of the strings
* **ignore_case** compare strings case insensitively
"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
if ignore_case:
search_text = search_text.lower()
ratio_calc.set_seq1(search_text)
ratios = {}
best_ratio = 0
best_texts = []
ratio_offset = 1
if clean:
ratio_offset *= .9
if ignore_case:
ratio_offset *= .9
for text_ in self:
# make a copy of the text as we need the original later
text = text_
if clean:
text = _clean_non_chars(text)
if ignore_case:
text = text.lower()
# check if this item is in the cache - if yes, then retrieve it
if (text, search_text) in _cache:
ratios[text_] = _cache[(text, search_text)]
elif (search_text, text) in _cache:
ratios[text_] = _cache[(search_text, text)]
# not in the cache - calculate it and add it to the cache
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# cascade from the cheapest ratio to the most expensive one;
# each cheaper ratio is an upper bound on ratio(), so bail out
# early if even the optimistic estimate cannot reach the cutoff
ratio = ratio_calc.real_quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.ratio() * ratio_offset
# save the match we got and store it in the cache
ratios[text_] = ratio
_cache[(text, search_text)] = ratio
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(unicode(search_text), unicode(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text_] = ratio
#print "%5s" %("%0.2f"% ratio), search_text, `text`
# if this is the best so far then update best stats
if ratios[text_] > best_ratio and \
ratios[text_] >= find_best_control_match_cutoff:
best_ratio = ratios[text_]
best_texts = [text_]
elif ratios[text_] == best_ratio:
best_texts.append(text_)
#best_ratio *= ratio_offset
return best_ratio, best_texts
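# The early-exit cascade above relies on a documented difflib property:
# real_quick_ratio() >= quick_ratio() >= ratio(), so a cheap upper bound
# below the cutoff lets us skip the expensive call. A quick check:
#
#   >>> import difflib
#   >>> m = difflib.SequenceMatcher(None, "Button", "completely different")
#   >>> m.real_quick_ratio() >= m.quick_ratio() >= m.ratio()
#   True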
#====================================================================
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
"""Returns the control that is the the best match to search_text
This is slightly differnt from find_best_match in that it builds
up the list of text items to search through using information
from each control. So for example for there is an OK, Button
then the following are all added to the search list:
"OK", "Button", "OKButton"
But if there is a ListView (which do not have visible 'text')
then it will just add "ListView".
"""
name_control_map = build_unique_dict(controls)
search_text = str(search_text)
best_ratio, best_texts = name_control_map.FindBestMatches(search_text)
best_ratio_ci, best_texts_ci = \
name_control_map.FindBestMatches(search_text, ignore_case = True)
best_ratio_clean, best_texts_clean = \
name_control_map.FindBestMatches(search_text, clean = True)
best_ratio_clean_ci, best_texts_clean_ci = \
name_control_map.FindBestMatches(
search_text, clean = True, ignore_case = True)
if best_ratio_ci > best_ratio:
best_ratio = best_ratio_ci
best_texts = best_texts_ci
if best_ratio_clean > best_ratio:
best_ratio = best_ratio_clean
best_texts = best_texts_clean
if best_ratio_clean_ci > best_ratio:
best_ratio = best_ratio_clean_ci
best_texts = best_texts_clean_ci
if best_ratio < find_best_control_match_cutoff:
raise MatchError(items = list(name_control_map.keys()), tofind = search_text)
return [name_control_map[best_text] for best_text in best_texts]
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.FindBestMatches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
| 32.657993
| 86
| 0.594878
| 2,266
| 17,570
| 4.429832
| 0.171668
| 0.029588
| 0.018131
| 0.012552
| 0.30265
| 0.261606
| 0.196553
| 0.161287
| 0.144351
| 0.144351
| 0
| 0.007258
| 0.302106
| 17,570
| 537
| 87
| 32.718808
| 0.811368
| 0.459761
| 0
| 0.150485
| 0
| 0
| 0.057572
| 0
| 0
| 0
| 0
| 0.001862
| 0
| 1
| 0.058252
| false
| 0.004854
| 0.019417
| 0
| 0.150485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cde9443d5f9dce44149feca0d10e665a2fbcf090
| 1,074
|
py
|
Python
|
setup.py
|
boichee/fabricator
|
33ad4fa615c153817b014d6b7fe9807f1752db25
|
[
"MIT"
] | 11
|
2018-07-09T07:08:16.000Z
|
2018-07-13T14:05:46.000Z
|
setup.py
|
boichee/fabricator
|
33ad4fa615c153817b014d6b7fe9807f1752db25
|
[
"MIT"
] | 3
|
2020-03-24T17:37:47.000Z
|
2021-02-02T22:18:59.000Z
|
setup.py
|
boichee/fabricator
|
33ad4fa615c153817b014d6b7fe9807f1752db25
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
exclude_dirs = ['ez_setup', 'examples', 'tests', 'venv']
# Runtime requirements
reqs = [
'requests',
'six',
'future',
'aenum'
]
# Requirements for testing
test_reqs = ['pytest', 'hypothesis', 'requests_mock']
# Requirements for setup
setup_reqs = ['flake8', 'pep8', 'pytest-runner']
setup(
name='fabricate-it',
version='1.1.0',
author='Brett Levenson',
author_email='blevenson@apple.com',
description='A library that makes creating API clients simple and declarative',
url='https://github.com/boichee/fabricator',
packages=find_packages(exclude=exclude_dirs),
install_requires=reqs,
tests_require=test_reqs,
setup_requires=setup_reqs,
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Software Development',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers'
]
)
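# A hedged usage sketch (assumed commands, not part of the original file):
#
#   pip install .            # install fabricate-it plus its runtime deps
#   python setup.py pytest   # run the tests via the pytest-runner hook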
| 26.85
| 83
| 0.650838
| 117
| 1,074
| 5.854701
| 0.700855
| 0.035037
| 0.055474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011792
| 0.210428
| 1,074
| 39
| 84
| 27.538462
| 0.795991
| 0.063315
| 0
| 0
| 0
| 0
| 0.474052
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cde9dfcf27b3e92945a09440ebd5cd1eb09e8452
| 12,607
|
py
|
Python
|
src/gan/ccgan/ccGAN.py
|
matkir/Master_programs
|
70c4c399f9c9fc3e1643e78694223b24d7b94b18
|
[
"MIT"
] | null | null | null |
src/gan/ccgan/ccGAN.py
|
matkir/Master_programs
|
70c4c399f9c9fc3e1643e78694223b24d7b94b18
|
[
"MIT"
] | null | null | null |
src/gan/ccgan/ccGAN.py
|
matkir/Master_programs
|
70c4c399f9c9fc3e1643e78694223b24d7b94b18
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division
if __name__=='__main__':
from cc_weights import Weight_model
else:
from . import Weight_model
from keras.models import load_model
import keras.backend as K
import plotload
import sys
from selector import Selector
#from masker import mask_from_template,mask_randomly_square,mask_green_corner,combine_imgs_with_mask
import masker as ms
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import cutter
import masker
class CCgan():
def __init__(self,img_cols,img_rows):
"""
Initializes the CCGAN.
"""
self.set_training_info()
globals().update(self.info)
self.threshold=threshold
self.img_cols = img_cols # Original is ~576
self.img_rows = img_rows # Original is ~720
self.channels = 3 # RGB
self.img_shape=(self.img_cols,self.img_rows,self.channels)
if not mask:
dummy=plotload.load_polyp_batch(self.img_shape,20,data_type='med/stool-inclusions',crop=False)
self.dims =cutter.find_square_coords(dummy)
self.combined=None
self.discriminator=None
self.generator=None
self.pretrained=False
def load_model(self):
"""
Loads a pretrained model into the object instead of creating one.
The models are read from h5 files in the models/ directory.
"""
if self.combined is not None:
print("Warning: overriding a loaded model")
self.generator=load_model(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
self.discriminator=load_model(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
self.combined=load_model(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
def load_model_weights(self):
if self.combined is None:
print("Error: no model in object")
else:
try:
self.combined.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com.h5")
self.discriminator.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis.h5")
self.generator.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen.h5")
self.pretrained=True
except Exception as e:
print("Error: weights could not be loaded")
print(e)
def build_model(self):
"""
Builds a model in the object instead of loading one.
Uses Weight_model from cc_weights.py as the model definition.
"""
if self.combined is not None:
print("Warning: overriding a loaded model")
wm=Weight_model(self.img_cols,self.img_rows)
self.discriminator,self.generator,self.combined=wm.build_model()
def set_training_info(self):
self.info={}
import sys
try:
if len(sys.argv)==1:
choice=2
else:
choice=int(input("press 1 for last run or 2 for info.txt "))
except (ValueError, EOFError):
choice=False
if choice==1:
self.info=np.load("temp_info.npy").item()
return
elif choice==2:
with open("info.txt") as f:
for line in f:
(key, val) = line.split()
try:
self.info[key] = int(val)
except ValueError:
self.info[key] = float(val)
np.save("temp_info.npy", self.info)
return
else:
self.info["mask"]=int(input("Mask [1] or corner [0]? "))
if self.info['mask']==1:
tmp=input("Mask adress? (default: /masks) ")
self.info["mask_folder"]=tmp if isinstance(tmp, str) else "/masks"
self.info["epochs"]=int(input("Number of epochs? "))
self.info["batch_size"]=int(input("Batch size? "))
self.info["save_interval"]=int(input("save interval? "))
np.save("temp_info.npy", self.info)
def train_model(self):
def t(m,bol):
for layer in m.layers:
layer.trainable=bol
if self.info is None:
print("Warning no info found, prompting for info")
self.set_training_info()
globals().update(self.info)
if self.combined is None:
print("Error: no model loaded")
return
if self.pretrained:
print("Warning: model has pretrained weights")
half_batch = batch_size
for epoch in tqdm(range(epochs)):
X_train = plotload.load_polyp_batch(self.img_shape, batch_size, data_type='med/none',crop=False)
if corner:
masked_imgs, missing, mask = ms.mask_green_corner(X_train)
m=np.zeros(shape=X_train.shape)
for i in range(X_train.shape[0]):
m[i,mask[0]:mask[1],mask[2]:mask[3]]=missing[i]
missing=m
else:
masked_imgs, missing, mask = ms.mask_from_template(X_train)
if soft:
valid = 0.2*np.random.random_sample((half_batch,1))+0.9
fake = 0.1*np.random.random_sample((half_batch,1))
else:
valid = np.ones((half_batch, 1))
fake = np.zeros((half_batch, 1))
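# Note on the targets above (values inferred from the expressions):
# with soft labels the "real" targets fall in [0.9, 1.1) and the "fake"
# targets in [0.0, 0.1) - one-sided label smoothing that keeps the
# discriminator from becoming over-confident. Note that valid is
# re-assigned to hard ones a few lines below, so as written the
# smoothed targets only survive for the fake side.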
# ---------------------
# Train Generator
# ---------------------
valid = np.ones((batch_size, 1))
# Train the generator
t(self.discriminator,False)
g_loss = self.combined.train_on_batch(masked_imgs, [X_train, valid])
t(self.discriminator,True)
# ---------------------
# Train discriminator
# ---------------------
gen_fake = self.generator.predict(masked_imgs)
gen_fake = ms.combine_imgs_with_mask(gen_fake, X_train, mask)
if epoch%120==0 and epoch!=0:
#small shakeup to get out of local minima
fake, valid = valid , fake
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(X_train, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_fake, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Plot the progress
print ("[D: %f G: %f, mse: %f]" % (d_loss[0], g_loss[0], g_loss[1]))
if g_loss[1]<self.threshold:
self.threshold=g_loss[1]
self.generator.save(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
self.discriminator.save(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
self.combined.save(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
self.combined.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com.h5")
self.discriminator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis.h5")
self.generator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen.h5")
if g_loss[1]<self.threshold:
self.threshold=g_loss[1]
self.generator.save(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
self.discriminator.save(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
self.combined.save(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
self.combined.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com_fin.h5")
self.discriminator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis_fin.h5")
self.generator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen_fin.h5")
def build_wrapper(self):
"""
Returns a function that works as a complete preprocessing tool
"""
if mask==1:
def ret(input_img,mask=None):
"""
Without a corner, a mask must be added
"""
if not cutter.is_green(input_img):
return input_img
if mask is None:
mask=plotload.load_single_template(input_img.shape,dest='med/green')
img=input_img.copy()
if len(img.shape)==3:
img=np.expand_dims(img, 0)
predicted=np.squeeze(self.generator.predict(img),0)
img=masker.combine_imgs_with_mask(predicted, img, mask)
return np.expand_dims(img,0)
else:
def ret(input_img):
if not cutter.is_green(input_img):
return input_img
img=input_img.copy()
if len(img.shape)==3:
img=np.expand_dims(img, 0)
y1,y2,x1,x2=self.dims
img, _, _ = ms.mask_green_corner(img)
predicted=np.squeeze(self.generator.predict(img),0)
img=np.squeeze(img,0)
img[y1:y2,x1:x2]=predicted[y1:y2,x1:x2]
return np.expand_dims(img,0)
return ret
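# A hedged usage sketch of the wrapper (the paths and the [-1, 1] input
# scaling are inferred from the rest of this file, not documented):
#
#   cc = CCgan(256, 256)
#   cc.load_model()
#   preprocess = cc.build_wrapper()
#   cleaned = preprocess(img)  # img: HxWx3 array scaled to [-1, 1]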
def sample_images(self, epoch, imgs):
r, c = 3, 6
masked_imgs, missing_parts, m = ms.mask_from_template(imgs)
gen_fake1 = self.generator.predict(missing_parts)
gen_fake = ms.combine_imgs_with_mask(gen_fake1, imgs, m)
imgs = 0.5 * imgs + 0.5
masked_imgs = 0.5 * masked_imgs + 0.5
gen_fake = 0.5 * gen_fake + 0.5
gen_fake1 = 0.5 * gen_fake1 + 0.5
fig, axs = plt.subplots(r, c)
for i in range(c):
axs[0,i].imshow(imgs[i, :,:])
axs[0,i].axis('off')
axs[1,i].imshow(gen_fake[i, :,:])
axs[1,i].axis('off')
axs[2,i].imshow(gen_fake1[i,:,:])
axs[2,i].axis('off')
fig.savefig("images/cc_%d.png" % epoch)
plt.close()
def sort_folder(self,w,path=None):
import os
import cv2
from tqdm import tqdm
from shutil import copyfile
import sys
if path is not None:
dirs_i=[]
dirs_o=[]
d=next(os.walk(path))[1]
for i in d:
if i =='none' or i=='green' or i=='preprocessed':
continue
dirs_o.append(path+'preprocessed/'+i)
dirs_i.append(path+i)
for i in dirs_o:
if not os.path.exists(i):
os.makedirs(i)
else:
# the original referenced undefined names here (dirs, *_prep);
# the output locations below are assumed for illustration
polyps='polyps'
ulcerative_colitis='ulcerative-colitis'
polyps_prep='preprocessed/'+polyps
ulcerative_colitis_prep='preprocessed/'+ulcerative_colitis
dirs_i=[polyps,ulcerative_colitis]
dirs_o=[polyps_prep,ulcerative_colitis_prep]
if not os.path.exists(polyps_prep):
os.makedirs(polyps_prep)
if not os.path.exists(ulcerative_colitis_prep):
os.makedirs(ulcerative_colitis_prep)
for i,o in tqdm(zip(dirs_i,dirs_o)):
for img_name in os.listdir(i):
path=os.path.join(i,img_name)
img=plotload.load_one_img((self.img_cols,self.img_rows), dest=path,
extra_dim=True)
if cutter.is_green(img):
tmp=cv2.imwrite(os.path.join(o,img_name), cv2.cvtColor(127.5*w(img)[0]+127.5,cv2.COLOR_RGB2BGR))
else:
tmp=cv2.imwrite(os.path.join(o,img_name), cv2.cvtColor(127.5*img[0]+127.5,cv2.COLOR_RGB2BGR))
if __name__ == '__main__':
cc = CCgan(256,256)
#cc.build_model()
#cc.train_model()
cc.load_model()
#cc.load_model_weights()
w=cc.build_wrapper()
root='/home/mathias/Documents/kvasir-dataset-v2/med/'
cc.sort_folder(w,path=root)
cc.sort_folder(w,path='/media/mathias/A_New_Hope/medico_test/')
| 41.199346
| 142
| 0.546601
| 1,690
| 12,607
| 3.915385
| 0.168639
| 0.050778
| 0.070727
| 0.035363
| 0.425419
| 0.392625
| 0.371921
| 0.314644
| 0.291975
| 0.278676
| 0
| 0.022043
| 0.319902
| 12,607
| 305
| 143
| 41.334426
| 0.749708
| 0.058777
| 0
| 0.23176
| 0
| 0.077253
| 0.193537
| 0.097965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051502
| false
| 0
| 0.085837
| 0
| 0.175966
| 0.042918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|