This is a schema-plus-rows dump of a code dataset. Each record carries the columns below; the records that follow are shown one at a time, with the `content` field rendered as a fenced code block.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

In addition there are two families of quality-signal columns. The `*_quality_signal` fields (`qsc_code_*` and `qsc_codepython_*`) are float64, except `qsc_code_num_words_quality_signal` (int64) and `qsc_codepython_cate_var_zero_quality_signal` (bool). Their raw `qsc_code_*`/`qsc_codepython_*` counterparts are int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

In every complete record shown below, `path`, repo name, head hexsha, and licenses are identical across the stars, issues, and forks views, so they are listed once; every raw `qsc_*` field is 0 (with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null), `effective` is 1, and `hits` is 0. The per-record summaries therefore list only `avg_line_length`, `max_line_length`, `alphanum_fraction`, and the nonzero `_quality_signal` values, with names abbreviated by dropping the `qsc_code_`/`qsc_codepython_` prefix and the `_quality_signal` suffix; signals not listed are 0.
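As a concrete reading of this schema, here is a minimal sketch of how one might stream such records and filter on a few of the quality signals. It assumes the dump is stored as JSON Lines in a file named `records.jsonl`, and the threshold values are illustrative; only the field names and dtypes come from the schema above.

```python
import json

# Minimal sketch: scan a JSON Lines dump of records with the schema above
# and keep Python files that pass a couple of illustrative quality filters.
# "records.jsonl" and the thresholds are assumptions, not part of the dump.
def iter_quality_python(path="records.jsonl",
                        max_dupe_5grams=0.3,
                        min_alphanum=0.4):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            rec = json.loads(line)
            if rec.get("lang") != "Python":
                continue
            if rec.get("alphanum_fraction", 0.0) < min_alphanum:
                continue
            if rec.get("qsc_code_frac_chars_dupe_5grams_quality_signal", 0.0) > max_dupe_5grams:
                continue
            yield rec["hexsha"], rec["content"]

if __name__ == "__main__":
    for hexsha, content in iter_quality_python():
        print(hexsha, len(content))
```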
**Record 1: `utils.py` @ `loc-trinh/GrandmasterZero`**
hexsha `efaec9e129260471f4d26372fd487df99a205a00`; size 4,887; ext `py`; lang Python; head `58365890fe2b0145344f17be5fb59e08c8f1993a`; licenses `["MIT"]`. Stars: null; issues: null; forks: null.
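This record's `content` is a Jupyter helper module for python-chess: `view_game` replays a PGN file with ipywidgets navigation buttons, and `play_game` runs two player objects against each other with optional SVG rendering.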
```python
import pprint
import time

import chess.pgn
import IPython.display as display
import ipywidgets as widgets


def who(player):
    return 'White' if player == chess.WHITE else 'Black'


def get_last_move_san_from_board(board):
    if len(board.move_stack) == 0:
        return chess.Move.null()
    else:
        last_move = board.pop()
        move_san = board.san(last_move)
        board.push(last_move)
        return move_san


def view_game(pgn_file, manual=False, pause=0.1, print_text=False):
    pgn_file = open(pgn_file)
    game = chess.pgn.read_game(pgn_file)
    board = game.board()
    mainline_moves = list(reversed(game.mainline_moves()))

    def print_game_info(game):
        print('Event:', game.headers['Event'])
        print('White:', game.headers['White'])
        print('Black:', game.headers['Black'])
        print('Result:', game.headers['Result'])

    def backward_click(event):
        if len(board.move_stack) == 0:
            return
        move = board.pop()
        mainline_moves.append(move)
        render(with_manual=manual)

    def fbackward_click(event):
        if len(board.move_stack) == 0:
            return
        while len(board.move_stack) > 0:
            move = board.pop()
            mainline_moves.append(move)
        render(with_manual=manual)

    def foward_click(event):
        if len(mainline_moves) == 0:
            return
        move = mainline_moves.pop()
        board.push(move)
        render(with_manual=manual)

    def ffoward_click(event):
        if len(mainline_moves) == 0:
            return
        while len(mainline_moves) > 0:
            move = mainline_moves.pop()
            board.push(move)
        render(with_manual=manual)

    def render(with_manual):
        with output:
            html = "<b>Move {} {}, Play '{}':</b><br/>{}".format(
                len(board.move_stack), who(not board.turn),
                get_last_move_san_from_board(board), board._repr_svg_())
            display.clear_output(wait=True)
            display.display(display.HTML(html))
            if with_manual:
                layout = widgets.Layout(width='95px')
                btn_fbackward = widgets.Button(description='<<', layout=layout)
                btn_backward = widgets.Button(description='<', layout=layout)
                btn_forward = widgets.Button(description='>', layout=layout)
                btn_fforward = widgets.Button(description='>>', layout=layout)
                display.display(
                    widgets.HBox((btn_fbackward, btn_backward, btn_forward,
                                  btn_fforward)))
                btn_fbackward.on_click(fbackward_click)
                btn_backward.on_click(backward_click)
                btn_forward.on_click(foward_click)
                btn_fforward.on_click(ffoward_click)
            time.sleep(pause)

    print_game_info(game)
    if print_text:
        print(game.mainline_moves())
    else:
        output = widgets.Output()
        display.display(output)
        if manual:
            render(with_manual=manual)
        else:
            while len(mainline_moves) > 0:
                move = mainline_moves.pop()
                board.push(move)
                render(with_manual=manual)
                time.sleep(pause)


def play_game(white_player,
              black_player,
              visualize=False,
              move_limit=None,
              pause=0.1):
    board = chess.Board()
    try:
        while not board.is_game_over(claim_draw=True):
            if move_limit is not None and len(board.move_stack) >= move_limit:
                return (None, 'draw: reached move limit', board)
            if board.turn == chess.WHITE:
                move = white_player.move(board)
            else:
                move = black_player.move(board)
            board.push(move)
            if visualize:
                html = "<b>Move %s %s, Play '%s':</b><br/>%s" % (
                    len(board.move_stack), who(not board.turn),
                    get_last_move_san_from_board(board), board._repr_svg_())
                display.clear_output(wait=True)
                display.display(display.HTML(html))
                time.sleep(pause)
    except KeyboardInterrupt:
        msg = 'Game interrupted!'
        return (None, msg, board)
    result = None
    if board.is_checkmate():
        result = who(not board.turn)
        msg = 'checkmate: ' + result + ' wins!'
    elif board.is_stalemate():
        msg = 'draw: stalemate'
    elif board.is_fivefold_repetition():
        msg = 'draw: 5-fold repetition'
    elif board.is_insufficient_material():
        msg = 'draw: insufficient material'
    elif board.can_claim_draw():
        msg = 'draw: claim'
    else:
        raise Exception(
            'error: game ended without reaching correct ending conditions')
    return (result, msg, board)
```
Quality signals: avg_line_length 32.58, max_line_length 79, alphanum_fraction 0.575609, num_words 563, num_chars 4,887, mean_word_length 4.801066, frac_words_unique 0.207815, frac_chars_top_2grams 0.057714, frac_chars_top_3grams 0.031077, frac_chars_top_4grams 0.044025, frac_chars_dupe_5grams 0.353681, dupe_6grams 0.333703, dupe_7grams 0.290418, dupe_8grams 0.27044, dupe_9grams 0.244543, dupe_10grams 0.217906, frac_chars_digital 0.004479, frac_chars_whitespace 0.314713, size_file_byte 4,887, num_lines 149, num_chars_line_max 80, num_chars_line_mean 32.798658, frac_chars_alphabet 0.802628, frac_lines_dupe_lines 0.346457, frac_chars_string_length 0.067935, cate_ast 1, frac_lines_func_ratio 0.07874, cate_var_zero false, frac_lines_import 0.03937, frac_lines_simplefunc 0.007874, score_lines_no_logic 0.19685, frac_lines_print 0.07874.
**Record 2: `query.py` @ `urmi-21/COVID-biorxiv`**
hexsha `efb0224997c2a73db24a06482baa1e76838ea1f0`; size 2,904; ext `py`; lang Python; head `6dfe713c2634197b6c9983eb2aa3fa6676f7d045`; licenses `["MIT"]`. Stars: 2 (events 2020-06-29T16:55:17.000Z to 2020-09-21T14:00:16.000Z); issues: null; forks: 1 (both events 2020-09-21T14:00:23.000Z).
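The file queries the bioRxiv/medRxiv COVID-19 collection: it downloads `collection.json` via `curl`, searches every field of every record for keyword matches, and filters the hits by publication date.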
```python
import sys
import json
import requests
import subprocess
from datetime import datetime

# dict storing data
collection = {}


def execute_commandRealtime(cmd):
    """Execute shell command and print stdout in realtime.
    Function taken from pyrpipe Singh et.al. 2020
    usage:
    for output in execute_commandRealtime(['curl','-o',outfile,link]):
        print (output)
    """
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    for stdout_line in iter(popen.stdout.readline, ""):
        yield stdout_line
    popen.stdout.close()
    return_code = popen.wait()
    if return_code:
        raise subprocess.CalledProcessError(return_code, cmd)


def update_collection():
    '''
    Download bioarxiv and medarxiv collections
    '''
    link = 'https://connect.biorxiv.org/relate/collection_json.php?grp=181'
    outfile = 'collection.json'
    print('Downloading ...')
    for output in execute_commandRealtime(['curl', '-o', outfile, link]):
        print(output)


def read_collection():
    '''
    open file
    '''
    filename = 'collection.json'
    with open(filename) as f:
        data = json.load(f)
    i = 0
    for key, value in data.items():
        # print (key,":",value)
        if key == 'rels':
            val = data[key]
            print('{} records found'.format(len(val)))
            return value


def get_terms():
    print('Available terms:')
    for key, value in collection[0].items():
        print(key)


def searchall(keywords):
    result = []
    for k in keywords:
        result.extend(search(k))
    return result


def search(term):
    # search in collection is a list of dicts
    print('Searching', term)
    result = []
    for d in collection:
        # seach in all keys
        for key, value in d.items():
            if term.lower() in str(value).lower():
                # print (d['rel_title'])
                result.append(d)
    # print('total matches: {}'.format(len(result)))
    return result


def get_title(res):
    titles = []
    for d in res:
        if not d['rel_title'] in titles:
            titles.append(d['rel_title'])
            # print(d['rel_title'])
    return titles


def filter_date(res, startdate):
    '''
    keep results by date
    '''
    filtered = []
    for d in res:
        if datetime.strptime(d['rel_date'], '%Y-%m-%d') >= startdate:
            filtered.append(d)
    return filtered


# step 1 update collection downloads around 15 MB .json data
# update_collection()
# read collection in memory
collection = read_collection()
# see available terms
# get_terms()
# perform search
# res=search(' RNA-seq')
tosearch = [' RNA-seq', 'transcriptom', 'express', 'sequencing']
res = searchall(tosearch)
print(len(res))
print(len(get_title(res)))
fdate = datetime.strptime('2020-06-25', '%Y-%m-%d')
print('filtering results before', fdate)
final_res = get_title(filter_date(res, fdate))
print(len(final_res))
print('\n'.join(final_res))
```
Quality signals: avg_line_length 25.034483, max_line_length 82, alphanum_fraction 0.64084, num_words 376, num_chars 2,904, mean_word_length 4.87234, frac_words_unique 0.393617, frac_chars_top_2grams 0.010917, frac_chars_top_3grams 0.019651, frac_chars_top_4grams 0.021288, frac_chars_dupe_5grams 0.077511, dupe_6grams through dupe_10grams 0.065502 each, frac_chars_digital 0.008877, frac_chars_whitespace 0.224174, size_file_byte 2,904, num_lines 115, num_chars_line_max 83, num_chars_line_mean 25.252174, frac_chars_alphabet 0.804261, frac_chars_comments 0.212121, frac_lines_dupe_lines 0.086957, frac_chars_string_length 0.123922, cate_ast 1, frac_lines_func_ratio 0.115942, cate_var_zero false, frac_lines_import 0.072464, score_lines_no_logic 0.26087, frac_lines_print 0.15942.
**Record 3: `core/server/wx_handler.py` @ `Maru-zhang/FilmHub-Tornado`**
hexsha `efb3562ab2f0bc0a7a96ac315758b6464fb9c4ea`; size 1,336; ext `py`; lang Python; head `870da52cec65920565439d2d5bb1424ae614665d`; licenses `["Apache-2.0"]`. Stars: 2 (events 2017-07-19T01:24:05.000Z to 2017-07-19T09:12:46.000Z); issues: null; forks: 1 (both events 2017-07-28T09:31:42.000Z).
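A Tornado `RequestHandler` for the WeChat web-page OAuth flow: it exchanges the `code` query argument for an access token, extracts the `openid`, and redirects to a URL selected by the `state` parameter.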
```python
import tornado.web

from core.logger_helper import logger
from core.server.wxauthorize import WxConfig
from core.server.wxauthorize import WxAuthorServer
from core.cache.tokencache import TokenCache


class WxHandler(tornado.web.RequestHandler):
    """
    WeChat handler class
    """

    '''WeChat configuration'''
    wx_config = WxConfig()
    '''WeChat web-page authorization server'''
    wx_author_server = WxAuthorServer()
    '''redis service'''
    wx_token_cache = TokenCache()

    def post(self, flag):
        if flag == 'wxauthor':
            '''WeChat web-page authorization'''
            code = self.get_argument('code')
            state = self.get_argument('state')
            # get the redirect url
            redirect_url = self.wx_config.wx_menu_state_map[state]
            logger.debug('[WeChat web auth] redirect target: redirct_url[' + redirect_url + ']')
            logger.debug('[WeChat web auth] user granted authorization, got code>>>>code[' + code + ']state[' + state + ']')
            if code:
                # exchange the code for a web-authorization access_token
                data = self.wx_author_server.get_auth_access_token(code)
                openid = data['openid']
                logger.debug('[WeChat web auth] openid>>>>openid[' + openid + ']')
                if openid:
                    # jump to our own business page
                    self.redirect(redirect_url)
                else:
                    # could not obtain an openid
                    logger.debug('could not obtain openid')
```
Quality signals: avg_line_length 32.585366, max_line_length 91, alphanum_fraction 0.569611, num_words 128, num_chars 1,336, mean_word_length 5.773438, frac_words_unique 0.414063, frac_chars_top_2grams 0.043302, frac_chars_top_3grams 0.069012, frac_chars_top_4grams 0.067659, frac_chars_dupe_5grams 0.083897, frac_chars_whitespace 0.312126, size_file_byte 1,336, num_lines 40, num_chars_line_max 92, num_chars_line_mean 33.4, frac_chars_alphabet 0.804135, frac_chars_comments 0.051647, frac_chars_string_length 0.107293, frac_chars_long_word_length 0.071249, cate_ast 1, frac_lines_func_ratio 0.041667, cate_var_zero false, frac_lines_import 0.208333, score_lines_no_logic 0.416667.
**Record 4: `Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py` @ `Lotame/DataStream_Cookbook`**
hexsha `efb4030a249dafcb2be0137ce898a4f573bed62c`; size 2,771; ext `py`; lang Python; head `3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6`; licenses `["MIT"]`. Stars: 1 (both events 2022-02-28T10:40:53.000Z); issues: 2 (events 2021-01-08T17:51:10.000Z to 2021-03-29T11:36:07.000Z); forks: 3 (events 2020-01-26T23:31:23.000Z to 2022-02-18T19:29:30.000Z).
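A CLI that flattens Lotame DataStream mapping JSON files into a delimited CSV, one row per behavior/hierarchy pair, with `--mapping_path`, `--csv_name`, `--csv_dir`, and `--delimiter` options.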
```python
#!/usr/bin/python
#
# Write in Python3.6
# Filename:
#
#   Mapping_JsonToCsvExtractor.py
#
#
# Basic Usage:
#
#   python Mapping_JsonToCsvExtractor.py /directory/containing/datastream/mapping/json/files
#
# Utilities
import sys
import os
import json
import argparse


def writeCsvHeader(delimiter, csv_file, *args):
    csv_file.write(delimiter.join(args))
    csv_file.write("\n")


# write a line to the target file
def writeCsvLine(delimiter, csv_file, *args):
    csv_file.write(delimiter.join([str(i) for i in args]))
    csv_file.write("\n")


def main():
    parser = argparse.ArgumentParser(description='Parse the mapping json file to CSV format')
    parser.add_argument('--mapping_path', dest='mapping_path', required=True,
                        help='the path for the mapping json file')
    parser.add_argument('--csv_name', dest='csv_name', required=False, default='mapping.csv',
                        help='specify the file name to write the csv file')
    parser.add_argument('--csv_dir', dest='csv_dir', required=False, default='',
                        help='specify the dir to write the output file')
    parser.add_argument('--delimiter', dest='delimiter', required=False, default='\001',
                        help='specify the delimiter to write the output file')
    args = parser.parse_args()

    mapping_path = args.mapping_path
    csv_dir = args.csv_dir if args.csv_dir else mapping_path
    csv_name = args.csv_name
    delimiter = args.delimiter

    if not os.path.isdir(mapping_path):
        print("The mapping file path does not exist, confirm it again")
        sys.exit()
    if not os.path.isdir(csv_dir):
        print("the specific csv_dir path %s does not exist, create it now" % csv_dir)
        os.system("mkdir -p %s" % csv_dir)

    output_path = os.path.join(csv_dir, csv_name)
    output = open(output_path, 'w')
    writeCsvHeader(delimiter, output, "behavior_id", "hierarchy_path", "hierarchy_id")
    for file in os.listdir(mapping_path):
        if not file.endswith("json"):
            print("%s is not a json file, skip it" % file)
            continue
        file_path = os.path.join(mapping_path, file)
        with open(file_path, 'r') as f:
            for line in f:
                js = json.loads(line.strip())
                behid = js.get('behavior_id')
                # if behavior id smaller than 0, it should be illegal, skip
                if behid < 0:
                    continue
                # for each hierarchy, write a line
                for hierpath in js.get('hierarchy_nodes', []):
                    writeCsvHeader(delimiter, output, str(behid), str(hierpath.get("path", "")), str(hierpath.get("id", -1)))
    output.close()


if __name__ == '__main__':
    sys.exit(main())
```
Quality signals: avg_line_length 35.987013, max_line_length 125, alphanum_fraction 0.631902, num_words 373, num_chars 2,771, mean_word_length 4.552279, frac_words_unique 0.289544, frac_chars_top_2grams 0.035336, frac_chars_top_3grams 0.025913, frac_chars_top_4grams 0.037691, frac_chars_dupe_5grams 0.143698, dupe_6grams through dupe_9grams 0.053004 each, frac_chars_digital 0.003846, frac_chars_whitespace 0.249368, size_file_byte 2,771, num_lines 76, num_chars_line_max 126, num_chars_line_mean 36.460526, frac_chars_alphabet 0.8125, frac_chars_comments 0.114399, frac_lines_dupe_lines 0.08, frac_chars_string_length 0.221083, cate_ast 1, frac_lines_func_ratio 0.06, cate_var_zero false, frac_lines_import 0.08, score_lines_no_logic 0.14, frac_lines_print 0.06.
**Record 5: `main.py` @ `DayvsonAlmeida/Programa-o-Gen-tica`**
hexsha `efb55216c30cf2837e4576480260417e73138279`; size 4,088; ext `py`; lang Python; head `6edaceab99c61f55f4157e81fcf7cbad580f81d1`; licenses `["MIT"]`. Stars: null; issues: null; forks: null.
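A genetic-programming experiment driver: it builds synthetic regression targets (2x, sqrt(x+y)+z, a trigonometric mix, the Pythagorean theorem, E = mc², and Newtonian gravitation), sweeps crossover rate, mutation rate, and population size over the `Hard` base, and writes loss histories and run times to CSV.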
```python
from utils import initialize
from pandas import DataFrame
from genetic import GA
import numpy as np
import argparse
import random
import time
import sys

sys.setrecursionlimit(2000)
random.seed(time.time())

parser = argparse.ArgumentParser()
parser.add_argument('--mr', help='Mutation Rate')
parser.add_argument('--cr', help='Crossover Rate')
parser.add_argument('--size', help='Population Size')
parser.add_argument('--ngen', help='Number of Generations')
parser.add_argument('--base', help='Test base [Easy, Middle, Hard, Newton, Einstein, Pythagorean]')
args, unknown = parser.parse_known_args()

# cls && python main.py --mr 0.05 --cr 0.8 --size 100 --ngen 5000 --base Easy
# cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
mutation_rate = float(args.mr)
crossover_rate = float(args.cr)
size = int(args.size)
ngen = int(args.ngen)
test = args.base

# f(x) = 2*x
easy = {}
easy['x'] = {'a': np.array(np.arange(100), dtype='float64')}
easy['y'] = easy['x']['a']*2
easy['terminal_symb'] = ['a']

# f(x,y,z) = sqrt(x+y)+z
medium = {}
medium['x'] = {'x': np.array(np.arange(100), dtype='float64'),
               'y': np.array(np.random.randint(100)),  # , dtype='float64'),
               'z': np.array(np.random.randint(100))}  # , dtype='float64')}
medium['y'] = (medium['x']['x']+medium['x']['y'])**0.5 + medium['x']['z']
medium['terminal_symb'] = ['x', 'y', 'z']

# f(x,y,z) = sin(x)+sqrt(y)-tan(z+x)
hard = {}
hard['x'] = {'x': np.array(np.arange(100), dtype='float64'),
             'y': np.array(np.random.randint(100), dtype='float64'),
             'z': np.array(np.random.randint(100), dtype='float64')}
hard['y'] = np.sin(hard['x']['x']) + hard['x']['y']**0.5 - np.tan(hard['x']['z'] + hard['x']['x'])
hard['terminal_symb'] = ['x', 'y', 'z']

# Pythagorean Theorem
# c² = a²+b²
pythagorean_theorem = {}
pythagorean_theorem['x'] = {'a': np.array(np.random.randint(100, size=100), dtype='float64'),
                            'b': np.array(np.arange(100), dtype='float64')}
pythagorean_theorem['y'] = pythagorean_theorem['x']['a']**2 + pythagorean_theorem['x']['b']**2
pythagorean_theorem['terminal_symb'] = ['a', 'b']

# Einstein's Theory of Relativity
# E = m*c²
# c = 299,792,458 m/s
einstein_relativity = {}
einstein_relativity['x'] = {'m': np.random.random(100)}
einstein_relativity['y'] = einstein_relativity['x']['m']*(299792458**2)  # c²=89875517873681764
einstein_relativity['terminal_symb'] = ['m']

# Newton's Universal Law of Gravitation
# F = G*m1*m2/d²
G = 6.674*10E-11
newton_law = {}
newton_law['x'] = {'m1': 10*np.array(np.random.random(100), dtype='float64'),
                   'm2': np.array(np.random.randint(100, size=100), dtype='float64'),
                   'd': np.array(np.random.randint(100, size=100)+np.random.rand(100)+10E-11, dtype='float64')}
newton_law['y'] = (newton_law['x']['m1']*newton_law['x']['m2']*G)/(newton_law['x']['d']**2)
newton_law['terminal_symb'] = ['m1', 'm2', 'd']

base = {'Easy': easy, 'Pythagorean': pythagorean_theorem,
        'Middle': medium, 'Hard': hard,
        'Newton': newton_law,
        "Einstein": einstein_relativity}

# cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
results = {}
duration = {}
ngen = 2000
for test in ['Hard']:  # ,'Hard','Hard']:
    for crossover_rate in [0.7, 0.8]:
        for mutation_rate in [0.05]:  # , 0.1, 0.2]:
            for size in [10, 100]:
                ga = GA(terminal_symb=base[test]['terminal_symb'], x=base[test]['x'], y=base[test]['y'], size=size,
                        num_generations=ngen, crossover_rate=crossover_rate, mutation_rate=mutation_rate, early_stop=0.1)
                ga.run()
                loss = ga.loss_history
                loss = np.concatenate((loss, [loss[len(loss)-1] for i in range(ngen - len(loss))]))
                results[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = loss
                duration[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = [ga.duration]

df = DataFrame(results)
df.to_csv('Resultados Hard GA.csv', index=False, decimal=',', sep=';')
df = DataFrame(duration)
df.to_csv('Duração Hard GA.csv', index=False, decimal=',', sep=';')
```
Quality signals: avg_line_length 40.88, max_line_length 107, alphanum_fraction 0.634785, num_words 638, num_chars 4,088, mean_word_length 3.965517, frac_words_unique 0.211599, frac_chars_top_2grams 0.066403, frac_chars_top_3grams 0.042688, frac_chars_top_4grams 0.047431, frac_chars_dupe_5grams 0.267589, dupe_6grams 0.25415, dupe_7grams 0.251383, dupe_8grams 0.204743, dupe_9grams 0.192095, dupe_10grams 0.192095, frac_chars_digital 0.06556, frac_chars_whitespace 0.130626, size_file_byte 4,088, num_lines 99, num_chars_line_max 108, num_chars_line_mean 41.292929, frac_chars_alphabet 0.646314, frac_chars_comments 0.128669, frac_chars_string_length 0.146674, cate_ast 1, cate_var_zero false, frac_lines_import 0.105263, score_lines_no_logic 0.105263.
**Record 6: `file-convertors/pdf-to-image/pdf_to_image.py` @ `fraserlove/python-productivity-scripts`**
hexsha `efc4891e8e505e8dc24f5447323153c9667f9326`; size 1,220; ext `py`; lang Python; head `4a667446250042b01e307c7e4be53defc905207e`; licenses `["MIT"]`. Stars: null; issues: null; forks: null.
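A batch PDF-to-JPEG converter built on pdf2image: it converts either just the first page of each PDF in `pdf_dir` or every page into a per-PDF subfolder of `img_dir`.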
```python
'''
PDF to Image Converter
Author: Fraser Love, me@fraser.love
Created: 2020-06-13
Latest Release: v1.0.1, 2020-06-21
Python: v3.6.9
Dependancies: pdf2image

Converts multiple pdf's to images (JPEG format) and stores them in a logical folder structure under the desired image directory.
Usage: Update the pdf_dir and img_dir paths to point to the directory that holds the pdf files and the directory that the
generated images should be placed under.
'''
from pdf2image import convert_from_path
import os

pdf_dir = 'pdfs/'  # Include trailing forward slash
img_dir = 'images/'
first_page_only = True  # Only convert the first page of the pdf to an image

pdf_names = [pdf_name.split('.')[0] for pdf_name in os.listdir(pdf_dir) if pdf_name[-4:] == ".pdf"]

for pdf_name in pdf_names:
    pages = convert_from_path('{}{}.pdf'.format(pdf_dir, pdf_name))
    if first_page_only:
        pages[0].save('{}/{}.jpg'.format(img_dir, pdf_name), 'JPEG')
    else:
        directory = '{}{}'.format(img_dir, pdf_name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        for i, page in enumerate(pages):
            page.save('{}{}/{}-{}.jpg'.format(img_dir, pdf_name, pdf_name, i), 'JPEG')
```
Quality signals: avg_line_length 36.969697, max_line_length 128, alphanum_fraction 0.694262, num_words 195, num_chars 1,220, mean_word_length 4.2, frac_words_unique 0.446154, frac_chars_top_2grams 0.076923, frac_chars_top_3grams 0.04884, frac_chars_top_4grams 0.054945, frac_chars_dupe_5grams 0.086691, dupe_6grams 0.063492, dupe_7grams 0.063492, frac_chars_digital 0.027136, frac_chars_whitespace 0.184426, size_file_byte 1,220, num_lines 33, num_chars_line_max 129, num_chars_line_mean 36.969697, frac_chars_alphabet 0.79598, frac_chars_comments 0.439344, frac_chars_string_length 0.088757, cate_ast 1, cate_var_zero false, frac_lines_import 0.125, score_lines_no_logic 0.125.
**Record 7: `tests/test/search/test_references_searcher_string.py` @ `watermelonwolverine/fvttmv`**
hexsha `efc5229f2a8966dc64e04e1c67caf2f4bee4df93`; size 4,217; ext `py`; lang Python; head `8689d47d1f904dd2bf0a083de515fda65713c460`; licenses `["MIT"]`. Stars: 1 (both events 2022-03-30T19:12:14.000Z); issues: null; forks: null.
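A test suite for `ReferencesSearcherString` from the fvttmv package, checking that JSON-style (`"img":"..."`) and HTML-style (`<img src=...>`) references are each detected only by their own matcher, and that illegal characters raise `FvttmvException`.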
```python
from fvttmv.exceptions import FvttmvException
from fvttmv.reference_tools import ReferenceTools
from fvttmv.search.__references_searcher_string import ReferencesSearcherString
from test.common import TestCase


class ReferencesSearcherStringTest(TestCase):
    json_base_str = "\"img\":\"{0}\""
    html_base_str = "<img src=\\\"{0}\\\">"
    reference = "this/is/just/a/test"
    json_str = json_base_str.format(reference)
    html_str = html_base_str.format(reference)

    def test_contain_json_references(self):
        print("test_contain_json_references")
        result = ReferencesSearcherString._does_contain_json_references(self.json_str,
                                                                        self.reference)
        self.assertTrue(result)

    def test_contain_json_references2(self):
        print("test_contain_json_references2")
        result = ReferencesSearcherString._does_contain_json_references(self.json_str,
                                                                        "this/is/just/a")
        self.assertFalse(result)

    def test_contain_json_references3(self):
        print("test_contain_json_references3")
        result = ReferencesSearcherString._does_contain_json_references(self.json_str,
                                                                        "this/is/a/false/test")
        self.assertFalse(result)

    def test_contain_json_references4(self):
        print("test_contain_json_references4")
        result = ReferencesSearcherString._does_contain_json_references(self.html_str,
                                                                        self.reference)
        self.assertFalse(result)

    def test_contain_html_references1(self):
        print("test_contain_html_references1")
        result = ReferencesSearcherString._does_contain_html_references(self.html_str,
                                                                        self.reference)
        self.assertTrue(result)

    def test_contain_html_references2(self):
        print("test_contain_html_references2")
        result = ReferencesSearcherString._does_contain_html_references(self.html_str,
                                                                        "this/is/just/a")
        self.assertFalse(result)

    def test_contain_html_references3(self):
        print("test_contain_html_references3")
        result = ReferencesSearcherString._does_contain_html_references(self.html_str,
                                                                        "this/is/a/false/test")
        self.assertFalse(result)

    def test_contain_html_references4(self):
        print("test_contain_html_references4")
        result = ReferencesSearcherString._does_contain_html_references(self.json_str,
                                                                        self.reference)
        self.assertFalse(result)

    def test_contain_references1(self):
        print("test_contain_references1")
        result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
                                                                  self.reference)
        self.assertTrue(result)

    def test_contain_references2(self):
        print("test_contain_references2")
        result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
                                                                  "this/is/just/a")
        self.assertFalse(result)

    def test_contain_references3(self):
        print("test_contain_references3")
        result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
                                                                  "this/is/a/false/test")
        self.assertFalse(result)

    def test_does_references_exceptions(self):
        print("test_does_contain_html_references_exceptions")
        for char in ReferenceTools.illegal_chars:
            try:
                ReferencesSearcherString.does_contain_references(self.json_str,
                                                                 char)
                self.fail()
            except FvttmvException:
                pass
```
Quality signals: avg_line_length 40.548077, max_line_length 96, alphanum_fraction 0.591653, num_words 381, num_chars 4,217, mean_word_length 6.183727, frac_words_unique 0.136483, frac_chars_top_2grams 0.102716, frac_chars_top_3grams 0.066214, frac_chars_top_4grams 0.093379, frac_chars_dupe_5grams 0.771222, dupe_6grams 0.546689, dupe_7grams 0.546689, dupe_8grams 0.48472, dupe_9grams 0.48472, dupe_10grams 0.370119, frac_chars_digital 0.007894, frac_chars_whitespace 0.339104, size_file_byte 4,217, num_lines 103, num_chars_line_max 97, num_chars_line_mean 40.941748, frac_chars_alphabet 0.83746, frac_lines_dupe_lines 0.418919, frac_chars_string_length 0.114774, frac_chars_long_word_length 0.082286, frac_lines_assert 0.148649, cate_ast 1, frac_lines_func_ratio 0.162162, cate_var_zero false, frac_lines_pass 0.013514, frac_lines_import 0.054054, score_lines_no_logic 0.297297, frac_lines_print 0.162162.
**Record 8: `vtk.py` @ `becklabs/geotag-gui`**
hexsha `efc64b0b3d469f8a4e23675a9039dc1fed37be48`; size 4,999; ext `py`; lang Python; head `c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03`; licenses `["MIT"]`. Stars: null; issues: null; forks: null.
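A video geotagging helper: it recovers a creation timestamp from the first frame via Tesseract OCR (for the `trident` config) or from container metadata via hachoir, derives per-frame timestamps from the frame rate, and uses three `Writer` threads to dump tagged frames with GPS EXIF data through GPSPhoto.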
```python
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 19:50:43 2020

@author: beck
"""
import cv2
import datetime
import dateparser
import os
import sys
import pandas as pd
import pytz
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from PIL import Image
import numpy as np
import pytesseract
import imutils
import time
from GPSPhoto import gpsphoto
from threading import Thread


def firstFrame(video):
    if 'timestamp_frame' not in os.listdir(os.getcwd()):
        os.mkdir('timestamp_frame/')
    video_capture = cv2.VideoCapture(video)
    file = 'timestamp_frame/'+video+'_' + str(0)+'.jpg'
    while(True):
        ret, frame = video_capture.read()
        if not ret:
            break
        im = frame
        break
    video_capture.release()
    PIL_image = Image.fromarray(im.astype('uint8'), 'RGB')
    return PIL_image


def formatFrame(image, LEFT=50, TOP=20, RIGHT=250, BOTTOM=90):
    image = image.crop((LEFT, TOP, RIGHT, BOTTOM))
    image = np.array(image.convert('RGB'))[:, :, ::-1].copy()
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    return thresh


def getCreationDate(filename, config):
    if config == 'trident':
        pytesseract.pytesseract.tesseract_cmd = 'Tesseract-OCR\\tesseract.exe'
        image = formatFrame(firstFrame(filename))
        data = pytesseract.image_to_string(image, lang='eng', config='--psm 6')
        data_str = str(data).split('\n')
        metadata = dateparser.parse(data_str[0] + ' ' + data_str[1])
    else:
        parser = createParser(filename)
        metadata = extractMetadata(parser).get('creation_date')
    return metadata


def getOffsets(file):
    # GET DELTA SECONDS FOR EVERY FRAME
    cap = cv2.VideoCapture(file)
    totalframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    offsets = [0]
    for i in range(totalframes-1):
        offsets.append(offsets[-1]+1000/fps)
    offsets = [datetime.timedelta(milliseconds=i) for i in offsets]
    return offsets


def getTimestamps(file, config):
    offsets = getOffsets(file)
    creationdate = getCreationDate(file, config)
    # CALCULATE TIMESTAMPS
    timestamps = [(creationdate+offset).replace(tzinfo=pytz.timezone('UTC')) for offset in offsets]
    # GENERATE FRAME NAMES
    frames = [file.split('/')[-1]+'_'+str(i)+'.jpg' for i in range(len(timestamps))]
    # EXPORT DATA AS CSV
    df = pd.DataFrame()
    df['Frame'] = frames
    df['Timestamp'] = timestamps
    return df


def getFps(file):
    cap = cv2.VideoCapture(file)
    return int(cap.get(cv2.CAP_PROP_FPS))


class Writer:
    def __init__(self, stream, export_path, taggedDF, parent, controller):
        self.taggedDF = taggedDF.reset_index()
        self.export_path = export_path
        self.taggedList = [self.taggedDF.loc[i, 'Frame'] for i in range(len(self.taggedDF['Frame']))]
        self.frame_inds = [int(i.split('.')[1].split('_')[1]) for i in self.taggedList]
        self.parent = parent
        self.controller = controller
        self.stream = cv2.VideoCapture(stream)
        self.thread = Thread(target=self.write, args=())
        self.thread.setDaemon(True)

    def write(self):
        i = 0
        for frame_ind in self.frame_inds:
            self.stream.set(cv2.CAP_PROP_POS_FRAMES, frame_ind)
            (grabbed, frame) = self.stream.read()
            frame_path = self.export_path+self.taggedList[self.frame_inds.index(frame_ind)]
            cv2.imwrite(frame_path, frame)
            # ADD METADATA
            photo = gpsphoto.GPSPhoto(frame_path)
            info = gpsphoto.GPSInfo((self.taggedDF.loc[i, 'Latitude'],
                                     self.taggedDF.loc[i, 'Longitude']),
                                    timeStamp=self.taggedDF.loc[i, 'Timestamp'],
                                    alt=int(self.taggedDF.loc[i, 'Elevation']))
            photo.modGPSData(info, frame_path)
            self.parent.num += 1
            i += 1
            self.parent.e_status.set('Writing: '+str(self.parent.num)+'/'+str(self.parent.denom))
        self.stream.release()
        return


def createFrames(path, export_path, taggedDF, parent, controller):
    x = len(taggedDF)
    a = int(round(x/3))
    b = int(a*2)
    writer1 = Writer(path, export_path, taggedDF.iloc[:a], parent, controller)
    writer2 = Writer(path, export_path, taggedDF.iloc[a:b], parent, controller)
    writer3 = Writer(path, export_path, taggedDF.iloc[b:], parent, controller)
    writer1.thread.start()
    writer2.thread.start()
    writer3.thread.start()
    writer1.thread.join()
    writer2.thread.join()
    writer3.thread.join()
    parent.e_status.set('Done')
```
Quality signals: avg_line_length 35.707143, max_line_length 102, alphanum_fraction 0.619324, num_words 609, num_chars 4,999, mean_word_length 4.990148, frac_words_unique 0.333333, frac_chars_top_2grams 0.026324, frac_chars_top_3grams 0.009872, frac_chars_top_4grams 0.026324, frac_chars_dupe_5grams 0.102994, dupe_6grams 0.052978, dupe_7grams 0.036196, frac_chars_digital 0.02043, frac_chars_whitespace 0.255851, size_file_byte 4,999, num_lines 140, num_chars_line_max 103, num_chars_line_mean 35.707143, frac_chars_alphabet 0.796505, frac_chars_comments 0.035207, frac_lines_dupe_lines 0.035714, frac_chars_string_length 0.044521, frac_chars_long_word_length 0.005993, cate_ast 1, frac_lines_func_ratio 0.080357, cate_var_zero false, frac_lines_import 0.142857, score_lines_no_logic 0.294643.
**Record 9: `ROS_fall_detection/src/detector.py` @ `SeanChen0220/Posefall`**
hexsha `efc6952d49bfc96baa0e1e3a017cc887fba50c18`; size 4,237; ext `py`; lang Python; head `f27eedc0a624cc2875d14ffa276cf96cdfc1b410`; licenses `["MIT"]`. Stars: 15 (events 2021-08-08T08:41:54.000Z to 2022-03-30T10:12:49.000Z); issues: 1 (events 2021-11-24T16:51:51.000Z to 2021-12-03T06:20:11.000Z); forks: 3 (events 2021-08-08T08:41:55.000Z to 2022-03-15T07:28:53.000Z).
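A ROS node for fall detection: it subscribes to `cam_image`, runs an LPN pose network and a fall classifier on each frame, smooths the decision over the last 30 frames, and publishes the annotated image on `det_result`.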
```python
#! /home/seanchen/anaconda3/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# import sys
import rospy
from std_msgs.msg import String
import torch
import torch.nn.parallel
import torch.nn.functional as F
import numpy as np
import cv2
from LPN import LPN
from fall_net import Fall_Net
from pose_utils import Cropmyimage
from pose_utils import Drawkeypoints
import plot_sen
from time import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')

global cam_image


def callback(data):
    try:
        global cam_image
        cam_image = np.frombuffer(data.data, dtype=np.uint8).reshape((data.height, data.width, -1))
        # print(cam_image.shape)
        # show_image = bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)


if __name__ == '__main__':
    rospy.init_node('detector', anonymous=True)
    pub = rospy.Publisher('det_result', Image, queue_size=10)
    rospy.Subscriber('cam_image', Image, callback)
    rate = rospy.Rate(50)  # 10hz
    # model
    pose_net = LPN(nJoints=17)
    pose_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/pose_net_pred100.pth.tar'))
    pose_net.cuda()
    fall_net = Fall_Net(64, 48, 17, device=torch.device('cuda'))
    fall_net.cuda().double()
    fall_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/fall_net_pred5.pth.tar'))
    pose_net.eval()
    fall_net.eval()
    print('Load successfully!')
    bridge = CvBridge()
    global cam_image
    cam_image = np.array([])
    fall_count = []
    while not rospy.is_shutdown():
        rate.sleep()
        if not cam_image.any():
            print('waiting!')
            continue
        start = time()
        # run detection once per incoming image and refresh the display
        # image initialize
        # photo_file = '/home/seanchen/robot_fall_det/fall1.jpg'
        # input = cv2.imread(photo_file)  # cv2 returns an np.array of shape (w, h, channel)
        input = cam_image
        bbox = [0, 0, input.shape[1], input.shape[0]]
        input_image, details = Cropmyimage(input, bbox)
        input_image = np.array([input_image.numpy()])
        # print(input_image.shape)
        input_image = torch.from_numpy(input_image)
        # input_image.cuda()
        # get posedetails
        pose_out = pose_net(input_image.cuda())
        fall_out, pose_cor = fall_net(pose_out)
        # compute the fall-detection result
        # visualize the pose
        neck = (pose_cor[:, 5:6, :] + pose_cor[:, 6:7, :]) / 2
        pose_cor = torch.cat((pose_cor, neck), dim=1)
        pose_cor = pose_cor * 4 + 2.
        scale = torch.Tensor([[256, 192]]).cuda()
        pose_cor = pose_cor / scale
        scale = torch.Tensor([[details[3]-details[1], details[2]-details[0]]]).cuda()
        pose_cor = pose_cor * scale
        scale = torch.Tensor([[details[1], details[0]]]).cuda()
        pose_cor = pose_cor + scale
        # pose_cor_1 = (4*pose_cor[:, :, 0]+2.)/64*(details[3]-details[1])/4+details[1]
        # pose_cor_2 = (4*pose_cor[:, :, 1]+2.)/48*(details[2]-details[0])/4+details[0]
        pose_cor = torch.flip(pose_cor, dims=[2])
        ones = torch.ones(1, 18, 1).cuda()
        pose_cor = torch.cat((pose_cor, ones), dim=2).cpu().detach().numpy()
        # det_result = torch.zeros(64, 48, 3).numpy()
        det_result = plot_sen.plot_poses(input, pose_cor)
        # print(det_result.shape)
        # fall estimation
        # if fall_out.indices == 1:
        #     print('Down!')
        # if fall_out.indices == 0:
        #     print('Not Down!')
        fall_out = torch.max(F.softmax(fall_out, dim=0), dim=0)
        fall_count.append(fall_out.indices)
        fall_dis = sum(fall_count[len(fall_count)-30 : len(fall_count)])
        # print(len(fall_count))
        end = time()
        run_time = end-start
        if fall_dis > 24:
            print('Normal!', 1. / run_time)
        else:
            print('Down!', 1. / run_time)
        det_result = bridge.cv2_to_imgmsg(det_result, encoding="passthrough")
        pub.publish(det_result)
        # print(1. / run_time)
    # spin() simply keeps python from exiting until this node is stopped
    # rospy.spin()
    # while True:
    #     pass
```
Quality signals: avg_line_length 35.605042, max_line_length 99, alphanum_fraction 0.630399, num_words 597, num_chars 4,237, mean_word_length 4.246231, frac_words_unique 0.319933, frac_chars_top_2grams 0.06075, frac_chars_top_3grams 0.017357, frac_chars_top_4grams 0.022091, frac_chars_dupe_5grams 0.136095, dupe_6grams 0.126627, dupe_7grams 0.090335, dupe_8grams 0.090335, dupe_9grams 0.074951, dupe_10grams 0.074951, frac_chars_digital 0.029611, frac_chars_whitespace 0.234836, size_file_byte 4,237, num_lines 118, num_chars_line_max 100, num_chars_line_mean 35.90678, frac_chars_alphabet 0.752313, frac_chars_comments 0.203446, frac_lines_dupe_lines 0.0375, frac_chars_string_length 0.058032, frac_chars_long_word_length 0.031708, cate_ast 1, frac_lines_func_ratio 0.0125, cate_var_zero false, frac_lines_pass 0.0125, frac_lines_import 0.225, score_lines_no_logic 0.2375, frac_lines_print 0.075.
**Record 10: `tensorflow1.x/sound_conv.py` @ `wikeex/tensorflow-learning`**
hexsha `efc705c5b7dd44b358486c8f4931ee3c4faede41`; size 3,696; ext `py`; lang Python; head `a6ab7c99455711e9f3c015e0abb04fa58342e0cb`; licenses `["MIT"]`. Stars: null; issues: null; forks: null.
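A TensorFlow 1.x CNN for sound classification: two conv/max-pool stages over a 512x80 input, two fully connected layers, per-layer Adam optimizers with gradient clipping, and a periodic test-accuracy report.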
```python
import tensorflow as tf
from sound_lstm_test import data

batch_size = 10

x = tf.placeholder(tf.float32, [batch_size, 512, 80])
y_ = tf.placeholder(tf.float32, [batch_size, 59])

w_conv1 = tf.Variable(tf.truncated_normal([16, 2, 1, 64], stddev=0.1), name='conv1_w')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]), name='conv1_b')
x_image = tf.reshape(x, [-1, 512, 80, 1])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, w_conv1, strides=[1, 2, 1, 1], padding='VALID') + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

w_conv2 = tf.Variable(tf.truncated_normal([2, 16, 64, 128], stddev=0.1), name='conv2_w')
b_conv2 = tf.Variable(tf.constant(0.1, shape=[128]), name='conv2_b')
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, w_conv2, strides=[1, 1, 1, 1], padding='VALID') + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

w_fc1 = tf.Variable(tf.truncated_normal([61 * 12 * 128, 1024], stddev=0.1), name='fc1_w')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name='fc1_b')
h_pool2_flat = tf.reshape(h_pool2, [-1, 61 * 12 * 128])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

rate = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, rate=rate)

w_fc2 = tf.Variable(tf.truncated_normal([1024, 59], stddev=0.1), name='fc2_w')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[59]), name='fc2_b')
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

variables = tf.trainable_variables()
conv1_variable = [t for t in variables if t.name.startswith('conv1')]
conv2_variable = [t for t in variables if t.name.startswith('conv2')]
fc1_variable = [t for t in variables if t.name.startswith('fc1')]
fc2_variable = [t for t in variables if t.name.startswith('fc2')]

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))

grads_conv1, _ = tf.clip_by_global_norm(tf.gradients(loss, conv1_variable), clip_norm=5)
grads_conv2, _ = tf.clip_by_global_norm(tf.gradients(loss, conv2_variable), clip_norm=5)
grads_fc1, _ = tf.clip_by_global_norm(tf.gradients(loss, fc1_variable), clip_norm=5)
# NOTE: the original file uses grads_fc2 below without ever defining it (a NameError);
# this line is an editorial fix that mirrors the fc1 pattern.
grads_fc2, _ = tf.clip_by_global_norm(tf.gradients(loss, fc2_variable), clip_norm=5)
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, variables), clip_norm=5)

conv1_optimizer = tf.train.AdamOptimizer(0.001)
conv2_optimizer = tf.train.AdamOptimizer(0.001)
fc1_optimizer = tf.train.AdamOptimizer(0.001)
fc2_optimizer = tf.train.AdamOptimizer(0.001)
optimizer = tf.train.AdamOptimizer(0.001)

conv1_op = conv1_optimizer.apply_gradients(zip(grads_conv1, conv1_variable))
conv2_op = conv2_optimizer.apply_gradients(zip(grads_conv2, conv2_variable))
fc1_op = fc1_optimizer.apply_gradients(zip(grads_fc1, fc1_variable))
fc2_op = fc2_optimizer.apply_gradients(zip(grads_fc2, fc2_variable))
op = optimizer.apply_gradients(zip(grads, variables))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    train_data = data.np_load(batch_size=10, batch_type='train/')
    test_data = data.np_load(batch_size=10, batch_type='test/')
    for i in range(1000):
        for _ in range(100):
            input_, label = next(train_data)
            sess.run([conv1_op, conv2_op, fc1_op, fc2_op], feed_dict={x: input_, y_: label, rate: 0})
        test_total_accuracy = 0
        for i in range(10):
            test_input_, test_label = next(test_data)
            test_accuracy, _ = sess.run([accuracy, tf.no_op()], feed_dict={x: test_input_, y_: test_label, rate: 0})
            test_total_accuracy += test_accuracy
        print('test set accuracy: %.3f' % (test_total_accuracy / 10))
```
Quality signals: avg_line_length 44.53012, max_line_length 116, alphanum_fraction 0.717532, num_words 639, num_chars 3,696, mean_word_length 3.890454, frac_words_unique 0.189358, frac_chars_top_2grams 0.014481, frac_chars_top_3grams 0.038616, frac_chars_top_4grams 0.058327, frac_chars_dupe_5grams 0.497989, dupe_6grams 0.343926, dupe_7grams 0.216412, dupe_8grams 0.172969, dupe_9grams 0.119871, dupe_10grams 0.092518, frac_chars_digital 0.073661, frac_chars_whitespace 0.125812, size_file_byte 3,696, num_lines 82, num_chars_line_max 117, num_chars_line_mean 45.073171, frac_chars_alphabet 0.69576, frac_chars_string_length 0.02868, cate_ast 1, cate_var_zero false, frac_lines_import 0.033333, score_lines_no_logic 0.033333, frac_lines_print 0.016667.
**Record 11: `src/ensae_projects/datainc/data_medical.py` @ `sdpython/ensae_projects`**
hexsha `efc7a9d58bb127091a58a8679f3c1f9062aeca6a`; size 3,123; ext `py`; lang Python; head `9647751da053c09fa35402527b294e02a4e6e2ad`; licenses `["MIT"]`. Stars: 1 (both events 2020-11-22T10:24:54.000Z); issues: 13 (events 2017-11-20T00:20:45.000Z to 2021-01-05T14:13:51.000Z); forks: null.
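Conversion helpers for Cancer Imaging Archive data: `convert_dcm2png` walks a folder of DICOM files with pydicom, writes PNGs via OpenCV, flattens every DICOM data element into a row, and saves a `_summary.csv`.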
"""
@file
@brief Functions to handle data coming from
:epkg:`Cancer Imaging Archive`.
"""
import os
import pydicom
import pandas
import cv2
from pyquickhelper.filehelper.synchelper import explore_folder_iterfile # pylint: disable=C0411
def _recurse_fill(obs, dataset, parent=""):
for data_element in dataset:
if isinstance(data_element.value, bytes):
continue
if data_element.VR == "SQ": # a sequence
name = data_element.name
for i, ds in enumerate(data_element.value):
_recurse_fill(obs, ds,
parent="{parent}.{name}[{i}]".format(
parent=parent, name=name, i=i))
else:
text = str(data_element.value)
name = str(data_element.name)
key = name if parent == '' else parent + "." + name
obs[key] = text
def convert_dcm2png(folder, dest, fLOG=None):
"""
Converts all medical images in a folder from format
:epkg:`dcm` to :epkg:`png`.
@param folder source folder
@param dest destination folder
@param fLOG logging function
@return :epkg:`pandas:DataFrame` with many data
The function uses module :epkg:`pydicom`.
"""
if not os.path.exists(dest):
raise FileNotFoundError("Unable to find folder '{}'.".format(dest))
if fLOG is not None:
fLOG("[convert_dcm2png] convert dcm files from '{}'.".format(folder))
fLOG("[convert_dcm2png] into '{}'.".format(dest))
done = {}
rows = []
for name in explore_folder_iterfile(folder, ".*[.]dcm$"):
relname = os.path.relpath(name, folder)
if fLOG is not None:
fLOG("[convert_dcm2png] read {}: '{}'.".format(
len(rows) + 1, relname))
f1 = relname.replace("\\", "/").split("/")[0]
name_ = "img_%06d.png" % len(done)
if "_" in f1:
sub = f1.split('_')[0]
fsub = os.path.join(dest, sub)
if not os.path.exists(fsub):
if fLOG is not None:
fLOG("[convert_dcm2png] create folder '{}'.".format(sub))
os.mkdir(fsub)
new_name = os.path.join(sub, name_)
else:
new_name = name_
# read
ds = pydicom.dcmread(name)
# data
obs = dict(_src=relname, _dest=new_name, _size=len(ds.pixel_array))
_recurse_fill(obs, ds)
rows.append(obs)
# image
full_name = os.path.join(dest, new_name)
if os.path.exists(full_name):
done[name] = full_name
continue
pixel_array_numpy = ds.pixel_array
cv2.imwrite(full_name, pixel_array_numpy) # pylint: disable=E1101
done[name] = full_name
final = os.path.join(dest, "_summary.csv")
if fLOG is not None:
fLOG("[convert_dcm2png] converted {} images.".format(len(rows)))
fLOG("[convert_dcm2png] write '{}'.".format(final))
df = pandas.DataFrame(rows)
df.to_csv(final, index=False, encoding="utf-8")
return df
Quality signals: avg_line_length 33.945652, max_line_length 96, alphanum_fraction 0.565802, num_words 376, num_chars 3,123, mean_word_length 4.566489, frac_words_unique 0.340426, frac_chars_top_2grams 0.027956, frac_chars_top_3grams 0.0629, frac_chars_top_4grams 0.025626, frac_chars_dupe_5grams 0.09668, dupe_6grams 0.076878, dupe_7grams 0.076878, dupe_8grams 0.076878, frac_chars_digital 0.012015, frac_chars_whitespace 0.307077, size_file_byte 3,123, num_lines 91, num_chars_line_max 97, num_chars_line_mean 34.318681, frac_chars_alphabet 0.781423, frac_chars_comments 0.148255, frac_lines_dupe_lines 0.15873, frac_chars_string_length 0.116564, cate_ast 1, frac_lines_func_ratio 0.031746, cate_var_zero false, frac_lines_import 0.079365, score_lines_no_logic 0.126984.
**Record 12: `dfirtrack_api/serializers.py` @ `0xflotus/dfirtrack`**
hexsha `efcb531829013e0d275069585a78eef303453aa5`; size 851; ext `py`; lang Python; head `632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5`; licenses `["MIT"]`. Stars: 4 (events 2020-03-06T17:37:09.000Z to 2020-03-17T07:50:55.000Z); issues: null; forks: 1 (both events 2020-03-06T20:54:52.000Z).
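Django REST framework serializers from dfirtrack: `SystemSerializer` exposes selected `System` fields and nests a `SystemtypeSerializer` for the foreign-keyed system type.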
```python
from rest_framework import serializers

from dfirtrack_main.models import System, Systemtype


class SystemtypeSerializer(serializers.ModelSerializer):
    """ create serializer for systemtype (needed because of foreignkey relationsship) """

    class Meta:
        model = Systemtype
        # attributes made available for api
        fields = (
            'systemtype_name',
        )


class SystemSerializer(serializers.ModelSerializer):
    """ create serializer for system """

    # get serializer for systemtype (needed because of foreignkey relationsship)
    systemtype = SystemtypeSerializer(many=False, read_only=True)

    class Meta:
        model = System
        # attributes made available for api
        fields = (
            'system_id',
            'system_uuid',
            'system_name',
            'systemtype',
        )
```
Quality signals: avg_line_length 27.451613, max_line_length 89, alphanum_fraction 0.654524, num_words 78, num_chars 851, mean_word_length 7.051282, frac_words_unique 0.487179, frac_chars_top_2grams 0.070909, frac_chars_top_3grams 0.116364, frac_chars_top_4grams 0.152727, frac_chars_dupe_5grams 0.489091, dupe_6grams 0.349091, dupe_7grams 0.221818, dupe_8grams 0.221818, frac_chars_whitespace 0.274971, size_file_byte 851, num_lines 30, num_chars_line_max 90, num_chars_line_mean 28.366667, frac_chars_alphabet 0.89141, frac_chars_comments 0.296122, frac_lines_dupe_lines 0.222222, frac_chars_string_length 0.09589, cate_ast 1, cate_var_zero false, frac_lines_import 0.111111, score_lines_no_logic 0.388889.
**Record 13: `corrct/utils_proc.py` @ `cicwi/PyCorrectedEmissionCT`**
hexsha `efd02e3f34305859967db711ac4399efc0f26e99`; size 7,489; ext `py`; lang Python; head `424449e1879a03cdbb8910c806417962e5b9faff`; licenses `["BSD-3-Clause"]`. Stars: 3 (events 2020-12-08T17:09:08.000Z to 2022-01-21T22:46:56.000Z); issues: 11 (events 2021-03-19T11:34:34.000Z to 2022-03-31T13:22:02.000Z); forks: 1 (both events 2021-03-11T18:27:48.000Z).
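Processing utilities from PyCorrectedEmissionCT: a circular reconstruction mask with constant or sinc drop-off, sinogram padding, flat-field correction, a minus-log transform, and a wavelet-regularized least-squares denoiser supporting several error norms.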
```python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 15:25:14 2020

@author: Nicola VIGANÒ, Computational Imaging group, CWI, The Netherlands,
and ESRF - The European Synchrotron, Grenoble, France
"""

import numpy as np

from . import operators
from . import solvers


def get_circular_mask(vol_shape, radius_offset=0, coords_ball=None, mask_drop_off="const", data_type=np.float32):
    """Computes a circular mask for the reconstruction volume.

    :param vol_shape: The size of the volume.
    :type vol_shape: numpy.array_like
    :param radius_offset: The offset with respect to the volume edge.
    :type radius_offset: float. Optional, default: 0
    :param coords_ball: The coordinates to consider for the non-masked region.
    :type coords_ball: list of dimensions. Optional, default: None
    :param data_type: The mask data type.
    :type data_type: numpy.dtype. Optional, default: np.float32

    :returns: The circular mask.
    :rtype: (numpy.array_like)
    """
    vol_shape = np.array(vol_shape, dtype=np.intp)

    coords = [np.linspace(-(s - 1) / 2, (s - 1) / 2, s, dtype=data_type) for s in vol_shape]
    coords = np.meshgrid(*coords, indexing="ij")

    if coords_ball is None:
        coords_ball = np.arange(-np.fmin(2, len(vol_shape)), 0, dtype=np.intp)
    else:
        coords_ball = np.array(coords_ball, dtype=np.intp)

    radius = np.min(vol_shape[coords_ball]) / 2 + radius_offset

    coords = np.stack(coords, axis=0)
    if coords_ball.size == 1:
        dists = np.abs(coords[coords_ball, ...])
    else:
        dists = np.sqrt(np.sum(coords[coords_ball, ...] ** 2, axis=0))

    if mask_drop_off.lower() == "const":
        return dists <= radius
    elif mask_drop_off.lower() == "sinc":
        cut_off = np.min(vol_shape[coords_ball]) / np.sqrt(2) - radius
        outter_region = 1 - (dists <= radius)
        outter_vals = 1 - np.sinc((dists - radius) / cut_off)
        return np.fmax(1 - outter_region * outter_vals, 0)
    else:
        raise ValueError("Unknown drop-off function: %s" % mask_drop_off)


def pad_sinogram(sinogram, width, pad_axis=-1, mode="edge", **kwds):
    """Pads the sinogram.

    :param sinogram: The sinogram to pad.
    :type sinogram: numpy.array_like
    :param width: The width of the padding.
    :type width: either an int or tuple(int, int)
    :param pad_axis: The axis to pad.
    :type pad_axis: int. Optional, default: -1
    :param mode: The padding type (from numpy.pad).
    :type mode: string. Optional, default: 'edge'.
    :param kwds: The numpy.pad arguments.

    :returns: The padded sinogram.
    :rtype: (numpy.array_like)
    """
    pad_size = [(0, 0)] * len(sinogram.shape)
    if len(width) == 1:
        width = (width, width)
    pad_size[pad_axis] = width

    return np.pad(sinogram, pad_size, mode=mode, **kwds)


def apply_flat_field(projs, flats, darks=None, crop=None, data_type=np.float32):
    """Apply flat field.

    :param projs: Projections
    :type projs: numpy.array_like
    :param flats: Flat fields
    :type flats: numpy.array_like
    :param darks: Dark noise, defaults to None
    :type darks: numpy.array_like, optional
    :param crop: Crop region, defaults to None
    :type crop: numpy.array_like, optional
    :param data_type: numpy.dtype, defaults to np.float32
    :type data_type: Data type of the processed data, optional

    :return: Falt-field corrected and linearized projections
    :rtype: numpy.array_like
    """
    if crop is not None:
        projs = projs[..., crop[0] : crop[2], crop[1] : crop[3]]
        flats = flats[..., crop[0] : crop[2], crop[1] : crop[3]]
        if darks is not None:
            darks = darks[..., crop[0] : crop[2], crop[1] : crop[3]]
    if darks is not None:
        projs -= darks
        flats -= darks
    flats = np.mean(flats.astype(data_type), axis=0)
    return projs.astype(data_type) / flats


def apply_minus_log(projs):
    """Apply -log.

    :param projs: Projections
    :type projs: numpy.array_like

    :return: Falt-field corrected and linearized projections
    :rtype: numpy.array_like
    """
    return np.fmax(-np.log(projs), 0.0)


def denoise_image(
    img, reg_weight=1e-2, stddev=None, error_norm="l2b", iterations=250, axes=(-2, -1), lower_limit=None, verbose=False
):
    """Image denoiser based on (simple, weighted or dead-zone) least-squares and wavelets.
    The weighted least-squares requires the local pixel-wise standard deviations.
    It can be used to denoise sinograms and projections.

    :param img: The image or sinogram to denoise.
    :type img: `numpy.array_like`
    :param reg_weight: Weight of the regularization term, defaults to 1e-2
    :type reg_weight: float, optional
    :param stddev: The local standard deviations. If None, it performs a standard least-squares.
    :type stddev: `numpy.array_like`, optional
    :param error_norm: The error weighting mechanism. When using std_dev, options are: {'l2b'} | 'l1b' | 'hub' | 'wl2' \
        (corresponding to: 'l2 dead-zone', 'l1 dead-zone', 'Huber', 'weighted least-squares').
    :type error_norm: str, optional
    :param iterations: Number of iterations, defaults to 250
    :type iterations: int, optional
    :param axes: Axes along which the regularization should be done, defaults to (-2, -1)
    :type iterations: int or tuple, optional
    :param lower_limit: Lower clipping limit of the image, defaults to None
    :type iterations: float, optional
    :param verbose: Turn verbosity on, defaults to False
    :type verbose: boolean, optional

    :return: Denoised image or sinogram.
    :rtype: `numpy.array_like`
    """

    def compute_wls_weights(stddev, At, reg_weights):
        stddev_zeros = stddev == 0
        stddev_valid = np.invert(stddev_zeros)
        min_valid_stddev = np.min(stddev[stddev_valid])
        reg_weights = reg_weights * (At(stddev_zeros) == 0) * min_valid_stddev
        img_weights = min_valid_stddev / np.fmax(stddev, min_valid_stddev)
        return (img_weights, reg_weights)

    def compute_lsb_weights(stddev):
        stddev_zeros = stddev == 0
        stddev_valid = np.invert(stddev_zeros)
        min_valid_stddev = np.min(stddev[stddev_valid])
        return np.fmax(stddev, min_valid_stddev)

    OpI = operators.TransformIdentity(img.shape)

    if stddev is not None:
        if error_norm.lower() == "l2b":
            img_weight = compute_lsb_weights(stddev)
            data_term = solvers.DataFidelity_l2b(img_weight)
        elif error_norm.lower() == "l1b":
            img_weight = compute_lsb_weights(stddev)
            data_term = solvers.DataFidelity_l1b(img_weight)
        elif error_norm.lower() == "hub":
            img_weight = compute_lsb_weights(stddev)
            data_term = solvers.DataFidelity_Huber(img_weight)
        elif error_norm.lower() == "wl2":
            (img_weight, reg_weight) = compute_wls_weights(stddev, OpI.T, reg_weight)
            data_term = solvers.DataFidelity_wl2(img_weight)
        else:
            raise ValueError('Unknown error method: "%s". Options are: {"l2b"} | "l1b" | "hub" | "wl2"' % error_norm)
    else:
        data_term = error_norm

    if isinstance(axes, int):
        axes = (axes,)

    reg_wl = solvers.Regularizer_l1swl(reg_weight, "bior4.4", 2, axes=axes, normalized=False)
    sol_wls_wl = solvers.CP(verbose=verbose, regularizer=reg_wl, data_term=data_term)
    (denoised_img, _) = sol_wls_wl(OpI, img, iterations, x0=img, lower_limit=lower_limit)
    return denoised_img
```
Quality signals: avg_line_length 37.633166, max_line_length 120, alphanum_fraction 0.667646, num_words 1,055, num_chars 7,489, mean_word_length 4.582938, frac_words_unique 0.225592, frac_chars_top_2grams 0.028956, frac_chars_top_3grams 0.040538, frac_chars_top_4grams 0.019648, frac_chars_dupe_5grams 0.210341, dupe_6grams 0.18242, dupe_7grams 0.136298, dupe_8grams 0.136298, dupe_9grams 0.113961, dupe_10grams 0.113961, frac_chars_digital 0.016388, frac_chars_whitespace 0.217786, size_file_byte 7,489, num_lines 198, num_chars_line_max 121, num_chars_line_mean 37.823232, frac_chars_alphabet 0.808979, frac_chars_comments 0.409, frac_lines_dupe_lines 0.190476, frac_lines_long_string 0.011905, frac_chars_string_length 0.034558, cate_ast 1, frac_lines_func_ratio 0.083333, cate_var_zero false, frac_lines_import 0.035714, score_lines_no_logic 0.214286.
efd1c5307f2a5343f619264248d49a40d7ec14ee
| 675
|
py
|
Python
|
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
'''
@author: l4zyc0d3r
People who are happy make others happy. I am gonna finish it slowly but definitely.
'''
from typing import List  # needed for the List[int] annotation below
class Solution:
    def largestRectangleArea(self, H: List[int]) -> int:
        # Monotonic stack of indices with non-decreasing heights; popping a bar
        # fixes its height, and the width spans back to the previous smaller bar.
st, mx, i = [], 0, 0
while i<len(H):
if len(st)==0 or H[st[-1]]<=H[i]:
st.append(i)
i+=1
else:
rb = i
h = H[st.pop()]
lb = st[-1] if len(st) else -1
mx = max(mx, (rb-lb-1)*h)
while len(st):
rb = len(H)
h = H[st.pop()]
lb = st[-1] if len(st) else -1
mx = max(mx, (rb-lb-1)*h)
return mx
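# A quick sanity check (not part of the original submission): the classic
# histogram [2, 1, 5, 6, 2, 3] has a largest rectangle of area 10 (bars 5 and 6).
if __name__ == "__main__":
    assert Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]) == 10
    assert Solution().largestRectangleArea([2, 4]) == 4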
| 29.347826
| 86
| 0.422222
| 100
| 675
| 2.85
| 0.4
| 0.070175
| 0.073684
| 0.049123
| 0.259649
| 0.259649
| 0.259649
| 0.259649
| 0.259649
| 0.259649
| 0
| 0.036176
| 0.426667
| 675
| 22
| 87
| 30.681818
| 0.700258
| 0.157037
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd28e21b75921adf9dd8a8cb27c1319019eacfc
| 402
|
py
|
Python
|
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | null | null | null |
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | 1
|
2021-04-30T20:59:15.000Z
|
2021-04-30T20:59:15.000Z
|
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | null | null | null |
import googleapiclient.errors  # HttpError is referenced in the except clause below
from cal_setup import get_calendar_service
def main():
# Delete the event
service = get_calendar_service()
    try:
        service.events().delete(
            calendarId='primary',
            eventId='njdev79d574rdmkv0180t7t7lo',
        ).execute()
    except googleapiclient.errors.HttpError:
        print("Failed to delete event")
    else:
        print("Event deleted")
if __name__ == '__main__':
main()
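# A hedged sketch (not in the original script): the hard-coded eventId above
# must exist in your calendar; one way to discover ids is to list events first.
def list_event_ids(max_results=5):
    service = get_calendar_service()
    events = service.events().list(
        calendarId='primary', maxResults=max_results
    ).execute()
    return [e['id'] for e in events.get('items', [])]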
| 23.647059
| 48
| 0.659204
| 41
| 402
| 6.146341
| 0.707317
| 0.087302
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.233831
| 402
| 17
| 49
| 23.647059
| 0.782468
| 0.039801
| 0
| 0
| 0
| 0
| 0.197403
| 0.067532
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd30ee41ca03d2e23b35a990fdeba3358b3d6c7
| 15,351
|
py
|
Python
|
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import json
import asyncio
import itertools
import typing as t
from collections import defaultdict
from contextlib import asynccontextmanager
from aiohttp import ClientSession
from aiohttp.client import ClientWebSocketResponse
from aiohttp.http_websocket import WSMsgType, WSCloseCode
from aiohttp.client_exceptions import (
ClientResponseError, ClientConnectorError, ClientConnectionError, ServerDisconnectedError
)
from pycdp.utils import ContextLoggerMixin, LoggerMixin, SingleTaskWorker, retry_on
from pycdp import cdp
T = t.TypeVar('T')
class CDPError(Exception):
pass
class CDPBrowserError(CDPError):
''' This exception is raised when the browser's response to a command
indicates that an error occurred. '''
def __init__(self, obj):
self.code: int = obj['code']
self.message: str = obj['message']
self.detail = obj.get('data')
def __str__(self):
return 'BrowserError<code={} message={}> {}'.format(self.code,
self.message, self.detail)
class CDPConnectionClosed(CDPError):
''' Raised when a public method is called on a closed CDP connection. '''
def __init__(self, reason):
'''
Constructor.
:param reason:
:type reason: wsproto.frame_protocol.CloseReason
'''
self.reason = reason
def __repr__(self):
''' Return representation. '''
return '{}<{}>'.format(self.__class__.__name__, self.reason)
class CDPSessionClosed(CDPError):
pass
class CDPInternalError(CDPError):
''' This exception is only raised when there is faulty logic in TrioCDP or
the integration with PyCDP. '''
class CDPEventListenerClosed(CDPError):
pass
_CLOSE_SENTINEL = object()  # unique sentinel instance used to wake up closing listeners
class CDPEventListener:
def __init__(self, queue: asyncio.Queue):
self._queue = queue
self._closed = False
@property
def closed(self):
return self._closed
def put(self, elem: dict):
if self._closed: raise CDPEventListenerClosed
self._queue.put_nowait(elem)
def close(self):
self._closed = True
try:
self._queue.put_nowait(_CLOSE_SENTINEL)
except asyncio.QueueFull:
pass
async def __aiter__(self):
try:
while not self._closed:
elem = await self._queue.get()
if elem is _CLOSE_SENTINEL:
return
yield elem
finally:
self._closed = True
def __str__(self) -> str:
return f'{self.__class__.__name__}(buffer={self._queue.qsize()}/{self._queue.maxsize}, closed={self._closed})'
class CDPBase(LoggerMixin):
'''
Contains shared functionality between the CDP connection and session.
'''
def __init__(self, ws: ClientWebSocketResponse=None, session_id=None, target_id=None):
super().__init__()
self._listeners: t.Dict[type, t.Set[CDPEventListener]] = defaultdict(set)
self._id_iter = itertools.count()
        self._inflight_cmd: t.Dict[int, t.Tuple[t.Generator[dict, dict, t.Any], asyncio.Future]] = {}
self._session_id = session_id
self._target_id = target_id
self._ws = ws
@property
def session_id(self) -> cdp.target.SessionID:
return self._session_id
    async def execute(self, cmd: t.Generator[dict, dict, T]) -> T:
'''
Execute a command on the server and wait for the result.
:param cmd: any CDP command
:returns: a CDP result
'''
cmd_id = next(self._id_iter)
cmd_response = asyncio.get_running_loop().create_future()
self._inflight_cmd[cmd_id] = cmd, cmd_response
request = next(cmd)
request['id'] = cmd_id
if self._session_id:
request['sessionId'] = self._session_id
self._logger.debug('sending command %r', request)
request_str = json.dumps(request)
try:
try:
await self._ws.send_str(request_str)
except ConnectionResetError as e:
del self._inflight_cmd[cmd_id]
raise CDPConnectionClosed(e.args[0]) from e
return await cmd_response
except asyncio.CancelledError:
if cmd_id in self._inflight_cmd:
del self._inflight_cmd[cmd_id]
raise
def listen(self, *event_types: t.Type[T], buffer_size=100) -> t.AsyncIterator[T]:
'''Return an async iterator that iterates over events matching the
indicated types.'''
receiver = CDPEventListener(asyncio.Queue(buffer_size))
for event_type in event_types:
self._listeners[event_type].add(receiver)
return receiver.__aiter__()
@asynccontextmanager
async def wait_for(self, event_type: t.Type[T], buffer_size=100) -> t.AsyncGenerator[T, None]:
'''
Wait for an event of the given type and return it.
This is an async context manager, so you should open it inside an async
with block. The block will not exit until the indicated event is
received.
'''
async for event in self.listen(event_type, buffer_size):
yield event
return
def close_listeners(self):
for listener in itertools.chain.from_iterable(self._listeners.values()):
listener.close()
self._listeners.clear()
def _handle_data(self, data):
'''
Handle incoming WebSocket data.
:param dict data: a JSON dictionary
'''
if 'id' in data:
self._handle_cmd_response(data)
else:
self._handle_event(data)
def _handle_cmd_response(self, data):
'''
Handle a response to a command. This will set an event flag that will
return control to the task that called the command.
:param dict data: response as a JSON dictionary
'''
cmd_id = data['id']
try:
cmd, event = self._inflight_cmd.pop(cmd_id)
except KeyError:
self._logger.debug('got a message with a command ID that does not exist: %s', data)
return
if 'error' in data:
# If the server reported an error, convert it to an exception and do
# not process the response any further.
event.set_exception(CDPBrowserError(data['error']))
else:
# Otherwise, continue the generator to parse the JSON result
# into a CDP object.
try:
cmd.send(data['result'])
event.set_exception(CDPInternalError("the command's generator function did not exit when expected!"))
except StopIteration as e:
event.set_result(e.value)
def _handle_event(self, data):
'''
Handle an event.
:param dict data: event as a JSON dictionary
'''
event = cdp.util.parse_json_event(data)
self._logger.debug('dispatching event %s', event)
to_remove = set()
for listener in self._listeners[type(event)]:
try:
listener.put(event)
except asyncio.QueueFull:
self._logger.warning('event %s dropped because listener %s queue is full', type(event), listener)
except CDPEventListenerClosed:
to_remove.add(listener)
self._listeners[type(event)] -= to_remove
self._logger.debug('event dispatched')
class CDPConnection(CDPBase, SingleTaskWorker):
'''
Contains the connection state for a Chrome DevTools Protocol server.
CDP can multiplex multiple "sessions" over a single connection. This class
corresponds to the "root" session, i.e. the implicitly created session that
has no session ID. This class is responsible for reading incoming WebSocket
messages and forwarding them to the corresponding session, as well as
handling messages targeted at the root session itself.
    You should generally call :func:`connect_cdp` instead of
    instantiating this class directly.
'''
def __init__(self, debugging_url: str, http_client: ClientSession):
super().__init__()
self._debugging_url = debugging_url.rstrip('/')
self._http_client = http_client
self._wsurl: str = None
self._ws_context = None
self._sessions: t.Dict[str, CDPSession] = {}
@property
def closed(self) -> bool:
return self._ws.closed
@property
def had_normal_closure(self) -> bool:
return self._ws.close_code == WSCloseCode.OK
@retry_on(
ClientConnectorError, asyncio.TimeoutError,
retries=10, delay=3.0, delay_growth=1.3, log_errors=True
)
async def connect(self):
if self._ws is not None: raise RuntimeError('already connected')
if self._wsurl is None:
if self._debugging_url.startswith('http://'):
async with self._http_client.get(f'{self._debugging_url}/json/version') as resp:
if resp.status != 200:
raise ClientResponseError(
resp.request_info,
resp.history,
status=resp.status,
message=resp.reason,
headers=resp.headers
)
self._wsurl = (await resp.json())['webSocketDebuggerUrl']
elif self._debugging_url.startswith('ws://'):
self._wsurl = self._debugging_url
else:
raise ValueError('bad debugging URL scheme')
self._ws = await self._http_client.ws_connect(self._wsurl, compress=15, autoping=True, autoclose=True).__aenter__()
def add_session(self, session_id: str, target_id: str) -> CDPSession:
        if session_id in self._sessions:
return self._sessions[session_id]
session = CDPSession(self._ws, session_id, target_id)
self._sessions[session_id] = session
return session
def remove_session(self, session_id: str):
if session_id in self._sessions:
self._sessions.pop(session_id).close()
async def connect_session(self, target_id: cdp.target.TargetID) -> 'CDPSession':
'''
Returns a new :class:`CDPSession` connected to the specified target.
'''
session_id = await self.execute(cdp.target.attach_to_target(target_id, True))
session = CDPSession(self._ws, session_id, target_id)
self._sessions[session_id] = session
return session
async def _run(self):
while True:
message = await self._ws.receive()
if message.type == WSMsgType.TEXT:
try:
data = json.loads(message.data)
except json.JSONDecodeError:
raise CDPBrowserError({
'code': -32700,
'message': 'Client received invalid JSON',
'data': message
})
if 'sessionId' in data:
session_id = cdp.target.SessionID(data['sessionId'])
try:
session = self._sessions[session_id]
except KeyError:
self._logger.debug(f'received message for unknown session: {data}')
continue
session._handle_data(data)
else:
self._handle_data(data)
elif message.type == WSMsgType.CLOSE or message.type == WSMsgType.CLOSING or message.type == WSMsgType.CLOSED:
return
elif message.type == WSMsgType.ERROR:
raise message.data
else:
await self._ws.close(code=WSCloseCode.UNSUPPORTED_DATA)
                raise CDPConnectionClosed('received non-text frame from remote peer')
async def _close(self):
try:
await super()._close()
for session in self._sessions.values():
session.close()
self._sessions.clear()
self.close_listeners()
if self._ws is not None and not self._ws.closed:
await self._ws.close()
finally:
await self._http_client.close()
class CDPSession(CDPBase, ContextLoggerMixin):
'''
Contains the state for a CDP session.
Generally you should not instantiate this object yourself; you should call
    :meth:`CDPConnection.connect_session`.
'''
def __init__(self, ws: ClientWebSocketResponse, session_id: cdp.target.SessionID, target_id: cdp.target.TargetID):
super().__init__(ws, session_id, target_id)
self._dom_enable_count = 0
self._dom_enable_lock = asyncio.Lock()
self._page_enable_count = 0
self._page_enable_lock = asyncio.Lock()
self.set_logger_context(extra_name=session_id)
@asynccontextmanager
async def dom_enable(self):
'''
        A context manager that executes ``dom.enable()`` when it enters and then
        calls ``dom.disable()`` when it exits.
This keeps track of concurrent callers and only disables DOM events when
all callers have exited.
'''
async with self._dom_enable_lock:
self._dom_enable_count += 1
if self._dom_enable_count == 1:
await self.execute(cdp.dom.enable())
yield
async with self._dom_enable_lock:
self._dom_enable_count -= 1
if self._dom_enable_count == 0:
await self.execute(cdp.dom.disable())
@asynccontextmanager
async def page_enable(self):
'''
A context manager that executes ``page.enable()`` when it enters and
then calls ``page.disable()`` when it exits.
This keeps track of concurrent callers and only disables page events
when all callers have exited.
'''
async with self._page_enable_lock:
self._page_enable_count += 1
if self._page_enable_count == 1:
await self.execute(cdp.page.enable())
yield
async with self._page_enable_lock:
self._page_enable_count -= 1
if self._page_enable_count == 0:
await self.execute(cdp.page.disable())
def close(self):
if len(self._inflight_cmd) > 0:
exc = CDPSessionClosed()
for (_, event) in self._inflight_cmd.values():
if not event.done():
event.set_exception(exc)
self._inflight_cmd.clear()
self.close_listeners()
@retry_on(ClientConnectionError, ServerDisconnectedError, retries=10, delay=3.0, delay_growth=1.3, log_errors=True)
async def connect_cdp(url: str) -> CDPConnection:
'''
Connect to the browser specified by debugging ``url``.
This connection is not automatically closed! You can either use the connection
object as a context manager (``async with conn:``) or else call ``await
conn.aclose()`` on it when you are done with it.
'''
http = ClientSession()
cdp_conn = CDPConnection(url, http)
try:
await cdp_conn.connect()
cdp_conn.start()
except:
await http.close()
raise
return cdp_conn
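# Usage sketch (an assumption, not part of this module): attach to a browser
# started with --remote-debugging-port=9222, pick the first target and navigate
# it. `cdp.target.get_targets` and `cdp.page.navigate` are standard generated
# wrappers from the `pycdp.cdp` package imported above.
async def _example():
    conn = await connect_cdp('http://localhost:9222')
    try:
        targets = await conn.execute(cdp.target.get_targets())
        session = await conn.connect_session(targets[0].target_id)
        async with session.page_enable():
            await session.execute(cdp.page.navigate('https://example.com'))
    finally:
        await conn.aclose()
# run with: asyncio.run(_example())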
| 35.7
| 123
| 0.614162
| 1,779
| 15,351
| 5.093311
| 0.205734
| 0.023838
| 0.014899
| 0.010484
| 0.181547
| 0.13387
| 0.111246
| 0.077033
| 0.077033
| 0.059817
| 0
| 0.003712
| 0.297961
| 15,351
| 429
| 124
| 35.783217
| 0.83706
| 0.109048
| 0
| 0.214789
| 0
| 0.003521
| 0.055221
| 0.010962
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080986
| false
| 0.014085
| 0.045775
| 0.021127
| 0.221831
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd3aea1c3cf0426d8d1f43ef851162a882e6a5f
| 7,680
|
py
|
Python
|
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1
|
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.os.gsfile import g_file
class CheckSpecialFile(BaseItem):
def __init__(self):
super(CheckSpecialFile, self).__init__(self.__class__.__name__)
def getDiskPath(self):
nodeDirs = []
# get PGHOST Dir
tmpDir = DefaultValue.getEnv("PGHOST")
nodeDirs.append(tmpDir)
# get gphome dir
gphome_path = DefaultValue.getEnv("GPHOME")
nodeDirs.append(gphome_path)
# get log dir
log_path = DefaultValue.getEnv("GAUSSLOG")
nodeDirs.append(log_path)
# get gausshome dir
gausshome_path = DefaultValue.getEnv("GAUSSHOME")
nodeDirs.append(os.path.realpath(gausshome_path))
hostName = DefaultValue.GetHostIpOrName()
dbNode = self.cluster.getDbNodeByName(hostName)
# including dn
for dbInst in dbNode.datanodes:
nodeDirs.append(dbInst.datadir)
return nodeDirs
def checkPathVaild(self, envValue):
"""
        function: check path valid
input : envValue
output: NA
"""
if (envValue.strip() == ""):
return 0
        # check path valid
for rac in DefaultValue.PATH_CHECK_LIST:
flag = envValue.find(rac)
if flag >= 0:
return 1
return 0
def ignorePath(self, path):
# Part of the root path and file permissions need to be ignored
ignorePathList = []
toolPath = DefaultValue.getEnv("GPHOME")
sudoPath = os.path.join(toolPath, "sudo")
inspectionPath = os.path.join(toolPath, "script/inspection")
ignorePathList.append("%s/script/gs_preinstall" % toolPath)
ignorePathList.append("%s/script/gs_postuninstall" % toolPath)
ignorePathList.append("%s/script/gs_checkos" % toolPath)
scriptPath = os.path.join(toolPath, "script")
scriptDirList = scriptPath.split('/')
inspectionDirList = inspectionPath.split('/')
# ignore own special files
if (path in ignorePathList or os.path.dirname(path) == sudoPath):
return True
else:
(filename, suffix) = os.path.splitext(path)
pathDirList = path.split('/')
# ignore .pyc file in GPHOME/script
if (path.find(scriptPath) == 0 and pathDirList[:len(
scriptDirList)] == scriptDirList and suffix == ".pyc"):
return True
# ignore GPHOME/script/inspection dir
elif (path.find(inspectionPath) == 0 and pathDirList[:len(
inspectionDirList)] == inspectionDirList):
return True
else:
return False
def checkSpecialChar(self):
outputList = []
failList = []
pathList = []
paths = self.getDiskPath()
for path in paths:
if (not path or not os.path.isdir(path)):
continue
else:
pathList.append(path)
pool = ThreadPool(DefaultValue.getCpuSet())
results = pool.map(self.checkSingleSpecialChar, pathList)
pool.close()
pool.join()
for outlist, flist in results:
if (outlist):
outputList.extend(outlist)
if (flist):
failList.extend(flist)
if (len(outputList) > 0):
outputList = DefaultValue.Deduplication(outputList)
if (failList):
failList = DefaultValue.Deduplication(failList)
return outputList, failList
def checkSingleSpecialChar(self, path):
# Check a single path
outputList = []
failList = []
cmd = "find '%s' -name '*'" % path
(status, output) = subprocess.getstatusoutput(cmd)
FileList = output.split('\n')
while '' in FileList:
FileList.remove('')
if (status != 0 and output.find("Permission denied") > 0):
for realPath in FileList:
if (realPath.find("Permission denied") > 0):
failList.append(realPath)
elif (self.checkPathVaild(realPath) != 0):
outputList.append(realPath)
else:
for realPath in FileList:
if (self.checkPathVaild(realPath) != 0):
outputList.append(realPath)
return outputList, failList
#########################################################
    # get the files under all useful directories whose
    # owner is not the current executing user
#########################################################
def checkErrorOwner(self, ownername):
outputList = []
failList = []
path = ""
for path in self.getDiskPath():
if (not path or not os.path.isdir(path)):
continue
cmd = "find '%s' -iname '*' ! -user %s -print" % (path, ownername)
(status, output) = subprocess.getstatusoutput(cmd)
if (status == 0 and output != ""):
pathList = output.split("\n")
for path in pathList:
if (self.ignorePath(path)):
continue
outputList.append(path)
elif (output.find("Permission denied") > 0):
pathList = output.split("\n")
for path in pathList:
if (path.find("Permission denied") > 0):
failList.append(path)
continue
if (self.ignorePath(path)):
continue
outputList.append(path)
if (len(outputList) > 0):
outputList = DefaultValue.Deduplication(outputList)
return outputList, failList
def doCheck(self):
parRes = ""
flag = 0
output = ""
outputList, failList = self.checkSpecialChar()
for output in outputList:
if (output != ""):
flag = 1
parRes += "\nSpecial characters file: \"%s\"" % output
outputList, errorList = self.checkErrorOwner(self.user)
for output in outputList:
if (output != ""):
flag = 1
parRes += "\nFile owner should be %s." \
" Incorrect owner file: \"%s\"" \
% (self.user, output)
failList.extend(errorList)
if (failList):
flag = 1
failList = DefaultValue.Deduplication(failList)
parRes += "\n%s" % ("\n".join(failList))
if (flag == 1):
self.result.rst = ResultStatus.NG
self.result.val = parRes
else:
self.result.rst = ResultStatus.OK
self.result.val = "All files are normal."
| 37.101449
| 78
| 0.552344
| 745
| 7,680
| 5.656376
| 0.303356
| 0.011391
| 0.009492
| 0.019934
| 0.232795
| 0.164689
| 0.130517
| 0.106312
| 0.055055
| 0.017561
| 0
| 0.005995
| 0.326693
| 7,680
| 206
| 79
| 37.281553
| 0.808934
| 0.132422
| 0
| 0.357143
| 0
| 0
| 0.057707
| 0.007561
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0
| 0.045455
| 0
| 0.175325
| 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd3f9d1de68654dbc76d3fbfef70bcad64b263b
| 585
|
py
|
Python
|
main/methods/analysis.py
|
hannxiao/autotrade2
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | null | null | null |
main/methods/analysis.py
|
hannxiao/autotrade2
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | 6
|
2020-06-06T01:05:02.000Z
|
2021-12-13T20:42:16.000Z
|
main/methods/analysis.py
|
hannxiao/autotrade
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | null | null | null |
from . import toolFuncs
def DefineTrend(data, K):
'''
    Filter out all trends whose range is less than K%
'''
pairs = list(zip(data['Date'], data['Close']))
is_extreme = toolFuncs.extreme_point(data['Close'], K, recognition_method='height')
output = [pairs[i] for i in range(len(is_extreme)) if is_extreme[i]]
return {'DefineTrend': {'name': 'Trend', 'data': output, 'position': 'main', 'type': 'line',
'lineStyle': {'normal': {'width': 3}, 'showSymbol':False}
}
}
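# Usage sketch (hypothetical data; `toolFuncs.extreme_point` is this package's
# own helper, assumed available via the import above):
#   data = {'Date': ['d1', 'd2', 'd3'], 'Close': [10.0, 12.5, 9.8]}
#   series = DefineTrend(data, K=5)['DefineTrend']
#   series['data'] holds the (Date, Close) extreme points kept for the trend line.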
| 32.5
| 98
| 0.529915
| 64
| 585
| 4.765625
| 0.6875
| 0.088525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002463
| 0.305983
| 585
| 17
| 99
| 34.411765
| 0.748768
| 0.076923
| 0
| 0
| 0
| 0
| 0.179732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd40da6f7f764459934c721ccc5ec880311c2e3
| 607
|
py
|
Python
|
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | 13
|
2018-05-23T07:07:28.000Z
|
2021-05-28T07:37:30.000Z
|
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | null | null | null |
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | null | null | null |
# Author:
# Charles
# Function:
# Triplet loss function.
import torch
from torch.autograd import Function
import sys
sys.path.append('../')
from utils.utils import *
class TripletMarginLoss(Function):
def __init__(self, margin):
super(TripletMarginLoss, self).__init__()
self.margin = margin
# norm 2
self.pdist = PairwiseDistance(2)
def forward(self, anchor, positive, negative):
dis_apos = self.pdist.forward(anchor, positive)
dis_aneg = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin+dis_apos-dis_aneg, min=0.0)
loss = torch.mean(dist_hinge)
return loss
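# Usage sketch (an assumption: PairwiseDistance imported from utils.utils above
# behaves like torch.nn.PairwiseDistance over (N, D) embeddings):
if __name__ == '__main__':
    anchor, positive, negative = torch.randn(3, 8, 128).unbind(0)
    criterion = TripletMarginLoss(margin=0.5)
    print(criterion.forward(anchor, positive, negative).item())  # non-negative hinge loss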
| 26.391304
| 66
| 0.744646
| 82
| 607
| 5.341463
| 0.463415
| 0.068493
| 0.063927
| 0.100457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007648
| 0.138386
| 607
| 23
| 67
| 26.391304
| 0.829828
| 0.093904
| 0
| 0
| 0
| 0
| 0.005505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd461c9230c324e2c8e6e92be4631dc26caa578
| 768
|
py
|
Python
|
DailyProgrammer/20120316A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/20120316A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/20120316A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
you have a string "ddaaiillyypprrooggrraammeerr". We want to remove all the consecutive duplicates and put them in a
separate string, which yields two separate instances of the string "dailyprogramer".
use this list for testing:
input: "balloons"
expected output: "balons" "lo"
input: "ddaaiillyypprrooggrraammeerr"
expected output: "dailyprogramer" "dailyprogramer"
input: "aabbccddeded"
expected output: "abcdeded" "abcd"
input: "flabby aapples"
expected output: "flaby aples" "bap"
"""
inp = "ddaaiillyypprrooggrraammeerr"
org = ""
extra = ""
hold = ""
for a in range(len(inp)):
if hold == inp[a]:
extra += inp[a]
else:
org += inp[a]
hold = inp[a]
print("original:\t", inp)
print("first:\t\t", org)
print("repeats:\t", extra)
| 25.6
| 116
| 0.69401
| 98
| 768
| 5.438776
| 0.571429
| 0.105066
| 0.030019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173177
| 768
| 30
| 117
| 25.6
| 0.83937
| 0.63151
| 0
| 0
| 0
| 0
| 0.213768
| 0.101449
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd60ec0f5dfed774930cf3e30f7572bed405c2b
| 6,485
|
py
|
Python
|
src/preppipe/enginesupport/enginesupport.py
|
PrepPipe/preppipe-python
|
6fc547a539737ec37a7528eb97ce92e56d4f404a
|
[
"Apache-2.0"
] | 1
|
2022-02-28T03:34:57.000Z
|
2022-02-28T03:34:57.000Z
|
src/preppipe/enginesupport/enginesupport.py
|
PrepPipe/preppipe-python
|
6fc547a539737ec37a7528eb97ce92e56d4f404a
|
[
"Apache-2.0"
] | null | null | null |
src/preppipe/enginesupport/enginesupport.py
|
PrepPipe/preppipe-python
|
6fc547a539737ec37a7528eb97ce92e56d4f404a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import typing
import PIL.Image
from enum import Enum
import re
import preppipe.commontypes
from preppipe.vnmodel import *
class EngineSupport:
"""All engine support classes inherit this class, so that we can use reflection to query all supported engines"""
pass
# we define an MIR infrastructure for backend... Engine Model (EM)
class EMInstruction:
# abstract opcode data
opcode : typing.Any
# list of operands
operand_list : typing.List[typing.Any] = []
def __init__(self, opcode, operand_list : typing.List[typing.Any] = []) -> None :
self.opcode = opcode
if len(operand_list) == 0:
self.operand_list = []
else:
self.operand_list = operand_list
def set_operand_list(self, operand_list : typing.List[typing.Any]) -> None:
self.operand_list = operand_list
def add_operand(self, operand : typing.Any) -> None:
self.operand_list.append(operand)
def get_num_operands(self):
return len(self.operand_list)
def get_operand(self, index : int) -> typing.Any:
return self.operand_list[index]
def get_opcode(self) -> typing.Any:
return self.opcode
def get_operand_dict(self, arglist : typing.List[str]) -> typing.Dict[str, typing.Any]:
assert(len(arglist) == len(self.operand_list))
result : typing.Dict[str, typing.Any] = {}
for i in range(0, len(self.operand_list)):
result[arglist[i]] = self.operand_list[i]
return result
class EMBasicBlock:
label : str = ""
instr_list : typing.List[EMInstruction] = []
def __init__(self, label : str = "") -> None :
self.label = label
self.instr_list = []
def add_instruction(self, instr : EMInstruction) -> EMInstruction:
self.instr_list.append(instr)
return instr
def get_instruction_list(self) -> typing.List[EMInstruction]:
return self.instr_list
def get_label(self) -> str:
return self.label
class EMFunction:
"""It is fine if left unused; not all engines support functions"""
basicblock_list : typing.List[typing.Any] = []
def __init__(self) -> None :
self.basicblock_list = []
def add_basicblock(self, bb : typing.Any):
self.basicblock_list.append(bb)
return bb
# helper functions
def _get_label_name(name : str, type_prefix : str, scope_prefix: str, name_dict : typing.Dict[str, typing.Any], prefix : str = "") -> str:
# get the base name
base_label = re.sub(r'[^a-zA-Z0-9_]', '', name.replace(" ", "_"))
    # ensure the name does not start with a digit or underscore, and is not empty
if len(base_label) > 0:
frontchar = base_label[0]
if frontchar == '_' or frontchar.isnumeric():
base_label = type_prefix + "_" + base_label
else:
        # we have no alphanumeric characters
base_label = type_prefix + "_anon"
# make sure it is unique
# we may have duplicates
# try to add scope prefix to resolve this
if prefix + base_label in name_dict and len(scope_prefix) > 0:
base_label = scope_prefix + "_" + base_label
# now add the prefix; we no longer add prefix to base label
if len(prefix) > 0:
base_label = prefix + base_label
# if not working, add a numeric suffix
numeric_suffix = 0
result = base_label
while result in name_dict:
numeric_suffix += 1
result = base_label + '_' + str(numeric_suffix)
# done
return result
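# A minimal sketch (hypothetical names) of what _get_label_name produces:
# non-alphanumerics are stripped, a leading digit or underscore pulls in the
# type prefix, and a numeric suffix resolves any remaining collision.
#   taken = {}
#   a = _get_label_name("start here", "control_label", "", taken); taken[a] = a
#   b = _get_label_name("start here", "control_label", "", taken); taken[b] = b
#   # a == "start_here", b == "start_here_1"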
def label_branch_targets(model : VNModel, reserved_set : typing.Set[str] = [], include_basicblock : bool = True) -> typing.Dict[VNValue, str]:
"""Assign all functions (and optionally basic blocks) with a label that is:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique across all functions and basic blocks
    We may need this labeling even when functions already have no duplicate labels, to avoid sanitization issues or reserved keywords
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
# actual work
for func in model.get_function_list():
func_label = _get_label_name(func.get_name(), "control_label", "", name_dict)
name_dict[func_label] = func
elem_dict[func] = func_label
if include_basicblock:
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "control_label", func_label, name_dict)
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_basicblocks(func : VNFunction, reserved_set : typing.Set[str] = []) -> typing.Dict[VNBasicBlock, str]:
"""Assign labels to basic blocks with the same criteria as label_branch_targets:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
        name_dict[reserved] = None
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "label", "", name_dict, ".")
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_sayer_identity(model : VNModel, reserved_set : typing.Set[str] = []) -> typing.Dict[str, str]:
"""make sure all characters and sayers have (alphanumeric) labels"""
name_dict = {}
elem_dict = {}
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
for character in model.get_character_list():
name = _get_label_name(character.get_name(), "character", "", name_dict)
name_dict[name] = character
elem_dict[character] = name
for sayer in model.get_sayer_list():
character = sayer.get_identity()
character_label = elem_dict[character]
name = _get_label_name(character_label + sayer.get_name(), "sayer", "", name_dict)
name_dict[name] = sayer
elem_dict[sayer] = name
return elem_dict
| 32.918782
| 143
| 0.665998
| 876
| 6,485
| 4.726027
| 0.211187
| 0.04058
| 0.036232
| 0.019324
| 0.353865
| 0.299517
| 0.278019
| 0.250725
| 0.215942
| 0.215942
| 0
| 0.003808
| 0.230686
| 6,485
| 197
| 144
| 32.918782
| 0.826017
| 0.212799
| 0
| 0.297521
| 0
| 0
| 0.016529
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 1
| 0.140496
| false
| 0.008264
| 0.049587
| 0.041322
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efd8cec6101a750931dee27419124950274496b7
| 3,422
|
py
|
Python
|
upload.py
|
woodlords/nftmaker-pro-scripts
|
86e1eef0d297bf9589d56272b1edea9bb3e18612
|
[
"Apache-2.0"
] | 2
|
2022-02-09T17:48:33.000Z
|
2022-02-12T08:18:42.000Z
|
upload.py
|
woodlords/nftmaker-pro-scripts
|
86e1eef0d297bf9589d56272b1edea9bb3e18612
|
[
"Apache-2.0"
] | null | null | null |
upload.py
|
woodlords/nftmaker-pro-scripts
|
86e1eef0d297bf9589d56272b1edea9bb3e18612
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pprint
import requests
import base64
import json
import argparse
import sys
p = argparse.ArgumentParser(description="New")
p.add_argument('-f','--folder-name', required=True, help='Folder name of the images/metadata files')
p.add_argument('-s','--start', required=False, help='Start ID to upload')
p.add_argument('-e','--end', required=False, help='End number for IDs to upload')
p.add_argument('--ids', nargs="+", required=False, help='List of local IDs to upload')
if len(sys.argv)==1:
p.print_help(sys.stderr)
sys.exit(1)
args = p.parse_args()
# Some variables you will need
api_key = "api_key_from_nftmakerpro"
nft_project_id = "12345"
upload_url = f'https://api.nft-maker.io/UploadNft/{api_key}/{nft_project_id}'
prefixName="WoodCastleProject"
prefixDisplayName="Wood Castle: Wood Lords S1 " # Leave a space at the end as we will add the #number of token at the end.
projectDescription="Wood Castle Studios Presents Woods Lords: Season One"
# Lord details
folder_name = args.folder_name
ids_list = args.ids
def convert_image_to_base64(image_file):
with open(image_file, 'rb') as binary_file:
binary_file_data = binary_file.read()
base64_encoded_data = base64.b64encode(binary_file_data)
base64_message = base64_encoded_data.decode('utf-8')
return base64_message
# See example Metadata file to use for adding metadata
def gen_api_metadata(metadata_json_file):
api_metadata = 'api_' + metadata_json_file
with open(metadata_json_file, 'r') as fd:
myjson = json.load(fd)
data = []
for k,v in myjson.items():
d = { }
d['name'] = k
d['value'] = v
data.append(d)
return data
def gen_metadata(assetName):
metadata_file = "images/" + folder_name + '/' + assetName + '.json'
image_file = "images/" + folder_name + '/' + assetName + '.jpg'
base64_message = convert_image_to_base64(image_file)
api_metadata = gen_api_metadata(metadata_file)
params = {
"assetName": prefixName+assetName, # If you set up a prefix in your project, you omit the prefix here, if not add prefix as well
"previewImageNft": {
"mimetype": "image/jpeg",
"displayname": prefixDispalyName + "#" + assetName,
"fileFromBase64": base64_message,
"description": projectDescription,
"metadataPlaceholder": api_metadata
}
}
return params
def upload_image(data):
    try:
        r = requests.post(upload_url, json=data)
        print(r.json())
    except Exception:
        print(data["assetName"] + ' : FAILED!')
def upload_set(startCount, endCount):
# Names of the images/metadata files
for i in range(startCount, endCount+1):
if(i < 10):
assetName = '000' + str(i)
elif(i < 100):
assetName = '00' + str(i)
elif(i < 1000):
assetName = '0' + str(i)
else:
assetName = str(i)
print(f'INFO: Working on asset {prefixName+assetName}')
data = gen_metadata(assetName)
upload_image(data)
def main():
# Iterate through list of IDs and upload them
if args.ids:
for i in args.ids:
startCount = int(i)
endCount = int(i)
upload_set(startCount,endCount)
else:
startCount = int(args.start)
endCount = int(args.end)
upload_set(startCount,endCount)
main()
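# Example invocations (assumed layout: images/<folder>/NNNN.jpg plus a matching
# images/<folder>/NNNN.json metadata file per asset, ids zero-padded to 4 digits):
#   python upload.py -f wave1 -s 1 -e 50    # upload assets 0001..0050
#   python upload.py -f wave1 --ids 7 42    # upload just 0007 and 0042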
| 30.553571
| 136
| 0.648159
| 453
| 3,422
| 4.743929
| 0.362031
| 0.02792
| 0.022336
| 0.037692
| 0.094928
| 0.026989
| 0
| 0
| 0
| 0
| 0
| 0.018738
| 0.235827
| 3,422
| 111
| 137
| 30.828829
| 0.803059
| 0.09848
| 0
| 0.046512
| 0
| 0
| 0.176911
| 0.014959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.069767
| 0
| 0.174419
| 0.05814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efdf05259aeb476a54f281ec506c8577fe42f662
| 17,015
|
py
|
Python
|
app/common/helper.py
|
lguobin/KB_API
|
f7180cf430cb8de2eac8fa78e3937666da950c7a
|
[
"Apache-2.0"
] | null | null | null |
app/common/helper.py
|
lguobin/KB_API
|
f7180cf430cb8de2eac8fa78e3937666da950c7a
|
[
"Apache-2.0"
] | null | null | null |
app/common/helper.py
|
lguobin/KB_API
|
f7180cf430cb8de2eac8fa78e3937666da950c7a
|
[
"Apache-2.0"
] | null | null | null |
# from app.common.utils import *
from sqlalchemy import desc
from settings import Config
from app.models import *
from app.extensions import db
from app.models.base import _BaseModel
from app.common.message import DBError
# Get paginated data
def Pages(_request, _TABLE, _filter=None):
page = get_page_value(_request)
per_page = get_per_page_value(_request, Config.PER_PAGE, Config.MAX_PER_PAGE)
paging = get_query_data(_request, "paging", 1)
filter_params = _filter
if bool(int(paging)):
pagination = get_models_filter_with_pagination(_TABLE, "", page, per_page, desc, *filter_params)
total = pagination['total']
models = pagination['models']
data = [model.get_json() for model in models]
return {
'status': 'ok',
'total': total,
'page': page,
'pages': get_pages(total, per_page),
'per_page': per_page,
'results': data
}
def input_files(pid, *row):
    # Bulk-import interface test cases; create the interface if it does not exist
_interss = get_model_by(Interfaces, name=row[0])
if _interss != None:
_input = {
"name": row[1],
"description": "导入用例__" + str(row[2]),
"pid": pid,
"Iid": _interss.object_id,
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"requestBody": row[8],
"parameterType": row[9],
"setGlobalVars": eval(row[10]),
"checkoptions": None,
"checkSpendSeconds": row[12],
"checkResponseBody": eval(row[13]),
"checkResponseCode": row[14],
"uid": row[-1],
}
if row[11] == "Y" or row[11] == "True":
_input["checkoptions"] = True
else:
_input["checkoptions"] = False
create_model(TestCase, **_input)
else:
require_items = {
"pid": pid,
"uid": row[-1],
"name": row[0],
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"i_type": "HTTP",
"description": "导入用例__" + str(row[2])
}
_model = create_model(Interfaces, **require_items)
_input = {
"name": row[1],
"description": "导入用例__" + str(row[2]),
"pid": pid,
"Iid": _model.object_id,
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"requestBody": row[8],
"parameterType": row[9],
"setGlobalVars": row[10],
"checkoptions": None,
"checkSpendSeconds": row[12],
"checkResponseBody": row[13],
"checkResponseCode": row[14],
"uid": row[-1],
}
if row[11] == "Y" or row[11] == "True":
_input["checkoptions"] = True
else:
_input["checkoptions"] = False
create_model(TestCase, **_input)
return True
def get_Env(test_env_id):
    # Get environment configuration info
Temp_env_list = get_model(EnvConfig, test_env_id)
if Temp_env_list != None:
if Temp_env_list.domain == "" or Temp_env_list.domain == None:
            return {'status': 'failed', 'data': 'Environment configuration is invalid; please check the environment settings'}
_env_list = [
Temp_env_list.object_id,
Temp_env_list.name,
Temp_env_list.domain,
Temp_env_list.redis,
Temp_env_list.mysql,
]
return _env_list
else:
return None
def composeCaseWorkshop(EnvId, ProjectId=None, Interface=None, Tcase=None):
if EnvId != None:
_EnvList = get_Env(EnvId)
if _EnvList == None:
return None
_CASE = []
if ProjectId != None:
_Pro_object_id = get_models(Project, object_id=ProjectId)
if _Pro_object_id != []:
__case = get_models(TestCase, pid=_Pro_object_id[0].object_id)
for x in range(len(__case)):
if __case[x].route:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + __case[x].route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": __case[x].name,
"Project_id": __case[x].pid,
"Interface_id": __case[x].Iid,
"object_id": __case[x].object_id,
"Method": __case[x].requestMethod,
"Body": __case[x].requestBody,
"Headers": __case[x].headers,
"parameterType": __case[x].parameterType,
"filePath": __case[x].filePath,
"setGlobalVars": __case[x].setGlobalVars,
"checkoptions": __case[x].checkoptions,
"checkSpendSeconds": __case[x].checkSpendSeconds,
"checkResponseCode": __case[x].checkResponseCode,
"checkResponseBody": __case[x].checkResponseBody,
"checkResponseNumber": __case[x].checkResponseNumber,
}
_CASE.append(reqs)
else:
return None
elif Interface != None and Interface != []:
for index in range(len(Interface)):
_Inter_object_id = get_models(Interfaces, object_id=Interface[index])
if _Inter_object_id != []:
__case = get_models(TestCase, Iid=_Inter_object_id[0].object_id)
for x in range(len(__case)):
if __case[x].route:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + __case[x].route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": __case[x].name,
"Project_id": __case[x].pid,
"Interface_id": __case[x].Iid,
"object_id": __case[x].object_id,
"Method": __case[x].requestMethod,
"Body": __case[x].requestBody,
"Headers": __case[x].headers,
"parameterType": __case[x].parameterType,
"filePath": __case[x].filePath,
"setGlobalVars": __case[x].setGlobalVars,
"checkoptions": __case[x].checkoptions,
"checkSpendSeconds": __case[x].checkSpendSeconds,
"checkResponseCode": __case[x].checkResponseCode,
"checkResponseBody": __case[x].checkResponseBody,
"checkResponseNumber": __case[x].checkResponseNumber,
}
_CASE.append(reqs)
else:
return None
elif Tcase != None and Tcase != []:
id_list = []
for case in Tcase:
_obj_id = case
if _obj_id in id_list:
Tcase.remove(case)
else:
                # Check whether the Id is valid
_temp = get_model(TestCase, object_id=_obj_id)
if _temp != None:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + _temp.route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": _temp.name,
"Project_id": _temp.pid,
"Interface_id": _temp.Iid,
"object_id": _temp.object_id,
"Method": _temp.requestMethod,
"Body": _temp.requestBody,
"Headers": _temp.headers,
"parameterType": _temp.parameterType,
"filePath": _temp.filePath,
"setGlobalVars": _temp.setGlobalVars,
"checkoptions": _temp.checkoptions,
"checkSpendSeconds": _temp.checkSpendSeconds,
"checkResponseCode": _temp.checkResponseCode,
"checkResponseBody": _temp.checkResponseBody,
"checkResponseNumber": _temp.checkResponseNumber,
}
_CASE.append(reqs)
else:
pass
return _CASE
else:
return None
def single_Save_response(_response, object_id):
from app import app
with app.app_context():
_model = get_model(TestCase, object_id)
_model.responseBody = str(_response)
update_models(_model)
print("异步保存数据")
def save_TestReport(_response):
from app import app
with app.app_context():
_model = create_model(TestReport, **_response)
return {"object_id": _model.object_id}
def get_TestReport(_model):
from app.models.tools import get_username
if _model != None:
return {
"status": "ok",
"object_id": _model.object_id,
"uid": _model.uid,
"uid_name": get_username("UID", _model.uid),
"Project_id_name": get_username("PID", _model.Project_id),
"EnvId":_model.EnvId,
"EnvName":_model.EnvName,
"executionMode":_model.executionMode ,
"mission_name":_model.cronJobId,
# "cronJobId":_model.cronJobId,
"Project_id":_model.Project_id,
"StartTime":_model.StartTime,
"interfaces_Suites_CaseDetail":_model.interfaces_Suites_CaseDetail,
"totalCount":_model.totalCount,
"passCount":_model.passCount,
"failCount":_model.failCount,
"errorCount":_model.errorCount,
"spendTimeInSec":_model.spendTimeInSec,
"create_at": _model.created_at,
"updated_at": _model.updated_at,
}
else:
return {"status": "failed", "data": "报告不存在或已被删除!"}
# ------------------------------
# ------------------------------
# ------------------------------
def get_task_Job(table_class, **params):
_moble = db.session.query(table_class).filter_by(**params).first()
return _moble.object_id
def get_first_one_model(table_class):
return db.session.query(table_class).order_by(table_class.updated_at.desc()).first()
def get_like(table_class, params, _user=None):
if params != None and _user == None:
return db.session.query(table_class).filter(table_class.name.like("%"+params+"%")).all()
else:
_uid = db.session.query(Users).filter(Users.user==_user).first()
if _uid != None:
return db.session.query(table_class).filter(
table_class.name.like("%"+params+"%"),
table_class.uid==_uid.object_id).all()
# table_class.uid==_uid.user).all()
else:
return []
def safe_check(value):
return True
def get_query_data(request, key, default=None, throwable=False):
value = request.args.get(key, None)
if value is not None and safe_check(value):
return value
value = request.headers.get(key, None)
if value is not None and safe_check(value):
return value
if not throwable:
return default
def get_name(table_class, object_id):
try:
return get_model(table_class, object_id)
except BaseException:
return get_model(table_class, object_id)
def get_model(table_class, object_id):
return db.session.query(table_class).get(object_id)
def get_models(table_class, **params):
if params is not None and len(params) > 0:
return db.session.query(table_class).filter_by(**params, state=0).all()
else:
return db.session.query(table_class).all()
def get_post_data(request, key, throwable=False):
try:
value = request.form.get(key, None)
if value is not None:
return value
json = request.get_json(force=True)
if json is not None:
value = json.get(key, None)
if value is not None and safe_check(value):
return value
if not throwable:
return None
print("[ 缺少提交的参数 ] -> ", key)
except BaseException:
raise DBError("Error: post value no contains {0}".format(key))
def get_post_items(request, item_names, throwable=False):
items = {}
for name in item_names:
data = get_post_data(request, name, throwable)
if data is not None:
items[name] = data
return items
from sqlalchemy.exc import IntegrityError
def create_model(table_class, **items):
model = table_class()
for key, value in items.items():
setattr(model, key, value)
try:
model.update()
db.session.add(model)
db.session.commit()
return model
except IntegrityError as ie:
db.session.rollback()
raise DBError
except Exception as e:
db.session.rollback()
raise DBError
def update_models(*models, auto_commit=True):
try:
for model in models:
model.update()
db.session.add(model)
if auto_commit:
db.session.commit()
except IntegrityError as ie:
db.session.rollback()
raise DBError
except Exception as e:
db.session.rollback()
raise DBError
def get_models_timestamp(table_class, *params):
try:
return db.session.query(table_class).filter(_BaseModel.created_at <= params).all()
except Exception as e:
raise DBError
def get_models_filter(table_class, *params):
try:
return db.session.query(table_class).filter(*params).all()
except Exception as e:
raise DBError(e)
def get_page_value(request):
page = int(get_query_data(request, 'page', 1))
if page <= 0:
return 1
return page
def get_pages(total, per_page):
pages = (total + per_page - 1) // per_page
if pages <= 0:
pages = 1
return pages
def get_per_page_value(request, default, max_value):
per_page = int(get_query_data(request, 'per_page', default))
if per_page > max_value or per_page <= 0:
return max_value
return per_page
def params_filter(table_class, _name=None, _uid=None):
if _name != None and _uid != None:
return [table_class.state == table_class.STATE_NORMAL, table_class.name.like("%"+_name+"%"), table_class.like("%"+_uid+"%")]
elif _name == None and _uid != None:
return [table_class.state == table_class.STATE_NORMAL, table_class.uid.like("%"+_uid+"%")]
elif _uid == None and _name != None:
return [table_class.state == table_class.STATE_NORMAL, table_class.name.like("%"+_name+"%")]
else:
return [table_class.state == table_class.STATE_NORMAL]
def get_models_filter_with_pagination(table_class, order_name, page, per_page, order_func, *params):
    # order_name is temporarily unused
try:
offset = (page - 1) * per_page
query = table_class.query.filter(*params)
total = query.count()
models = query.order_by(order_func(_BaseModel.updated_at)).offset(offset).limit(per_page).all()
return {
'total': total,
'models': models
}
except Exception as e:
raise DBError(e)
def get_model_by(table_class, **params):
try:
return db.session.query(table_class).filter_by(**params).first()
except Exception as e:
raise DBError(e)
def delete_model(table_class, object_id, real_delete=False, auto_commit=True):
try:
model = db.session.query(table_class).get(object_id)
delete_model_with_model(model, real_delete, auto_commit=auto_commit)
except Exception as e:
raise DBError(e)
def delete_model_with_model(model, real_delete=False, state=_BaseModel.STATE_DELETE, auto_commit=True):
try:
if real_delete:
db.session.delete(model)
else:
model.update()
model.state = state
db.session.add(model)
if auto_commit:
db.session.commit()
except Exception as e:
if auto_commit:
db.session.rollback()
raise DBError(e)
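# Usage sketch (hypothetical model instances and ids, not from the original
# module): the helpers above wrap one SQLAlchemy session; create_model commits
# or rolls back, get_models filters soft-deleted rows via state=0, and
# delete_model soft-deletes unless real_delete=True.
#   case = create_model(TestCase, name="smoke", pid=project_id, uid=user_id)
#   cases = get_models(TestCase, pid=project_id)   # active rows only
#   delete_model(TestCase, case.object_id)         # soft delete by default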
| 33.759921
| 132
| 0.53253
| 1,753
| 17,015
| 4.885339
| 0.127211
| 0.052546
| 0.019617
| 0.024404
| 0.488907
| 0.450257
| 0.414176
| 0.399813
| 0.359995
| 0.326483
| 0
| 0.009443
| 0.352689
| 17,015
| 504
| 133
| 33.759921
| 0.768113
| 0.017455
| 0
| 0.457921
| 0
| 0
| 0.085783
| 0.001676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071782
| false
| 0.004951
| 0.024752
| 0.007426
| 0.207921
| 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efe1e27548d4a791c0325857f9e7735c777989c1
| 2,635
|
py
|
Python
|
decisive/__init__.py
|
decisive/api-demo-python
|
58cd14e9e1f6373a3cd927536fd29f5f286940a0
|
[
"MIT"
] | null | null | null |
decisive/__init__.py
|
decisive/api-demo-python
|
58cd14e9e1f6373a3cd927536fd29f5f286940a0
|
[
"MIT"
] | null | null | null |
decisive/__init__.py
|
decisive/api-demo-python
|
58cd14e9e1f6373a3cd927536fd29f5f286940a0
|
[
"MIT"
] | null | null | null |
import requests
import requests.exceptions
import datetime
import ujson as json
import logging
class DecisiveApiClient(object):
HOST = 'https://ads.decisive.is'.strip('/')
def __init__(self, api_key, host=None):
self.session = requests.Session()
self.session.auth = (api_key,'')
self.host = host or DecisiveApiClient.HOST
def to_uri(self, *paths, **get_args):
        path = '/'.join(p.strip('/') for p in map(str, paths))
args = '&'.join('{}={}'.format(*i) for i in self.flatten_getargs(get_args))
return '{}/{}?{}'.format(self.host, path, args)
def flatten_getargs(self, get_args):
# NOTE: support multiple value arg values, e.g. select=bids&select=spend
        for key, value in get_args.items():
            value_list = value if hasattr(value, '__iter__') else [value]
            for list_value in value_list:
                yield key, list_value
def get(self, *paths, **get_args):
uri = self.to_uri(*paths, **get_args)
response = self.session.get(uri)
return self.examine_response(response)
def put(self, updated_ad): # NOTE: only /ads supports PUT method at the moment
uri = self.to_uri('ads',updated_ad['ad_id'])
response = self.session.put(uri, data=json.dumps(updated_ad))
return self.examine_response(response, False)
def post(self, data, *paths):
uri = self.to_uri(*paths)
response = self.session.post(uri, data=json.dumps(data))
return self.examine_response(response)
def delete(self, *paths):
uri = self.to_uri(*paths)
response = self.session.delete(uri)
return self.examine_response(response, False)
def get_report(self, ad, type_, attribute, start_datehour, end_datehour, **options):
return self.get('ads', ad['ad_id'], 'reports',
type_, attribute,
start_datehour.date().isoformat(),
start_datehour.hour,
end_datehour.date().isoformat(),
end_datehour.hour,
**options)
def examine_response(self, response, return_json=True):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
body = response.json() or {}
            message = body.get('reason') or str(error)
            logging.warning('HTTPError %s %s', response.status_code, message)
            logging.info('Did you know? %s', body.get('did_you_know'))
return False
return True if not return_json else response.json()
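# Usage sketch (hypothetical API key and ad id; the paths mirror the ones the
# methods above already target):
if __name__ == '__main__':
    client = DecisiveApiClient('your-api-key')
    ads = client.get('ads')               # GET {host}/ads
    client.delete('ads', 'some-ad-id')    # returns True on success, False otherwise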
| 38.188406
| 88
| 0.603036
| 323
| 2,635
| 4.758514
| 0.334365
| 0.042941
| 0.023422
| 0.03123
| 0.16851
| 0.15745
| 0.106701
| 0.053351
| 0.053351
| 0
| 0
| 0
| 0.274383
| 2,635
| 68
| 89
| 38.75
| 0.80387
| 0.045541
| 0
| 0.111111
| 0
| 0
| 0.044206
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.092593
| 0.018519
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efe41b6dc8f659359b1e12cb86ef509b2e8e51a8
| 38,284
|
py
|
Python
|
app/main/views/service_settings.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
app/main/views/service_settings.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
app/main/views/service_settings.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from flask_login import current_user, login_required
from notifications_python_client.errors import HTTPError
from notifications_utils.field import Field
from notifications_utils.formatters import formatted_list
from app import (
billing_api_client,
current_service,
email_branding_client,
inbound_number_client,
organisations_client,
service_api_client,
user_api_client,
zendesk_client,
)
from app.main import main
from app.main.forms import (
BrandingOptionsEmail,
ConfirmPasswordForm,
FreeSMSAllowance,
InternationalSMSForm,
LetterBranding,
LinkOrganisationsForm,
OrganisationTypeForm,
RenameServiceForm,
RequestToGoLiveForm,
ServiceBasicViewForm,
ServiceContactLinkForm,
ServiceEditInboundNumberForm,
ServiceInboundNumberForm,
ServiceLetterContactBlockForm,
ServiceReplyToEmailForm,
ServiceSetBranding,
ServiceSmsSenderForm,
ServiceSwitchLettersForm,
SMSPrefixForm,
branding_options_dict,
)
from app.utils import (
AgreementInfo,
email_safe,
get_cdn_domain,
user_has_permissions,
user_is_platform_admin,
)
@main.route("/services/<service_id>/service-settings")
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_settings(service_id):
letter_branding_organisations = email_branding_client.get_letter_email_branding()
organisation = organisations_client.get_service_organisation(service_id).get('name', None)
if current_service['email_branding']:
email_branding = email_branding_client.get_email_branding(current_service['email_branding'])['email_branding']
else:
email_branding = None
inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)
disp_inbound_number = inbound_number['data'].get('number', '')
reply_to_email_addresses = service_api_client.get_reply_to_email_addresses(service_id)
reply_to_email_address_count = len(reply_to_email_addresses)
default_reply_to_email_address = next(
(x['email_address'] for x in reply_to_email_addresses if x['is_default']), "Not set"
)
letter_contact_details = service_api_client.get_letter_contacts(service_id)
letter_contact_details_count = len(letter_contact_details)
default_letter_contact_block = next(
(Field(x['contact_block'], html='escape') for x in letter_contact_details if x['is_default']), "Not set"
)
sms_senders = service_api_client.get_sms_senders(service_id)
sms_sender_count = len(sms_senders)
default_sms_sender = next(
(Field(x['sms_sender'], html='escape') for x in sms_senders if x['is_default']), "None"
)
free_sms_fragment_limit = billing_api_client.get_free_sms_fragment_limit_for_year(service_id)
return render_template(
'views/service-settings.html',
email_branding=email_branding,
letter_branding=letter_branding_organisations.get(
current_service.get('dvla_organisation', '001')
),
can_receive_inbound=('inbound_sms' in current_service['permissions']),
inbound_number=disp_inbound_number,
default_reply_to_email_address=default_reply_to_email_address,
reply_to_email_address_count=reply_to_email_address_count,
default_letter_contact_block=default_letter_contact_block,
letter_contact_details_count=letter_contact_details_count,
default_sms_sender=default_sms_sender,
sms_sender_count=sms_sender_count,
free_sms_fragment_limit=free_sms_fragment_limit,
prefix_sms=current_service['prefix_sms'],
organisation=organisation,
)
@main.route("/services/<service_id>/service-settings/name", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change(service_id):
form = RenameServiceForm()
if request.method == 'GET':
form.name.data = current_service['name']
if form.validate_on_submit():
if form.name.data == current_service['name']:
return redirect(url_for('.service_settings', service_id=service_id))
unique_name = service_api_client.is_service_name_unique(service_id, form.name.data, email_safe(form.name.data))
if not unique_name:
form.name.errors.append("This service name is already in use")
return render_template('views/service-settings/name.html', form=form)
session['service_name_change'] = form.name.data
return redirect(url_for('.service_name_change_confirm', service_id=service_id))
return render_template(
'views/service-settings/name.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/name/confirm", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change_confirm(service_id):
# Validate password for form
def _check_password(pwd):
return user_api_client.verify_password(current_user.id, pwd)
form = ConfirmPasswordForm(_check_password)
if form.validate_on_submit():
try:
service_api_client.update_service(
current_service['id'],
name=session['service_name_change'],
email_from=email_safe(session['service_name_change'])
)
except HTTPError as e:
error_msg = "Duplicate service name '{}'".format(session['service_name_change'])
if e.status_code == 400 and error_msg in e.message['name']:
# Redirect the user back to the change service name screen
flash('This service name is already in use', 'error')
return redirect(url_for('main.service_name_change', service_id=service_id))
else:
raise e
else:
session.pop('service_name_change')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/confirm.html',
heading='Change your service name',
form=form)
@main.route("/services/<service_id>/service-settings/request-to-go-live")
@login_required
@user_has_permissions('manage_service')
def request_to_go_live(service_id):
return render_template(
'views/service-settings/request-to-go-live.html',
has_team_members=(
user_api_client.get_count_of_users_with_permission(
service_id, 'manage_service'
) > 1
),
has_templates=(
service_api_client.count_service_templates(service_id) > 0
),
has_email_templates=(
service_api_client.count_service_templates(service_id, template_type='email') > 0
),
has_email_reply_to_address=bool(
service_api_client.get_reply_to_email_addresses(service_id)
)
)
@main.route("/services/<service_id>/service-settings/submit-request-to-go-live", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def submit_request_to_go_live(service_id):
form = RequestToGoLiveForm()
if form.validate_on_submit():
zendesk_client.create_ticket(
subject='Request to go live - {}'.format(current_service['name']),
message=(
'Service: {}\n'
'{}\n'
'\n---'
'\nOrganisation type: {}'
'\nAgreement signed: {}'
'\nChannel: {}\nStart date: {}\nStart volume: {}'
'\nPeak volume: {}'
'\nFeatures: {}'
).format(
current_service['name'],
url_for('main.service_dashboard', service_id=current_service['id'], _external=True),
current_service['organisation_type'],
AgreementInfo.from_current_user().as_human_readable,
formatted_list(filter(None, (
'email' if form.channel_email.data else None,
'text messages' if form.channel_sms.data else None,
'letters' if form.channel_letter.data else None,
)), before_each='', after_each=''),
form.start_date.data,
form.start_volume.data,
form.peak_volume.data,
formatted_list(filter(None, (
'one off' if form.method_one_off.data else None,
'file upload' if form.method_upload.data else None,
'API' if form.method_api.data else None,
)), before_each='', after_each='')
),
ticket_type=zendesk_client.TYPE_QUESTION,
user_email=current_user.email_address,
user_name=current_user.name
)
flash('Thanks for your request to go live. We’ll get back to you within one working day.', 'default')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template('views/service-settings/submit-request-to-go-live.html', form=form)
@main.route("/services/<service_id>/service-settings/switch-live")
@login_required
@user_is_platform_admin
def service_switch_live(service_id):
service_api_client.update_service(
current_service['id'],
        # TODO This limit should be set depending on the agreement
        # signed with Notify.
message_limit=250000 if current_service['restricted'] else 50,
restricted=(not current_service['restricted'])
)
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/research-mode")
@login_required
@user_is_platform_admin
def service_switch_research_mode(service_id):
service_api_client.update_service_with_properties(
service_id,
{"research_mode": not current_service['research_mode']}
)
return redirect(url_for('.service_settings', service_id=service_id))
def switch_service_permissions(service_id, permission, sms_sender=None):
force_service_permission(
service_id,
permission,
on=permission not in current_service['permissions'],
sms_sender=sms_sender
)
def force_service_permission(service_id, permission, on=False, sms_sender=None):
permissions, permission = set(current_service['permissions']), {permission}
update_service_permissions(
service_id,
permissions | permission if on else permissions - permission,
sms_sender=sms_sender
)
def update_service_permissions(service_id, permissions, sms_sender=None):
current_service['permissions'] = list(permissions)
data = {'permissions': current_service['permissions']}
if sms_sender:
data['sms_sender'] = sms_sender
service_api_client.update_service_with_properties(service_id, data)
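# Illustration (not part of this module): force_service_permission relies on
# plain set arithmetic to toggle a flag. With permissions = {'email', 'sms'}
# and permission = {'letter'}:
#   permissions | permission == {'email', 'sms', 'letter'}   (switched on)
#   permissions - permission == {'email', 'sms'}             (switched off; a no-op if absent)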
@main.route("/services/<service_id>/service-settings/can-send-email")
@login_required
@user_is_platform_admin
def service_switch_can_send_email(service_id):
switch_service_permissions(service_id, 'email')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-send-sms")
@login_required
@user_is_platform_admin
def service_switch_can_send_sms(service_id):
switch_service_permissions(service_id, 'sms')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/email-auth")
@login_required
@user_is_platform_admin
def service_switch_email_auth(service_id):
switch_service_permissions(service_id, 'email_auth')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-send-precompiled-letter")
@login_required
@user_is_platform_admin
def service_switch_can_send_precompiled_letter(service_id):
switch_service_permissions(service_id, 'precompiled_letter')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-upload-document", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def service_switch_can_upload_document(service_id):
form = ServiceContactLinkForm()
# If turning the permission off, or turning it on and the service already has a contact_link,
# don't show the form to add the link
if 'upload_document' in current_service['permissions'] or current_service.get('contact_link'):
switch_service_permissions(service_id, 'upload_document')
return redirect(url_for('.service_settings', service_id=service_id))
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
contact_link=form.url.data
)
switch_service_permissions(service_id, 'upload_document')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/archive", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def archive_service(service_id):
if request.method == 'POST':
service_api_client.archive_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash('There\'s no way to reverse this! Are you sure you want to archive this service?', 'delete')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/suspend", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def suspend_service(service_id):
if request.method == 'POST':
service_api_client.suspend_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash("This will suspend the service and revoke all api keys. Are you sure you want to suspend this service?",
'suspend')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/resume", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def resume_service(service_id):
if request.method == 'POST':
service_api_client.resume_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash("This will resume the service. New api key are required for this service to use the API.", 'resume')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/contact-link", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_contact_link(service_id):
form = ServiceContactLinkForm()
if request.method == 'GET':
form.url.data = current_service.get('contact_link')
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
contact_link=form.url.data
)
return redirect(url_for('.service_settings', service_id=current_service['id']))
return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/set-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_email(service_id):
return render_template(
'views/service-settings/set-email.html',
)
@main.route("/services/<service_id>/service-settings/set-reply-to-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_reply_to_email(service_id):
return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/email-reply-to", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_email_reply_to(service_id):
reply_to_email_addresses = service_api_client.get_reply_to_email_addresses(service_id)
return render_template(
'views/service-settings/email_reply_to.html',
reply_to_email_addresses=reply_to_email_addresses)
@main.route("/services/<service_id>/service-settings/email-reply-to/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_email_reply_to(service_id):
form = ServiceReplyToEmailForm()
reply_to_email_address_count = len(service_api_client.get_reply_to_email_addresses(service_id))
first_email_address = reply_to_email_address_count == 0
if form.validate_on_submit():
service_api_client.add_reply_to_email_address(
current_service['id'],
email_address=form.email_address.data,
is_default=first_email_address if first_email_address else form.is_default.data
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
return render_template(
'views/service-settings/email-reply-to/add.html',
form=form,
first_email_address=first_email_address)
@main.route(
"/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/edit",
methods=['GET', 'POST'],
endpoint="service_edit_email_reply_to"
)
@main.route(
"/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete",
methods=['GET'],
endpoint="service_confirm_delete_email_reply_to"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_email_reply_to(service_id, reply_to_email_id):
form = ServiceReplyToEmailForm()
reply_to_email_address = service_api_client.get_reply_to_email_address(service_id, reply_to_email_id)
if request.method == 'GET':
form.email_address.data = reply_to_email_address['email_address']
form.is_default.data = reply_to_email_address['is_default']
if form.validate_on_submit():
service_api_client.update_reply_to_email_address(
current_service['id'],
reply_to_email_id=reply_to_email_id,
email_address=form.email_address.data,
is_default=True if reply_to_email_address['is_default'] else form.is_default.data
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
return render_template(
'views/service-settings/email-reply-to/edit.html',
form=form,
reply_to_email_address_id=reply_to_email_id,
confirm_delete=(request.endpoint == "main.service_confirm_delete_email_reply_to"),
)
@main.route("/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete", methods=['POST'])
@login_required
@user_has_permissions('manage_service')
def service_delete_email_reply_to(service_id, reply_to_email_id):
service_api_client.delete_reply_to_email_address(
service_id=current_service['id'],
reply_to_email_id=reply_to_email_id,
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-inbound-number", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_number(service_id):
available_inbound_numbers = inbound_number_client.get_available_inbound_sms_numbers()
service_has_inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)['data'] != {}
inbound_numbers_value_and_label = [
(number['id'], number['number']) for number in available_inbound_numbers['data']
]
no_available_numbers = available_inbound_numbers['data'] == []
form = ServiceInboundNumberForm(
inbound_number_choices=inbound_numbers_value_and_label
)
if form.validate_on_submit():
service_api_client.add_sms_sender(
current_service['id'],
sms_sender=form.inbound_number.data,
is_default=True,
inbound_number_id=form.inbound_number.data
)
switch_service_permissions(current_service['id'], 'inbound_sms')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-inbound-number.html',
form=form,
no_available_numbers=no_available_numbers,
service_has_inbound_number=service_has_inbound_number
)
@main.route("/services/<service_id>/service-settings/set-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms(service_id):
return render_template(
'views/service-settings/set-sms.html',
)
@main.route("/services/<service_id>/service-settings/sms-prefix", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms_prefix(service_id):
form = SMSPrefixForm(enabled=(
'on' if current_service['prefix_sms'] else 'off'
))
form.enabled.label.text = 'Start all text messages with ‘{}:’'.format(current_service['name'])
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
prefix_sms=(form.enabled.data == 'on')
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/sms-prefix.html',
form=form
)
@main.route("/services/<service_id>/service-settings/set-international-sms", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_international_sms(service_id):
form = InternationalSMSForm(
enabled='on' if 'international_sms' in current_service['permissions'] else 'off'
)
if form.validate_on_submit():
force_service_permission(
service_id,
'international_sms',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for(".service_settings", service_id=service_id)
)
return render_template(
'views/service-settings/set-international-sms.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-inbound-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_sms(service_id):
number = inbound_number_client.get_inbound_sms_number_for_service(service_id)['data'].get('number', '')
return render_template(
'views/service-settings/set-inbound-sms.html',
inbound_number=number,
)
@main.route("/services/<service_id>/service-settings/set-letters", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_letters(service_id):
form = ServiceSwitchLettersForm(
enabled='on' if 'letter' in current_service['permissions'] else 'off'
)
if form.validate_on_submit():
force_service_permission(
service_id,
'letter',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for(".service_settings", service_id=service_id)
)
return render_template(
'views/service-settings/set-letters.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-auth-type", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_auth_type(service_id):
return render_template(
'views/service-settings/set-auth-type.html',
)
@main.route("/services/<service_id>/service-settings/set-basic-view", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service', 'send_messages')
def service_set_basic_view(service_id):
if current_user.previewing_basic_view:
session.pop('basic', None)
if not current_user.has_permissions('manage_service'):
abort(403)
form = ServiceBasicViewForm(
        enabled='on' if 'caseworking' in current_service['permissions'] else 'off'
)
if form.validate_on_submit():
force_service_permission(
service_id,
'caseworking',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for('.service_settings', service_id=service_id)
)
return render_template(
'views/service-settings/set-basic-view.html',
form=form,
)
@main.route("/services/<service_id>/preview-basic-view")
@login_required
@user_has_permissions('manage_service')
def preview_basic_view(service_id):
session['basic'] = True
return redirect(url_for('.service_dashboard', service_id=service_id))
@main.route("/services/<service_id>/service-settings/letter-contacts", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_letter_contact_details(service_id):
letter_contact_details = service_api_client.get_letter_contacts(service_id)
return render_template(
'views/service-settings/letter-contact-details.html',
letter_contact_details=letter_contact_details)
@main.route("/services/<service_id>/service-settings/letter-contact/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_letter_contact(service_id):
form = ServiceLetterContactBlockForm()
letter_contact_blocks_count = len(service_api_client.get_letter_contacts(service_id))
first_contact_block = letter_contact_blocks_count == 0
if form.validate_on_submit():
service_api_client.add_letter_contact(
current_service['id'],
contact_block=form.letter_contact_block.data.replace('\r', '') or None,
is_default=first_contact_block if first_contact_block else form.is_default.data
)
if request.args.get('from_template'):
return redirect(
url_for('.set_template_sender', service_id=service_id, template_id=request.args.get('from_template'))
)
return redirect(url_for('.service_letter_contact_details', service_id=service_id))
return render_template(
'views/service-settings/letter-contact/add.html',
form=form,
first_contact_block=first_contact_block)
@main.route("/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/edit", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_edit_letter_contact(service_id, letter_contact_id):
letter_contact_block = service_api_client.get_letter_contact(service_id, letter_contact_id)
form = ServiceLetterContactBlockForm(letter_contact_block=letter_contact_block['contact_block'])
if request.method == 'GET':
form.is_default.data = letter_contact_block['is_default']
if form.validate_on_submit():
service_api_client.update_letter_contact(
current_service['id'],
letter_contact_id=letter_contact_id,
contact_block=form.letter_contact_block.data.replace('\r', '') or None,
is_default=True if letter_contact_block['is_default'] else form.is_default.data
)
return redirect(url_for('.service_letter_contact_details', service_id=service_id))
return render_template(
'views/service-settings/letter-contact/edit.html',
form=form,
letter_contact_id=letter_contact_block['id'])
@main.route("/services/<service_id>/service-settings/sms-sender", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_sms_senders(service_id):
def attach_hint(sender):
hints = []
if sender['is_default']:
hints += ["default"]
if sender['inbound_number_id']:
hints += ["receives replies"]
if hints:
sender['hint'] = "(" + " and ".join(hints) + ")"
sms_senders = service_api_client.get_sms_senders(service_id)
for sender in sms_senders:
attach_hint(sender)
return render_template(
'views/service-settings/sms-senders.html',
sms_senders=sms_senders
)
@main.route("/services/<service_id>/service-settings/sms-sender/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_sms_sender(service_id):
form = ServiceSmsSenderForm()
sms_sender_count = len(service_api_client.get_sms_senders(service_id))
first_sms_sender = sms_sender_count == 0
if form.validate_on_submit():
service_api_client.add_sms_sender(
current_service['id'],
sms_sender=form.sms_sender.data.replace('\r', '') or None,
is_default=first_sms_sender if first_sms_sender else form.is_default.data
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
return render_template(
'views/service-settings/sms-sender/add.html',
form=form,
first_sms_sender=first_sms_sender)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/edit",
methods=['GET', 'POST'],
endpoint="service_edit_sms_sender"
)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
methods=['GET'],
endpoint="service_confirm_delete_sms_sender"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_sms_sender(service_id, sms_sender_id):
sms_sender = service_api_client.get_sms_sender(service_id, sms_sender_id)
is_inbound_number = sms_sender['inbound_number_id']
if is_inbound_number:
form = ServiceEditInboundNumberForm(is_default=sms_sender['is_default'])
else:
form = ServiceSmsSenderForm(**sms_sender)
if form.validate_on_submit():
service_api_client.update_sms_sender(
current_service['id'],
sms_sender_id=sms_sender_id,
sms_sender=sms_sender['sms_sender'] if is_inbound_number else form.sms_sender.data.replace('\r', ''),
is_default=True if sms_sender['is_default'] else form.is_default.data
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
form.is_default.data = sms_sender['is_default']
return render_template(
'views/service-settings/sms-sender/edit.html',
form=form,
sms_sender=sms_sender,
inbound_number=is_inbound_number,
sms_sender_id=sms_sender_id,
confirm_delete=(request.endpoint == "main.service_confirm_delete_sms_sender")
)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
methods=['POST'],
)
@login_required
@user_has_permissions('manage_service')
def service_delete_sms_sender(service_id, sms_sender_id):
service_api_client.delete_sms_sender(
service_id=current_service['id'],
sms_sender_id=sms_sender_id,
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-letter-contact-block", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_letter_contact_block(service_id):
if 'letter' not in current_service['permissions']:
abort(403)
form = ServiceLetterContactBlockForm(letter_contact_block=current_service['letter_contact_block'])
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
letter_contact_block=form.letter_contact_block.data.replace('\r', '') or None
)
if request.args.get('from_template'):
return redirect(
url_for('.view_template', service_id=service_id, template_id=request.args.get('from_template'))
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-letter-contact-block.html',
form=form
)
@main.route("/services/<service_id>/service-settings/set-organisation-type", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_organisation_type(service_id):
form = OrganisationTypeForm(organisation_type=current_service.get('organisation_type'))
if form.validate_on_submit():
free_sms_fragment_limit = current_app.config['DEFAULT_FREE_SMS_FRAGMENT_LIMITS'].get(
form.organisation_type.data)
service_api_client.update_service(
service_id,
organisation_type=form.organisation_type.data,
)
billing_api_client.create_or_update_free_sms_fragment_limit(service_id, free_sms_fragment_limit)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-organisation-type.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-free-sms-allowance", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_free_sms_allowance(service_id):
form = FreeSMSAllowance(free_sms_allowance=billing_api_client.get_free_sms_fragment_limit_for_year(service_id))
if form.validate_on_submit():
billing_api_client.create_or_update_free_sms_fragment_limit(service_id, form.free_sms_allowance.data)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-free-sms-allowance.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-email-branding", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def service_set_email_branding(service_id):
email_branding = email_branding_client.get_all_email_branding()
form = ServiceSetBranding(branding_type=current_service.get('branding'))
# dynamically create org choices, including the null option
form.branding_style.choices = [('None', 'None')] + get_branding_as_value_and_label(email_branding)
if form.validate_on_submit():
branding_style = None if form.branding_style.data == 'None' else form.branding_style.data
service_api_client.update_service(
service_id,
branding=form.branding_type.data,
email_branding=branding_style
)
return redirect(url_for('.service_settings', service_id=service_id))
form.branding_style.data = current_service['email_branding'] or 'None'
return render_template(
'views/service-settings/set-email-branding.html',
form=form,
branding_dict=get_branding_as_dict(email_branding)
)
@main.route("/services/<service_id>/service-settings/set-letter-branding", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_letter_branding(service_id):
form = LetterBranding(choices=email_branding_client.get_letter_email_branding().items())
if form.validate_on_submit():
service_api_client.update_service(
service_id,
dvla_organisation=form.dvla_org_id.data
)
return redirect(url_for('.service_settings', service_id=service_id))
form.dvla_org_id.data = current_service.get('dvla_organisation', '001')
return render_template(
'views/service-settings/set-letter-branding.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/link-service-to-organisation", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def link_service_to_organisation(service_id):
organisations = organisations_client.get_organisations()
current_organisation = organisations_client.get_service_organisation(service_id).get('id', None)
form = LinkOrganisationsForm(
choices=convert_dictionary_to_wtforms_choices_format(organisations, 'id', 'name'),
organisations=current_organisation
)
if form.validate_on_submit():
if form.organisations.data != current_organisation:
organisations_client.update_service_organisation(
service_id,
form.organisations.data
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/link-service-to-organisation.html',
has_organisations=organisations,
form=form,
)
@main.route("/services/<service_id>/branding-request/email", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def branding_request(service_id):
form = BrandingOptionsEmail(
options=current_service['branding']
)
if form.validate_on_submit():
zendesk_client.create_ticket(
subject='Email branding request - {}'.format(current_service['name']),
message=(
'Organisation: {}\n'
'Service: {}\n'
'{}\n'
'\n---'
'\nBranding requested: {}'
).format(
AgreementInfo.from_current_user().as_info_for_branding_request,
current_service['name'],
url_for('main.service_dashboard', service_id=current_service['id'], _external=True),
branding_options_dict[form.options.data],
),
ticket_type=zendesk_client.TYPE_QUESTION,
user_email=current_user.email_address,
user_name=current_user.name,
)
flash((
'Thanks for your branding request. We’ll get back to you '
'within one working day.'
), 'default')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/branding/email-options.html',
form=form,
)
def get_branding_as_value_and_label(email_branding):
return [
(branding['id'], branding['name'])
for branding in email_branding
]
def get_branding_as_dict(email_branding):
return {
branding['id']: {
'logo': 'https://{}/{}'.format(get_cdn_domain(), branding['logo']),
'colour': branding['colour']
} for branding in email_branding
}
def convert_dictionary_to_wtforms_choices_format(dictionary, value, label):
return [
(item[value], item[label]) for item in dictionary
]
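For reference, a quick check of what the list-shaping helpers above return, using made-up organisation data rather than anything from the app:

sample = [{'id': 'org-1', 'name': 'Cabinet Office'}]
assert convert_dictionary_to_wtforms_choices_format(sample, 'id', 'name') == [
    ('org-1', 'Cabinet Office')
]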
| 37.132881
| 119
| 0.709252
| 4,709
| 38,284
| 5.398598
| 0.063708
| 0.087798
| 0.054756
| 0.044371
| 0.702226
| 0.633506
| 0.586264
| 0.543191
| 0.495044
| 0.428881
| 0
| 0.000925
| 0.180754
| 38,284
| 1,030
| 120
| 37.168932
| 0.809623
| 0.009116
| 0
| 0.389412
| 0
| 0.025882
| 0.212276
| 0.121417
| 0.001176
| 0
| 0
| 0.000971
| 0
| 1
| 0.062353
| false
| 0.004706
| 0.010588
| 0.010588
| 0.168235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efe457cbb3f9ed9d770c24aeb1ca7014a5e1296d
| 3,094
|
py
|
Python
|
doctools/spelling.py
|
Sketch98/oil
|
2d5c51432b9699e48178236da2e5b3bf1a33d79f
|
[
"Apache-2.0"
] | null | null | null |
doctools/spelling.py
|
Sketch98/oil
|
2d5c51432b9699e48178236da2e5b3bf1a33d79f
|
[
"Apache-2.0"
] | null | null | null |
doctools/spelling.py
|
Sketch98/oil
|
2d5c51432b9699e48178236da2e5b3bf1a33d79f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
"""
spelling.py
Filter the output of 'lynx -dump' into a list of words to spell check.
"""
from __future__ import print_function
from collections import Counter
import optparse
import re
import sys
def log(msg, *args):
if args:
msg = msg % args
print(msg, file=sys.stderr)
def SplitWords(contents):
# Remove URLs so path components don't show up as words
contents = re.sub(r'(http|https|file)://\S+', '', contents)
# Take into account contractions with apostrophes
#
# - doesn't
# - can't
WORD_RE = re.compile(r'''
[a-zA-Z]+
(?:\'t\b)? # optional contraction
''', re.VERBOSE)
words = WORD_RE.findall(contents)
for w in words:
yield w
def WordList(f):
for line in f:
# no special characters allowed
yield line.strip().lower()
def Options():
"""Returns an option parser instance."""
p = optparse.OptionParser()
p.add_option(
'--known-words', dest='known_words',
help='List of words like /usr/share/dict/words')
p.add_option(
'--more-than-bash', dest='more_than_bash', type=int, default=0,
help='Expected number of cases where OSH starts more processes than bash')
return p
def main(argv):
o = Options()
opts, argv = o.parse_args(argv[1:])
action = argv[0]
if action == 'word-split':
contents = sys.stdin.read()
for w in SplitWords(contents):
print(w)
elif action == 'check':
word_files = argv[1:]
d = Counter()
for path in word_files:
with open(path) as f:
for word in WordList(f):
d[word] += 1
print('')
print('Most common words')
print('')
for word, count in d.most_common()[:20]:
print('%10d %s' % (count, word))
print('')
print('Least common words')
print('')
for word, count in d.most_common()[-20:]:
print('%10d %s' % (count, word))
log('%d word files', len(word_files))
log('%d unique words', len(d))
known_words = {}
with open(opts.known_words) as f:
for w in WordList(f):
known_words[w] = True
print('')
print('Potential Misspellings')
print('')
for path in word_files:
print()
print('\t%s' % path)
print()
with open(path) as f:
unknown = {}
for w in WordList(f):
#if d.get(word) == 1:
# print(word)
if w not in known_words:
unknown[w] = True
if unknown:
for u in sorted(unknown):
# only occurs once
if d.get(u) == 1:
print(u)
log('\t%d unknown words in %s', len(unknown), path)
# Checking algorithms:
#
# - Does it appear in the dictionary? Problem: most computer terms
# - Does it appear only once or twice in the whole corpus?
    # - Is the edit distance very close to a dictionary word?
# - e.g. subsitutions is a typo
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
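The closing comment sketches an edit-distance check; a minimal Levenshtein distance one could plug in for that heuristic (an assumption, not part of the original script):

def edit_distance(a, b):
    # Classic dynamic-programming Levenshtein distance.
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                  # deletion
                            curr[j - 1] + 1,              # insertion
                            prev[j - 1] + (ca != cb)))    # substitution
        prev = curr
    return prev[-1]

# 'subsitutions' is one edit away from 'substitutions'.
assert edit_distance('subsitutions', 'substitutions') == 1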
| 21.636364
| 80
| 0.591791
| 440
| 3,094
| 4.090909
| 0.402273
| 0.033333
| 0.013333
| 0.014444
| 0.121111
| 0.067778
| 0.067778
| 0.067778
| 0.067778
| 0.067778
| 0
| 0.007545
| 0.271816
| 3,094
| 142
| 81
| 21.788732
| 0.791389
| 0.184874
| 0
| 0.209302
| 0
| 0
| 0.164796
| 0.017642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.05814
| 0
| 0.127907
| 0.22093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efe4b76066b7fc615a3d5cb419d39e72b57d7593
| 20,659
|
py
|
Python
|
train_deep_ls.py
|
Kamysek/DeepLocalShapes
|
24ee92889381d40acbb5ad1c7c8abb512a8c26b5
|
[
"MIT"
] | 4
|
2021-09-23T11:36:30.000Z
|
2022-02-23T20:10:46.000Z
|
train_deep_ls.py
|
Kamysek/DeepLocalShapes
|
24ee92889381d40acbb5ad1c7c8abb512a8c26b5
|
[
"MIT"
] | null | null | null |
train_deep_ls.py
|
Kamysek/DeepLocalShapes
|
24ee92889381d40acbb5ad1c7c8abb512a8c26b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Based on: https://github.com/facebookresearch/DeepSDF using MIT LICENSE (https://github.com/facebookresearch/DeepSDF/blob/master/LICENSE)
# Copyright 2021-present Philipp Friedrich, Josef Kamysek. All Rights Reserved.
import functools
import json
import logging
import math
import os
import signal
import sys
import time
import warnings
import deep_ls
import deep_ls.workspace as ws
import torch
import torch.multiprocessing as mp
import torch.utils.data as data_utils
from scipy.spatial import cKDTree
import numpy as np
if not sys.warnoptions:
warnings.simplefilter("ignore")
class LearningRateSchedule:
def get_learning_rate(self, epoch):
pass
class ConstantLearningRateSchedule(LearningRateSchedule):
def __init__(self, value):
self.value = value
def get_learning_rate(self, epoch):
return self.value
class StepLearningRateSchedule(LearningRateSchedule):
def __init__(self, initial, interval, factor):
self.initial = initial
self.interval = interval
self.factor = factor
def get_learning_rate(self, epoch):
return self.initial * (self.factor ** (epoch // self.interval))
class WarmupLearningRateSchedule(LearningRateSchedule):
def __init__(self, initial, warmed_up, length):
self.initial = initial
self.warmed_up = warmed_up
self.length = length
def get_learning_rate(self, epoch):
if epoch > self.length:
return self.warmed_up
return self.initial + (self.warmed_up - self.initial) * epoch / self.length
def get_learning_rate_schedules(specs):
schedule_specs = specs["LearningRateSchedule"]
schedules = []
for schedule_specs in schedule_specs:
if schedule_specs["Type"] == "Step":
schedules.append(
StepLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Interval"],
schedule_specs["Factor"],
)
)
elif schedule_specs["Type"] == "Warmup":
schedules.append(
WarmupLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Final"],
schedule_specs["Length"],
)
)
elif schedule_specs["Type"] == "Constant":
schedules.append(ConstantLearningRateSchedule(schedule_specs["Value"]))
else:
raise Exception(
'no known learning rate schedule of type "{}"'.format(
schedule_specs["Type"]
)
)
return schedules
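# Illustration (not in the original file): a "Step" schedule halving the rate
# every 500 epochs, StepLearningRateSchedule(1e-3, 500, 0.5), yields
#   epoch 0    -> 1e-3 * 0.5 ** (0 // 500)    = 0.001
#   epoch 1000 -> 1e-3 * 0.5 ** (1000 // 500) = 0.00025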
def save_model(experiment_directory, filename, decoder, epoch):
model_params_dir = ws.get_model_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "model_state_dict": decoder.state_dict()},
os.path.join(model_params_dir, filename),
)
def save_optimizer(experiment_directory, filename, optimizer, epoch):
optimizer_params_dir = ws.get_optimizer_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "optimizer_state_dict": optimizer.state_dict()},
os.path.join(optimizer_params_dir, filename),
)
def load_optimizer(experiment_directory, filename, optimizer):
full_filename = os.path.join(
ws.get_optimizer_params_dir(experiment_directory), filename
)
if not os.path.isfile(full_filename):
raise Exception(
'optimizer state dict "{}" does not exist'.format(full_filename)
)
data = torch.load(full_filename)
optimizer.load_state_dict(data["optimizer_state_dict"])
return data["epoch"]
def save_latent_vectors(experiment_directory, filename, latent_vec, epoch):
latent_codes_dir = ws.get_latent_codes_dir(experiment_directory, True)
all_latents = latent_vec.state_dict()
torch.save(
{"epoch": epoch, "latent_codes": all_latents},
os.path.join(latent_codes_dir, filename),
)
# TODO: duplicated in workspace
def load_latent_vectors(experiment_directory, filename, lat_vecs):
full_filename = os.path.join(
ws.get_latent_codes_dir(experiment_directory), filename
)
if not os.path.isfile(full_filename):
raise Exception('latent state file "{}" does not exist'.format(full_filename))
data = torch.load(full_filename)
if isinstance(data["latent_codes"], torch.Tensor):
# for backwards compatibility
if not lat_vecs.num_embeddings == data["latent_codes"].size()[0]:
raise Exception(
"num latent codes mismatched: {} vs {}".format(
lat_vecs.num_embeddings, data["latent_codes"].size()[0]
)
)
if not lat_vecs.embedding_dim == data["latent_codes"].size()[2]:
raise Exception("latent code dimensionality mismatch")
for i, lat_vec in enumerate(data["latent_codes"]):
lat_vecs.weight.data[i, :] = lat_vec
else:
lat_vecs.load_state_dict(data["latent_codes"])
return data["epoch"]
def save_logs(
experiment_directory,
loss_log,
lr_log,
timing_log,
lat_mag_log,
param_mag_log,
epoch,
):
torch.save(
{
"epoch": epoch,
"loss": loss_log,
"learning_rate": lr_log,
"timing": timing_log,
"latent_magnitude": lat_mag_log,
"param_magnitude": param_mag_log,
},
os.path.join(experiment_directory, ws.logs_filename),
)
def load_logs(experiment_directory):
full_filename = os.path.join(experiment_directory, ws.logs_filename)
if not os.path.isfile(full_filename):
raise Exception('log file "{}" does not exist'.format(full_filename))
data = torch.load(full_filename)
return (
data["loss"],
data["learning_rate"],
data["timing"],
data["latent_magnitude"],
data["param_magnitude"],
data["epoch"],
)
def clip_logs(loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, epoch):
iters_per_epoch = len(loss_log) // len(lr_log)
loss_log = loss_log[: (iters_per_epoch * epoch)]
lr_log = lr_log[:epoch]
timing_log = timing_log[:epoch]
lat_mag_log = lat_mag_log[:epoch]
for n in param_mag_log:
param_mag_log[n] = param_mag_log[n][:epoch]
return loss_log, lr_log, timing_log, lat_mag_log, param_mag_log
def get_spec_with_default(specs, key, default):
try:
return specs[key]
except KeyError:
return default
def get_mean_latent_vector_magnitude(latent_vectors):
return torch.mean(torch.norm(latent_vectors.weight.data.detach(), dim=1))
def append_parameter_magnitudes(param_mag_log, model):
for name, param in model.named_parameters():
if len(name) > 7 and name[:7] == "module.":
name = name[7:]
if name not in param_mag_log.keys():
param_mag_log[name] = []
param_mag_log[name].append(param.data.norm().item())
def trainer(center_point, sdf_tree, sdf_grid_radius, lat_vecs, sdf_data, indices, cube_size, outer_sum, outer_lock, decoder, loss_l1, do_code_regularization, code_reg_lambda, epoch):
inner_sum = 0.0
# Get all indices of the samples that are within the L-radius around the cell center.
near_sample_indices = sdf_tree.query_ball_point(x=[center_point[1]], r=sdf_grid_radius, p=np.inf)
# Get number of samples located within the L-radius around the cell center
num_sdf_samples = len(near_sample_indices[0])
if num_sdf_samples < 1:
return
# Extract code from lat_vecs
code = lat_vecs((center_point[0] + indices[0].cuda() * (cube_size**3)).long()).cuda()
# Get groundtruth sdf value
sdf_gt = sdf_data[near_sample_indices[0], 3].unsqueeze(1)
sdf_gt = torch.tanh(sdf_gt)
transformed_sample = sdf_data[near_sample_indices[0], :3] - center_point[1]
transformed_sample.requires_grad = False
code = code.expand(1, 125)
code = code.repeat(transformed_sample.shape[0], 1)
decoder_input = torch.cat([code, transformed_sample.cuda()], dim=1).float().cuda()
# Get network prediction of current sample
pred_sdf = decoder(decoder_input)
# f_theta - s_j
inner_sum = loss_l1(pred_sdf.squeeze(0), sdf_gt.cuda()) / num_sdf_samples
    # Rightmost part of formula (4) in DeepLS -> + 1/sigma^2 L2(z_i)
if do_code_regularization and num_sdf_samples != 0:
l2_size_loss = torch.sum(torch.norm(code, dim=0))
reg_loss = (code_reg_lambda * min(1.0, epoch / 100) * l2_size_loss) / num_sdf_samples
inner_sum = inner_sum.cuda() + reg_loss.cuda()
inner_sum.backward()
with outer_lock:
outer_sum.value += inner_sum.item()
return
def main_function(experiment_directory, continue_from, batch_split):
logging.debug("running " + experiment_directory)
specs = ws.load_experiment_specifications(experiment_directory)
logging.info("Experiment description: \n" + str(specs["Description"]))
data_source = specs["DataSource"]
train_split_file = specs["TrainSplit"]
arch = __import__("networks." + specs["NetworkArch"], fromlist=["Decoder"])
logging.debug(specs["NetworkSpecs"])
latent_size = specs["CodeLength"]
checkpoints = list(
range(
specs["SnapshotFrequency"],
specs["NumEpochs"] + 1,
specs["SnapshotFrequency"],
)
)
for checkpoint in specs["AdditionalSnapshots"]:
checkpoints.append(checkpoint)
checkpoints.sort()
lr_schedules = get_learning_rate_schedules(specs)
grad_clip = get_spec_with_default(specs, "GradientClipNorm", None)
if grad_clip is not None:
logging.debug("clipping gradients to max norm {}".format(grad_clip))
def save_latest(epoch):
save_model(experiment_directory, "latest.pth", decoder, epoch)
save_optimizer(experiment_directory, "latest.pth", optimizer_all, epoch)
save_latent_vectors(experiment_directory, "latest.pth", lat_vecs, epoch)
def save_checkpoints(epoch):
save_model(experiment_directory, str(epoch) + ".pth", decoder, epoch)
save_optimizer(experiment_directory, str(epoch) + ".pth", optimizer_all, epoch)
save_latent_vectors(experiment_directory, str(epoch) + ".pth", lat_vecs, epoch)
def signal_handler(sig, frame):
logging.info("Stopping early...")
sys.exit(0)
def adjust_learning_rate(lr_schedules, optimizer, epoch):
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = lr_schedules[i].get_learning_rate(epoch)
signal.signal(signal.SIGINT, signal_handler)
num_samp_per_scene = specs["SamplesPerScene"]
scene_per_batch = specs["ScenesPerBatch"]
do_code_regularization = get_spec_with_default(specs, "CodeRegularization", True)
code_reg_lambda = get_spec_with_default(specs, "CodeRegularizationLambda", 1e-4)
code_bound = get_spec_with_default(specs, "CodeBound", None)
cube_size = get_spec_with_default(specs, "CubeSize", 50)
box_size = get_spec_with_default(specs, "BoxSize", 2)
voxel_radius = get_spec_with_default(specs, "VoxelRadius", 1.5)
decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).cuda()
logging.info("training with {} GPU(s)".format(torch.cuda.device_count()))
if torch.cuda.device_count() > 1:
decoder = torch.nn.DataParallel(decoder)
num_epochs = specs["NumEpochs"]
log_frequency = get_spec_with_default(specs, "LogFrequency", 10)
with open(train_split_file, "r") as f:
train_split = json.load(f)
sdf_dataset = deep_ls.data.SDFSamples(
data_source, train_split, num_samp_per_scene, load_ram=False
)
num_data_loader_threads = get_spec_with_default(specs, "DataLoaderThreads", 1)
logging.debug("loading data with {} threads".format(num_data_loader_threads))
sdf_loader = data_utils.DataLoader(
sdf_dataset,
batch_size=scene_per_batch,
shuffle=True,
num_workers=num_data_loader_threads,
drop_last=True,
)
sdf_grid_indices = deep_ls.data.generate_grid_center_indices(cube_size=cube_size, box_size=box_size)
# voxel_radius is defined as 1.5 times the voxel side length (see DeepLS sec. 4.1) since that value provides
    # a good trade-off between accuracy and efficiency
sdf_grid_radius = voxel_radius * ((box_size * 2) / cube_size)
logging.debug("torch num_threads: {}".format(torch.get_num_threads()))
num_scenes = len(sdf_dataset)
logging.info("There are {} scenes".format(num_scenes))
logging.debug(decoder)
# TODO check if there is something better than Embedding to store codes.
# TODO Not sure if max_norm=code_bound is necessary
    # lat_vecs_size is num_scenes times the grid (cube_size^3)
lat_vec_size = num_scenes * (cube_size**3)
lat_vecs = torch.nn.Embedding(lat_vec_size, latent_size, max_norm=code_bound).cuda()
torch.nn.init.normal_(
lat_vecs.weight.data,
0.0,
get_spec_with_default(specs, "CodeInitStdDev", 1.0) / math.sqrt(latent_size),
)
logging.debug(
"initialized with mean magnitude {}".format(
get_mean_latent_vector_magnitude(lat_vecs)
)
)
loss_l1 = torch.nn.L1Loss(reduction="sum").cuda()
optimizer_all = torch.optim.Adam(
[
{
"params": decoder.parameters(),
"lr": lr_schedules[0].get_learning_rate(0),
},
{
"params": lat_vecs.parameters(),
"lr": lr_schedules[1].get_learning_rate(0),
},
]
)
loss_log = []
lr_log = []
lat_mag_log = []
timing_log = []
param_mag_log = {}
start_epoch = 1
if continue_from is not None:
logging.info('continuing from "{}"'.format(continue_from))
lat_epoch = load_latent_vectors(
experiment_directory, continue_from + ".pth", lat_vecs
)
model_epoch = ws.load_model_parameters(
experiment_directory, continue_from, decoder
)
optimizer_epoch = load_optimizer(
experiment_directory, continue_from + ".pth", optimizer_all
)
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, log_epoch = load_logs(
experiment_directory
)
if not log_epoch == model_epoch:
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log = clip_logs(
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, model_epoch
)
if not (model_epoch == optimizer_epoch and model_epoch == lat_epoch):
raise RuntimeError(
"epoch mismatch: {} vs {} vs {} vs {}".format(
model_epoch, optimizer_epoch, lat_epoch, log_epoch
)
)
start_epoch = model_epoch + 1
logging.debug("loaded")
logging.info("starting from epoch {}".format(start_epoch))
logging.info(
"Number of decoder parameters: {}".format(
sum(p.data.nelement() for p in decoder.parameters())
)
)
logging.info(
"Number of shape code parameters: {} (# codes {}, code dim {})".format(
lat_vecs.num_embeddings * lat_vecs.embedding_dim,
lat_vecs.num_embeddings,
lat_vecs.embedding_dim,
)
)
for epoch in range(start_epoch, num_epochs + 1):
start = time.time()
logging.info("epoch {}...".format(epoch))
decoder.train()
adjust_learning_rate(lr_schedules, optimizer_all, epoch)
current_scene = 0
scene_avg_loss = 0.0
len_data_loader = len(sdf_loader)
for sdf_data, indices in sdf_loader:
current_scene += 1
#logging.info("Scene: {}/{}".format(current_scene, len_data_loader))
# sdf_data contains the KDTree of the current scene and all the points in that scene
# indices is the index of the npz file -> the scene.
sdf_data = sdf_data.reshape(-1, 4)
sdf_data.requires_grad = False
xyz = sdf_data[:,:3]
num_sdf_samples_total = sdf_data.shape[0]
# TODO check leaf_size impact on speed. default = 40
# Default metric of kdtree is L2 norm, Paper uses L infinity -> chebyshev
sdf_tree = cKDTree(xyz)
outer_sum = 0.0
optimizer_all.zero_grad()
if __name__ == '__main__':
# Shared value counter and lock
mp.set_start_method('spawn', force=True)
manager = mp.Manager()
outer_sum = manager.Value('f', 0)
outer_lock = manager.Lock()
# Create Pool for multiprocessing
start = time.time()
pool = mp.Pool()
# Apply map on array of center points
res = pool.map(functools.partial(trainer,
sdf_tree = sdf_tree,
sdf_grid_radius = sdf_grid_radius,
lat_vecs = lat_vecs,
sdf_data = sdf_data,
indices = indices,
cube_size = cube_size,
outer_sum = outer_sum,
outer_lock = outer_lock,
decoder = decoder,
loss_l1 = loss_l1,
do_code_regularization = do_code_regularization,
code_reg_lambda = code_reg_lambda,
epoch = epoch),
enumerate(sdf_grid_indices))
pool.close()
pool.join()
logging.info("Multiprocessing Time {}".format(time.time() - start))
scene_avg_loss += outer_sum.value
logging.info("Scene {} loss = {}".format(current_scene, outer_sum))
loss_log.append(outer_sum.value)
optimizer_all.step()
logging.info("Epoch scene average loss: {}".format((scene_avg_loss/current_scene)))
end = time.time()
seconds_elapsed = end - start
timing_log.append(seconds_elapsed)
lr_log.append([schedule.get_learning_rate(epoch) for schedule in lr_schedules])
# TODO check what other functions do with lat_vecs and adapt if needed.
lat_mag_log.append(get_mean_latent_vector_magnitude(lat_vecs))
append_parameter_magnitudes(param_mag_log, decoder)
if epoch in checkpoints:
save_checkpoints(epoch)
if epoch % log_frequency == 0:
save_latest(epoch)
save_logs(
experiment_directory,
loss_log,
lr_log,
timing_log,
lat_mag_log,
param_mag_log,
epoch,
)
if __name__ == "__main__":
import argparse
arg_parser = argparse.ArgumentParser(description="Train a DeepLS autodecoder")
arg_parser.add_argument(
"--experiment",
"-e",
dest="experiment_directory",
required=True,
help="The experiment directory. This directory should include "
+ "experiment specifications in 'specs.json', and logging will be "
+ "done in this directory as well.",
)
arg_parser.add_argument(
"--continue",
"-c",
dest="continue_from",
help="A snapshot to continue from. This can be 'latest' to continue"
+ "from the latest running snapshot, or an integer corresponding to "
+ "an epochal snapshot.",
)
arg_parser.add_argument(
"--batch_split",
dest="batch_split",
default=1,
help="This splits the batch into separate subbatches which are "
+ "processed separately, with gradients accumulated across all "
+ "subbatches. This allows for training with large effective batch "
+ "sizes in memory constrained environments.",
)
deep_ls.add_common_args(arg_parser)
args = arg_parser.parse_args()
deep_ls.configure_logging(args)
main_function(args.experiment_directory, args.continue_from, int(args.batch_split))
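As a sanity check on the radius computation above, with the defaults CubeSize 50, BoxSize 2 and VoxelRadius 1.5 (a worked illustration, not part of the training script):

cube_size, box_size, voxel_radius = 50, 2, 1.5
voxel_side = (box_size * 2) / cube_size        # 0.08 world units per voxel
sdf_grid_radius = voxel_radius * voxel_side    # 1.5 voxel sides = 0.12
assert abs(sdf_grid_radius - 0.12) < 1e-12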
| 32.330203
| 182
| 0.628588
| 2,467
| 20,659
| 4.978111
| 0.19092
| 0.04796
| 0.015227
| 0.016122
| 0.261624
| 0.163342
| 0.137122
| 0.108786
| 0.084358
| 0.060419
| 0
| 0.00666
| 0.27315
| 20,659
| 638
| 183
| 32.380878
| 0.811201
| 0.070236
| 0
| 0.121896
| 0
| 0
| 0.111042
| 0.001251
| 0
| 0
| 0
| 0.001567
| 0
| 1
| 0.056433
| false
| 0.002257
| 0.040632
| 0.006772
| 0.137698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efe8537711357e13e0aa907bd882c404ad86cc4e
| 988
|
py
|
Python
|
interface.py
|
robotafm/motor
|
1c0838db12514304b930aec976d7adcbc51b7c92
|
[
"MIT"
] | null | null | null |
interface.py
|
robotafm/motor
|
1c0838db12514304b930aec976d7adcbc51b7c92
|
[
"MIT"
] | null | null | null |
interface.py
|
robotafm/motor
|
1c0838db12514304b930aec976d7adcbc51b7c92
|
[
"MIT"
] | null | null | null |
# /robotafm/motor/interface.py
# Main web interface, contains basic
# information display
# imports:
import xml.dom.minidom
from flask import Flask, render_template
# constants:
LANG = "./lang/rus.xml"
# XML: load text strings from language file
dom = xml.dom.minidom.parse(LANG)
main_title = dom.getElementsByTagName("main_title")[0].childNodes[0].nodeValue
language = dom.getElementsByTagName("language")[0].childNodes[0].nodeValue
greeting = dom.getElementsByTagName("greeting")[0].childNodes[0].nodeValue
invitation = dom.getElementsByTagName("invitation")[0].childNodes[0].nodeValue
main_page_text = dom.getElementsByTagName("main_page_text")[0].childNodes[0].nodeValue
# Flask init:
app = Flask(__name__)
# Main site page:
@app.route('/')
def index():
return render_template(
'index.html',
main_title=main_title,
greeting=greeting,
invitation=invitation,
        main_page_text=main_page_text
)
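The five near-identical minidom lookups above could share one helper; a hedged sketch (the helper name is illustrative, not part of the original module):

def text_of(dom, tag):
    # First text node of the first element with the given tag name.
    return dom.getElementsByTagName(tag)[0].childNodes[0].nodeValue

greeting = text_of(dom, 'greeting')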
| 29.058824
| 87
| 0.709514
| 117
| 988
| 5.837607
| 0.384615
| 0.168375
| 0.087848
| 0.153734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01218
| 0.169028
| 988
| 33
| 88
| 29.939394
| 0.819732
| 0.175101
| 0
| 0
| 0
| 0
| 0.09715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0.052632
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efed594b93f7036fd9e0fbb23d74fff628cd47d4
| 922
|
py
|
Python
|
CountingValleys/ValleyCounter.py
|
monemonesi/TDD_Katas_Python
|
f21a4f3516b75d7618dcd044453e25be015b4251
|
[
"MIT"
] | null | null | null |
CountingValleys/ValleyCounter.py
|
monemonesi/TDD_Katas_Python
|
f21a4f3516b75d7618dcd044453e25be015b4251
|
[
"MIT"
] | null | null | null |
CountingValleys/ValleyCounter.py
|
monemonesi/TDD_Katas_Python
|
f21a4f3516b75d7618dcd044453e25be015b4251
|
[
"MIT"
] | null | null | null |
UP = "U"
DOWN = "D"
ALLOWED_PATH_I = [UP, DOWN]
def update_high_for_step(high: int, step: str) -> int:
"""Update the current high given a step"""
if step == UP:
high += 1
elif step == DOWN:
high -= 1
return high
def update_valley_count(valleys_count: int, high: int, previous_high: int) -> int:
if high == 0 and previous_high < 0:
valleys_count += 1
return valleys_count
def count_valley(steps: int, path: str) -> int:
"""Function which returns the number of valley encountered in a given path"""
if len(path) != steps:
raise Exception("Steps should match length of path")
valleys = 0
high = 0
previous_high = 0
for i in range(steps):
previous_high = high
high = update_high_for_step(high, path[i])
valleys = update_valley_count(valleys, high, previous_high)
return valleys
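A worked example of the counter (not from the kata): the path DDUUUUDD dips below sea level once and then climbs a hill, so exactly one valley is counted.

assert count_valley(8, 'DDUUUUDD') == 1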
| 27.117647
| 83
| 0.611714
| 129
| 922
| 4.209302
| 0.333333
| 0.110497
| 0.047882
| 0.062615
| 0.077348
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012214
| 0.289588
| 922
| 33
| 84
| 27.939394
| 0.816794
| 0.117137
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efee15be03037d97374bea9c4059f5490403f268
| 682
|
py
|
Python
|
Tree/Leetcode 226. Invert Binary Tree.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 31
|
2020-06-23T00:40:04.000Z
|
2022-01-08T11:06:24.000Z
|
Tree/Leetcode 226. Invert Binary Tree.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | null | null | null |
Tree/Leetcode 226. Invert Binary Tree.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 7
|
2020-04-30T08:46:03.000Z
|
2021-08-28T16:25:54.000Z
|
import collections

# TreeNode is normally provided by the LeetCode judge; a minimal stub is added
# here so the file runs standalone:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Recursive solution:
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return root
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
# Iterative (BFS) solution:
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root:
return None
q = collections.deque()
q.append(root)
while q:
node = q.popleft()
            node.left, node.right = node.right, node.left
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return root
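# Usage sketch (relies on the hypothetical TreeNode stub added above; uses the
# BFS Solution, which shadows the recursive one):
if __name__ == '__main__':
    tree = TreeNode(4, TreeNode(2), TreeNode(7))
    tree = Solution().invertTree(tree)
    assert (tree.left.val, tree.right.val) == (7, 2)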
| 29.652174
| 56
| 0.541056
| 79
| 682
| 4.670886
| 0.253165
| 0.086721
| 0.086721
| 0.140921
| 0.482385
| 0.352304
| 0.352304
| 0.352304
| 0.352304
| 0.352304
| 0
| 0
| 0.365103
| 682
| 23
| 57
| 29.652174
| 0.852194
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
efee470e855ae2a217e0a35720dd990d8a0f3c8b
| 333
|
py
|
Python
|
Ex044.py
|
JeanPauloGarcia/Python-Exercicios
|
faff4670806c423680ee00a88d3c4c49b437e72e
|
[
"MIT"
] | null | null | null |
Ex044.py
|
JeanPauloGarcia/Python-Exercicios
|
faff4670806c423680ee00a88d3c4c49b437e72e
|
[
"MIT"
] | null | null | null |
Ex044.py
|
JeanPauloGarcia/Python-Exercicios
|
faff4670806c423680ee00a88d3c4c49b437e72e
|
[
"MIT"
] | null | null | null |
preço = float(input('Price: '))
print('''Fill in the payment method with:
1 - for CASH
2 - for CARD, 1 installment
3 - for CARD, 2 installments
4 - for CARD, 3 or more installments
''')
pagto = str(input('Payment: ')).strip()
if pagto == '1':
    preço = preço*0.9
elif pagto == '2':
    preço = preço*0.95
# option '3' (2 installments) intentionally keeps the base price
elif pagto == '4':
    preço = preço*1.2
print(preço)
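# Worked example: a price of 100.00 paid with option 1 (cash) -> 100 * 0.9 = 90.0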
| 19.588235
| 43
| 0.597598
| 57
| 333
| 3.491228
| 0.54386
| 0.105528
| 0.110553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065134
| 0.216216
| 333
| 16
| 44
| 20.8125
| 0.697318
| 0
| 0
| 0
| 0
| 0
| 0.391566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eff27556e4f9b47dbc9ed41d42898d35ce432f5c
| 1,264
|
py
|
Python
|
scorebee/main.py
|
mikeboers/ScoreBee
|
e8c3476b6401808a61b495b9c42e8cbe752906b4
|
[
"BSD-3-Clause"
] | null | null | null |
scorebee/main.py
|
mikeboers/ScoreBee
|
e8c3476b6401808a61b495b9c42e8cbe752906b4
|
[
"BSD-3-Clause"
] | null | null | null |
scorebee/main.py
|
mikeboers/ScoreBee
|
e8c3476b6401808a61b495b9c42e8cbe752906b4
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import sys
from .application import Application
from .document import Document, Track, Event
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
app = Application(sys.argv)
if '--debug' in sys.argv:
# # Load a document.
# # We absolutely MUST have the document constructed fully BEFORE
# # setting it here. There are side effects to setting it.
# # HACK: This is just a hack for now.
# # doc = Document()
doc = Document('/Users/mikeboers/Desktop/example.MOV')
# doc = Document('/Users/mikeboers/Desktop/C00000S00A20091231112932302.avi')
doc.add_track(Track(
name='A behaviour',
key='q',
group='top two',
# events=[
# Event(10, 15), Event(50, 65), Event(500, 600)
# ]
))
doc.add_track(Track(
name='Nothin here',
key='w',
group='top two',
# events=[]
))
doc.add_track(Track(
name='Better one',
key='e',
# events=[
# Event(25, 26), Event(70, 71), Event(700, 701)
# ]
))
app.doc = doc
app.run()
| 28.088889
| 84
| 0.511076
| 136
| 1,264
| 4.669118
| 0.551471
| 0.051969
| 0.051969
| 0.075591
| 0.195276
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065081
| 0.36788
| 1,264
| 45
| 85
| 28.088889
| 0.729662
| 0.313291
| 0
| 0.32
| 0
| 0
| 0.117925
| 0.042453
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eff28154f7d481027598302c0ee3f1c65be8e270
| 45,609
|
py
|
Python
|
ceci/stage.py
|
eacharles/ceci
|
e52e956c9e373c9a632ad0c312770f32ceab0c8b
|
[
"BSD-3-Clause"
] | null | null | null |
ceci/stage.py
|
eacharles/ceci
|
e52e956c9e373c9a632ad0c312770f32ceab0c8b
|
[
"BSD-3-Clause"
] | 1
|
2022-01-05T22:04:57.000Z
|
2022-01-05T22:04:57.000Z
|
ceci/stage.py
|
eacharles/ceci
|
e52e956c9e373c9a632ad0c312770f32ceab0c8b
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module with core functionality for a single pipeline stage """
import pathlib
import os
import sys
from textwrap import dedent
import shutil
import cProfile
from abc import abstractmethod
from . import errors
from .monitor import MemoryMonitor
from .config import StageConfig, cast_to_streamable
SERIAL = "serial"
MPI_PARALLEL = "mpi"
DASK_PARALLEL = "dask"
IN_PROGRESS_PREFIX = "inprogress_"
class PipelineStage:
"""A PipelineStage implements a single calculation step within a wider pipeline.
Each different type of analysis stage is represented by a subclass of this
base class. The base class handles the connection between different pipeline
stages, and the execution of the stages within a workflow system (parsl),
potentially in parallel (MPI).
An instance of one of these classes represents an actual run of the stage,
with the required inputs, outputs, and configuration specified.
See documentation pages for more details.
"""
parallel = True
dask_parallel = False
config_options = {}
doc = ""
def __init__(self, args, comm=None):
"""Construct a pipeline stage, specifying the inputs, outputs, and configuration for it.
The constructor needs a dict or namespace. It should include:
- input paths (required)
- config path (required)
- output paths (optional but usual)
- additional configuration (required if not specified elsewhere)
Input and output paths should map tags to paths.
Tags are strings, and the first elements in each item in the subclass's
"inputs" and "output" attributes.
e.g. for a subclass with:
inputs = [('eggs', TextFile)]
outputs = [('spam', TextFile)]
the args could contain:
{'eggs': 'inputs/eggs.txt',
'spam': 'outputs/spam.txt' }
If spam is not specified it will default to "./spam.txt"
The config should map "config" to a path where a YAML config file
is located, e.g. {'config':'/path/to/config.yml'}
Any config variables that are specified in the class's config attribute
will be searched for first in args, then in the config file, and then
by looking at any default value they have been given.
If they have no default value (and just a type, like int, is listed), then
it's an error if they are not specified somewhere.
        The execute method can instantiate and run the class in a single call,
        with added extras such as profiling and debugging tools.
Parameters
----------
args: dict or namespace
Specification of input and output paths and any missing config options
comm: MPI communicator
(default is None) An MPI comm object to use in preference to COMM_WORLD
"""
self._configs = StageConfig(**self.config_options)
self._inputs = None
self._outputs = None
self._parallel = SERIAL
self._comm = None
self._size = 1
self._rank = 0
self.dask_client = None
self.load_configs(args)
if comm is not None:
self.setup_mpi(comm)
def get_aliases(self):
""" Returns the dictionary of aliases used to remap inputs and outputs
        in the case that we want to have multiple instances of this class in the pipeline"""
return self.config.get('aliases', None)
def get_aliased_tag(self, tag):
""" Returns the possibly remapped value for an input or output tag
        Parameters
        ----------
tag : `str`
The input or output tag we are checking
Returns
-------
aliased_tag : `str`
            The aliased version of the tag
"""
aliases = self.get_aliases()
if aliases is None:
return tag
return aliases.get(tag, tag)
@abstractmethod
def run(self): #pragma: no cover
"""Run the stage and return the execution status"""
raise NotImplementedError('run')
def load_configs(self, args):
"""
        Load the configuration
Parameters
----------
args: dict or namespace
Specification of input and output paths and any missing config options
"""
if not isinstance(args, dict):
args = vars(args)
# First, we extract configuration information from a combination of
# command line arguments and optional 'config' file
self._inputs = dict(config=args["config"])
self.read_config(args)
# We first check for missing input files, that's a show stopper
missing_inputs = []
for x in self.input_tags():
val = args.get(x)
aliased_tag = self.get_aliased_tag(x)
if val is None:
val = args.get(aliased_tag)
if val is None: #pragma: no cover
missing_inputs.append(f"--{x}")
else:
self._inputs[aliased_tag] = val
if missing_inputs: #pragma: no cover
missing_inputs = " ".join(missing_inputs)
raise ValueError(
f"""
{self.instance_name} Missing these names on the command line:
Input names: {missing_inputs}"""
)
        # We always assume the config arg exists, whether it is in input_tags or not
if 'config' not in args: #pragma: no cover
raise ValueError("The argument --config was missing on the command line.")
# We prefer to receive explicit filenames for the outputs but will
# tolerate missing output filenames and will default to tag name in
# current folder (this is for CWL compliance)
self._outputs = {}
for i, x in enumerate(self.output_tags()):
if args.get(x) is None:
ftype = self.outputs[i][1] #pylint: disable=no-member
self._outputs[self.get_aliased_tag(x)] = ftype.make_name(x)
else:
self._outputs[self.get_aliased_tag(x)] = args[x]
def setup_mpi(self, comm=None):
"""
Setup the MPI interface
Parameters
----------
comm: MPI communicator
(default is None) An MPI comm object to use in preference to COMM_WORLD
"""
mpi = self.config.get('mpi', False)
if mpi: #pragma: no cover
try:
# This isn't a ceci dependency, so give a sensible error message if not installed.
import mpi4py.MPI
except ImportError:
print("ERROR: Using --mpi option requires mpi4py to be installed.")
raise
# For scripting and testing we allow an MPI communicator or anything
# with the same API to be passed in directly, overriding the --mpi
# flag.
if comm is not None:
self._parallel = MPI_PARALLEL
self._comm = comm
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
elif mpi: #pragma: no cover
self._parallel = MPI_PARALLEL
self._comm = mpi4py.MPI.COMM_WORLD
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
else:
self._parallel = SERIAL
self._comm = None
self._size = 1
self._rank = 0
# If we are running under MPI but this subclass has enabled dask
# then we note that here. It stops various MPI-specific things happening
# later
if (self._parallel == MPI_PARALLEL) and self.dask_parallel:
self._parallel = DASK_PARALLEL
pipeline_stages = {}
incomplete_pipeline_stages = {}
def __init_subclass__(cls, **kwargs):
"""
Python 3.6+ provides a facility to automatically
call a method (this one) whenever a new subclass
is defined. In this case we use that feature to keep
track of all available pipeline stages, each of which is
defined by a class.
"""
super().__init_subclass__(**kwargs)
# This is a hacky way of finding the file
# where our stage was defined
filename = sys.modules[cls.__module__].__file__
stage_is_complete = (
hasattr(cls, 'inputs') and hasattr(cls, 'outputs') and not getattr(cls.run, '__isabstractmethod__', False)
)
# If there isn't an explicit name already then set it here.
# by default use the class name.
if not hasattr(cls, "name"): #pragma: no cover
cls.name = cls.__name__
if cls.name is None:
cls.name = cls.__name__
if stage_is_complete:
# Deal with duplicated class names
if cls.name in cls.pipeline_stages:
other = cls.pipeline_stages[cls.name][1]
raise errors.DuplicateStageName(
"You created two pipeline stages with the"
f"name {cls.name}.\nOne was in {filename}\nand the "
f"other in {other}\nYou can either change the class "
"name or explicitly put a variable 'name' in the top"
"level of the class."
)
# Check for "config" in the inputs list - this is implicit
for name, _ in cls.inputs:
if name == "config":
raise errors.ReservedNameError(
"An input called 'config' is implicit in each pipeline "
"stage and should not be added explicitly. Please update "
f"your pipeline stage called {cls.name} to remove/rename "
"the input called 'config'."
)
# Check if user has over-written the config variable.
# Quite a common error I make myself.
if not isinstance(cls.config, property):
raise errors.ReservedNameError(
"You have a class variable called 'config', which "
"is reserved in ceci for its own configuration. "
"You may have meant to specify config_options?"
)
# Find the absolute path to the class defining the file
path = pathlib.Path(filename).resolve()
# Register the class
if stage_is_complete:
cls.pipeline_stages[cls.name] = (cls, path)
else:
cls.incomplete_pipeline_stages[cls.__name__] = (cls, path)
#############################################
# Life cycle-related methods and properties.
#############################################
@classmethod
def get_stage(cls, name):
"""
Return the PipelineStage subclass with the given name.
This is used so that we do not need a new entry point __main__ function
for each new stage - instead we can just use a single one which can query
which class it should be using based on the name.
Returns
-------
cls: class
The corresponding subclass
"""
stage = cls.pipeline_stages.get(name)
# If not found, then check for incomplete stages
if stage is None:
if name in cls.incomplete_pipeline_stages:
raise errors.IncompleteStage(
f"The stage {name} is not completely written. "
"Stages must specify 'inputs', 'outputs' as class variables "
f"and a 'run' method.\n{name} might be unfinished, or it might "
"be intended as a base for other classes and not to be run."
)
raise errors.StageNotFound(f"Unknown stage '{name}'")
return stage[0]
@classmethod
def get_module(cls):
"""
Return the path to the python package containing the current sub-class
If we have a PipelineStage subclass defined in a module called "bar", in
a package called "foo" e.g.:
/path/to/foo/bar.py <-- contains subclass "Baz"
Then calling Baz.get_module() will return "foo.bar".
We use this later to construct command lines like "python -m foo Baz"
Returns
-------
module: str
The module containing this class.
"""
return cls.pipeline_stages[cls.name][0].__module__
@classmethod
def usage(cls): #pragma: no cover
"""
Print a usage message.
"""
stage_names = "\n- ".join(cls.pipeline_stages.keys())
try:
module = cls.get_module().split(".")[0]
except: #pylint: disable=bare-except
module = "<module_name>"
sys.stderr.write(
f"""
Usage: python -m {module} <stage_name> <stage_arguments>
If no stage_arguments are given then usage information
for the chosen stage will be given.
I currently know about these stages:
- {stage_names}
"""
)
@classmethod
def main(cls):
"""
Create an instance of this stage and execute it with
inputs and outputs taken from the command line
"""
try:
stage_name = sys.argv[1]
except IndexError: #pragma: no cover
cls.usage()
return 1
if stage_name in ["--help", "-h"] and len(sys.argv) == 2: #pragma: no cover
cls.usage()
return 1
stage = cls.get_stage(stage_name)
args = stage.parse_command_line()
stage.execute(args)
return 0
@classmethod
def parse_command_line(cls, cmd=None):
"""Set up and argument parser and parse the command line
Parameters
----------
cmd : str or None
            The command line to parse (if None this will use the system arguments)
Returns
-------
args : Namespace
            The resulting mapping of arguments to values
"""
import argparse
parser = argparse.ArgumentParser(description=f"Run pipeline stage {cls.name}")
parser.add_argument("stage_name")
for conf, def_val in cls.config_options.items():
opt_type = def_val if isinstance(def_val, type) else type(def_val)
if opt_type == bool:
parser.add_argument(f"--{conf}", action="store_const", const=True)
parser.add_argument(f"--no-{conf}", dest=conf, action="store_const", const=False)
elif opt_type == list:
out_type = def_val[0] if isinstance(def_val[0], type) else type(def_val[0])
if out_type is str: #pragma: no cover
parser.add_argument(
f"--{conf}", type=lambda string: string.split(",")
)
elif out_type is int: #pragma: no cover
parser.add_argument(
f"--{conf}",
type=lambda string: [int(i) for i in string.split(",")],
)
elif out_type is float:
parser.add_argument(
f"--{conf}",
type=lambda string: [float(i) for i in string.split(",")],
)
else: #pragma: no cover
raise NotImplementedError(
"Only handles str, int and float list arguments"
)
else: #pragma: no cover
parser.add_argument(f"--{conf}", type=opt_type)
for inp in cls.input_tags():
parser.add_argument(f"--{inp}")
for out in cls.output_tags():
parser.add_argument(f"--{out}")
parser.add_argument("--config")
if cls.parallel:
parser.add_argument(
"--mpi", action="store_true", help="Set up MPI parallelism"
)
parser.add_argument(
"--pdb", action="store_true", help="Run under the python debugger"
)
parser.add_argument(
"--cprofile",
action="store",
default="",
type=str,
help="Profile the stage using the python cProfile tool",
)
parser.add_argument(
"--memmon",
type=int,
default=0,
help="Report memory use. Argument gives interval in seconds between reports",
)
if cmd is None:
args = parser.parse_args()
else:
args = parser.parse_args(cmd)
return args
@classmethod
def execute(cls, args, comm=None):
"""
Create an instance of this stage and run it
with the specified inputs and outputs.
        This is called by the main method.
Parameters
----------
args: namespace
The argparse namespace for this subclass.
"""
import pdb
# Create the stage instance. Running under dask this only
# actually needs to happen for one process, but it's not a major
# overhead and lets us do a whole bunch of other setup above
stage = cls(args)
stage.setup_mpi(comm)
# This happens before dask is initialized
if stage.rank == 0:
print(f"Executing stage: {cls.name}")
if stage.is_dask():
is_client = stage.start_dask()
# worker and scheduler stages do not execute the
# run method under dask
if not is_client:
return
if args.cprofile: #pragma: no cover
profile = cProfile.Profile()
profile.enable()
if args.memmon: #pragma: no cover
monitor = MemoryMonitor.start_in_thread(interval=args.memmon)
try:
stage.run()
except Exception as error: #pragma: no cover
if args.pdb:
print(
"There was an exception - starting python debugger because you ran with --pdb"
)
print(error)
pdb.post_mortem()
else:
raise
finally:
if args.memmon: #pragma: no cover
monitor.stop()
if stage.is_dask():
stage.stop_dask()
# The default finalization renames any output files to their
# final location, but subclasses can override to do other things too
try:
stage.finalize()
except Exception as error: #pragma: no cover
if args.pdb:
print(
"There was an exception in the finalization - starting python debugger because you ran with --pdb"
)
print(error)
pdb.post_mortem()
else:
raise
if args.cprofile: #pragma: no cover
profile.disable()
profile.dump_stats(args.cprofile)
profile.print_stats("cumtime")
        # Under dask the root process has gone off to become the scheduler,
# and process 1 becomes the client which runs this code
# and gets to this point
if stage.rank == 0 or stage.is_dask():
print(f"Stage complete: {cls.name}")
def finalize(self):
"""Finalize the stage, moving all its outputs to their final locations."""
# Synchronize files so that everything is closed
if self.is_mpi(): #pragma: no cover
self.comm.Barrier()
# Move files to their final path
# Only the root process moves things, except under dask it is
# process 1, which is the only process that reaches this point
# (as noted above)
if (self.rank == 0) or self.is_dask():
for tag in self.output_tags():
# find the old and new names
temp_name = self.get_output(tag)
final_name = self.get_output(tag, final_name=True)
# it's not an error here if the path does not exist,
# because that will be handled later.
if pathlib.Path(temp_name).exists():
# replace directories, rather than nesting more results
if pathlib.Path(final_name).is_dir(): #pragma: no cover
shutil.rmtree(final_name)
shutil.move(temp_name, final_name)
else: #pragma: no cover
sys.stderr.write(
f"NOTE/WARNING: Expected output file {final_name} was not generated.\n"
)
#############################################
# Parallelism-related methods and properties.
#############################################
@property
def rank(self):
"""The rank of this process under MPI (0 if not running under MPI)"""
return self._rank
@property
def size(self):
"""The number or processes under MPI (1 if not running under MPI)"""
return self._size
@property
def comm(self):
"""The MPI communicator object (None if not running under MPI)"""
return self._comm
def is_parallel(self):
"""
Returns True if the code is being run in parallel.
Right now is_parallel() will return the same value as is_mpi(),
but that may change in future if we implement other forms of
parallelization.
"""
return self._parallel != SERIAL
def is_mpi(self):
"""
Returns True if the stage is being run under MPI.
"""
return self._parallel == MPI_PARALLEL
def is_dask(self):
"""
Returns True if the stage is being run in parallel with Dask.
"""
return self._parallel == DASK_PARALLEL
def start_dask(self):
"""
Prepare dask to run under MPI. After calling this method
        only a single process, MPI rank 1, will continue to execute code.
"""
# using the programmatic dask configuration system
# does not seem to work. Presumably the loggers have already
# been created by the time we modify the config. Doing it with
# env vars seems to work. If the user has already set this then
# we use that value. Otherwise we only want error logs
key = "DASK_LOGGING__DISTRIBUTED"
os.environ[key] = os.environ.get(key, "error")
try:
import dask
import dask_mpi
import dask.distributed
except ImportError: #pragma: no cover
print(
"ERROR: Using --mpi option on stages that use dask requires "
"dask[distributed] and dask_mpi to be installed."
)
raise
if self.size < 3: #pragma: no cover
raise ValueError(
"Dask requires at least three processes. One becomes a scheduler "
"process, one is a client that runs the code, and more are required "
"as worker processes."
)
# This requires my fork until/unless they merge the PR, to allow
# us to pass in these two arguments. In vanilla dask-mpi sys.exit
# is called at the end of the event loop without returning to us.
# After this point only a single process, MPI rank 1,
        # should continue to execute code. The others enter an event
# loop and return with is_client=False, which we return here
# to tell the caller that they should not run everything.
is_client = dask_mpi.initialize(comm=self.comm, exit=False)
if is_client:
# Connect this local process to remote workers.
self.dask_client = dask.distributed.Client()
# I don't yet know how to see this dashboard link at nersc
print(f"Started dask. Diagnostics at {self.dask_client.dashboard_link}")
return is_client
@staticmethod
def stop_dask():
"""
End the dask event loop
"""
from dask_mpi import send_close_signal
send_close_signal()
def split_tasks_by_rank(self, tasks):
"""Iterate through a list of items, yielding ones this process is responsible for/
Tasks are allocated in a round-robin way.
Parameters
----------
tasks: iterable
Tasks to split up
"""
for i, task in enumerate(tasks):
if i % self.size == self.rank:
yield task
def data_ranges_by_rank(self, n_rows, chunk_rows, parallel=True):
"""Split a number of rows by process.
Given a total number of rows to read and a chunk size, yield
the ranges within them that this process should handle.
Parameters
----------
n_rows: int
Total number of rows to split up
chunk_rows: int
Size of each chunk to be read.
        parallel: bool
            Whether to split data by rank or just give all processes all data.
            Default=True
"""
n_chunks = n_rows // chunk_rows
if n_chunks * chunk_rows < n_rows: #pragma: no cover
n_chunks += 1
if parallel:
it = self.split_tasks_by_rank(range(n_chunks))
else:
it = range(n_chunks)
for i in it:
start = i * chunk_rows
end = min((i + 1) * chunk_rows, n_rows)
yield start, end
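        # Worked example: n_rows=10, chunk_rows=4 gives chunks (0, 4), (4, 8),
        # (8, 10); with two MPI processes, rank 0 yields (0, 4) and (8, 10)
        # (round-robin), while rank 1 yields (4, 8).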
##################################################
# Input and output-related methods and properties.
##################################################
def get_input(self, tag):
"""Return the path of an input file with the given tag"""
return self._inputs[tag]
def get_output(self, tag, final_name=False):
"""Return the path of an output file with the given tag
If final_name is False then use a temporary name - file will
be moved to its final name at the end
"""
path = self._outputs[tag]
# If not the final version, add a tag at the start of the filename
if not final_name:
p = pathlib.Path(path)
p = p.parent / (IN_PROGRESS_PREFIX + p.name)
path = str(p)
return path
def open_input(self, tag, wrapper=False, **kwargs):
"""
Find and open an input file with the given tag, in read-only mode.
For general files this will simply return a standard
python file object.
For specialized file types like FITS or HDF5 it will return
a more specific object - see the types.py file for more info.
"""
path = self.get_input(tag)
input_class = self.get_input_type(tag)
obj = input_class(path, "r", **kwargs)
if wrapper: #pragma: no cover
return obj
return obj.file
def open_output(self, tag, wrapper=False, final_name=False, **kwargs): #pragma: no cover
"""
Find and open an output file with the given tag, in write mode.
If final_name is True then they will be opened using their final
target output name. Otherwise we will prepend "inprogress_" to their
file name. This means we know that if the final file exists then it
is completed.
If wrapper is True this will return an instance of the class
of the file as specified in the cls.outputs. Otherwise it will
return an open file object (standard python one or something more
specialized).
Parameters
----------
tag: str
Tag as listed in self.outputs
wrapper: bool
Default=False. Whether to return a wrapped file
final_name: bool
            Default=False. Whether to open the file under its final name (True) rather than the "inprogress_" temporary name (False)
**kwargs:
Extra args are passed on to the file's class constructor.
"""
path = self.get_output(tag, final_name=final_name)
output_class = self.get_output_type(tag)
# HDF files can be opened for parallel writing
# under MPI. This checks if:
# - we have been told to open in parallel
# - we are actually running under MPI
# and adds the flags required if all these are true
run_parallel = kwargs.pop("parallel", False) and self.is_mpi()
if run_parallel:
kwargs["driver"] = "mpio"
kwargs["comm"] = self.comm
# XXX: This is also not a dependency, but it should be.
# Or even better would be to make it a dependency of descformats where it
# is actually used.
import h5py
if not h5py.get_config().mpi:
print(
dedent(
"""\
Your h5py installation is not MPI-enabled.
Options include:
1) Set nprocess to 1 for all stages
2) Upgrade h5py to use mpi. See instructions here:
http://docs.h5py.org/en/latest/build.html#custom-installation
                    Note: If using conda, the most straightforward way to enable it is
conda install -c spectraldns h5py-parallel
"""
)
)
raise RuntimeError("h5py module is not MPI-enabled.")
# Return an opened object representing the file
obj = output_class(path, "w", **kwargs)
if wrapper:
return obj
return obj.file
@classmethod
def inputs_(cls):
"""
Return the dict of inputs
"""
return cls.inputs #pylint: disable=no-member
@classmethod
def outputs_(cls):
"""
Return the dict of inputs
"""
return cls.outputs #pylint: disable=no-member
@classmethod
def output_tags(cls):
"""
Return the list of output tags required by this stage
"""
return [tag for tag, _ in cls.outputs_()]
@classmethod
def input_tags(cls):
"""
Return the list of input tags required by this stage
"""
return [tag for tag, _ in cls.inputs_()]
def get_input_type(self, tag):
"""Return the file type class of an input file with the given tag."""
for t, dt in self.inputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known input") #pragma: no cover
def get_output_type(self, tag):
"""Return the file type class of an output file with the given tag."""
for t, dt in self.outputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known output") #pragma: no cover
##################################################
# Configuration-related methods and properties.
##################################################
@property
def instance_name(self):
"""Return the name associated to this particular instance of this stage"""
return self._configs.get('name', self.name)
@property
def config(self):
"""
Returns the configuration dictionary for this stage, aggregating command
line options and optional configuration file.
"""
return self._configs
def read_config(self, args):
"""
This function looks for the arguments of the pipeline stage using a
        combination of default values, command line options and a separate
        configuration file.
        The order for resolving config options is to look first at the
        command line arguments, then in the configuration file, and finally
        at any default value defined in self.config_options.
In case a mandatory argument (argument with no default) is missing,
an exception is raised.
Note that we recognize arguments with no default as the ones where
self.config_options holds a type instead of a value.
"""
# Try to load configuration file if provided
import yaml
config_file = self.get_input("config")
# This is all the config information in the file, including
# things for other stages
if config_file is not None:
with open(config_file) as _config_file:
overall_config = yaml.safe_load(_config_file)
else:
overall_config = {}
# The user can define global options that are inherited by
# all the other sections if not already specified there.
input_config = overall_config.get("global", {})
# This is just the config info in the file for this stage.
# It may be incomplete - there may be things specified on the
# command line instead, or just using their default values
stage_config = overall_config.get(self.instance_name, {})
input_config.update(stage_config)
self._configs.set_config(input_config, args)
def get_config_dict(self, ignore=None, reduce_config=False):
"""Write the current configuration to a dict
Parameters
----------
ignore : dict or None
Global parameters not to write
reduce_config : bool
If true, reduce the configuration by parsing out the inputs, outputs and global params
Returns
-------
out_dict : dict
The configuration
"""
out_dict = {}
if reduce_config:
ignore_keys = self.input_tags() + self.output_tags() + ['config']
else:
ignore_keys = []
ignore = ignore or {}
for key, val in self.config.items():
if reduce_config:
if key in ignore:
if ignore[key] == val:
continue
if key in ignore_keys:
continue
out_dict[key] = cast_to_streamable(val)
return out_dict
def find_inputs(self, pipeline_files):
"""Find and retrun all the inputs associated to this stage in the FileManager
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, _ in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = pipeline_files[aliased_tag]
return ret_dict
def find_outputs(self, outdir):
"""Find and retrun all the outputs associated to this stage
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = f"{outdir}/{ftype.make_name(aliased_tag)}"
return ret_dict
def print_io(self, stream=sys.stdout):
"""Print out the tags, paths and types for all the inputs and outputs of this stage"""
stream.write("Inputs--------\n")
for tag, ftype in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._inputs[tag]}\n")
stream.write("Outputs--------\n")
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._outputs[aliased_tag]}\n")
def should_skip(self, run_config):
"""Return true if we should skip a stage b/c it's outputs already exist and we are in resume mode"""
outputs = self.find_outputs(run_config["output_dir"]).values()
already_run_stage = all(os.path.exists(output) for output in outputs)
return already_run_stage and run_config["resume"]
def already_finished(self):
"""Print a warning that a stage is being skipped"""
print(f"Skipping stage {self.instance_name} because its outputs exist already")
def iterate_fits(self, tag, hdunum, cols, chunk_rows, parallel=True): #pragma: no cover
"""
Loop through chunks of the input data from a FITS file with the given tag
        TODO: add ceci tests of this function
Parameters
----------
tag: str
The tag from the inputs list to use
hdunum: int
The extension number to read
cols: list
The columns to read
chunk_rows: int
Number of columns to read and return at once
parallel: bool
Whether to split up data among processes (parallel=True) or give
all processes all data (parallel=False). Default = True.
Returns
-------
it: iterator
Iterator yielding (int, int, array) tuples of (start, end, data)
data is a structured array.
"""
fits = self.open_input(tag)
ext = fits[hdunum]
n = ext.get_nrows()
for start, end in self.data_ranges_by_rank(n, chunk_rows, parallel=parallel):
data = ext.read_columns(cols, rows=range(start, end))
yield start, end, data
def iterate_hdf(
self, tag, group_name, cols, chunk_rows, parallel=True, longest=False
):
"""
Loop through chunks of the input data from an HDF5 file with the given tag.
All the selected columns must have the same length.
Parameters
----------
tag: str
The tag from the inputs list to use
group: str
The group within the HDF5 file to use, looked up as
file[group]
cols: list
The columns to read
chunk_rows: int
Number of columns to read and return at once
parallel: bool
Whether to split up data among processes (parallel=True) or give
all processes all data (parallel=False). Default = True.
longest: bool
Whether to allow mixed length arrays and keep going until the longest
array is completed, returning empty arrays for shorter ones
Returns
-------
it: iterator
Iterator yielding (int, int, dict) tuples of (start, end, data)
"""
import numpy as np
hdf = self.open_input(tag)
group = hdf[group_name]
# Check all the columns are the same length
N = [len(group[col]) for col in cols]
n = max(N)
if not longest:
if not np.equal(N, n).all():
raise ValueError(
f"Different columns among {cols} in file {tag} group {group_name}"
"are different sizes - if this is acceptable set longest=True"
)
# Iterate through the data providing chunks
for start, end in self.data_ranges_by_rank(n, chunk_rows, parallel=parallel):
data = {col: group[col][start:end] for col in cols}
yield start, end, data
################################
# Pipeline-related methods
################################
@classmethod
def generate_command(cls, inputs, config, outputs, aliases=None):
"""
Generate a command line that will run the stage
"""
module = cls.get_module()
module = module.split(".")[0]
flags = [cls.name]
aliases = aliases or {}
for tag, _ in cls.inputs_():
aliased_tag = aliases.get(tag, tag)
try:
fpath = inputs[aliased_tag]
except KeyError as msg: #pragma: no cover
raise ValueError(f"Missing input location {aliased_tag} {str(inputs)}") from msg
flags.append(f"--{tag}={fpath}")
flags.append(f"--config={config}")
for tag, _ in cls.outputs_():
aliased_tag = aliases.get(tag, tag)
try:
fpath = outputs[aliased_tag]
except KeyError as msg: #pragma: no cover
raise ValueError(f"Missing output location {aliased_tag} {str(outputs)}") from msg
flags.append(f"--{tag}={fpath}")
flags = " ".join(flags)
# We just return this, instead of wrapping it in a
# parsl job
cmd = f"python3 -m {module} {flags}"
return cmd
@classmethod
def generate_cwl(cls, log_dir=None):
"""
Produces a CWL App object which can then be exported to yaml
"""
import cwlgen
module = cls.get_module()
module = module.split(".")[0]
# Basic definition of the tool
cwl_tool = cwlgen.CommandLineTool(
tool_id=cls.name,
label=cls.name,
base_command="python3",
cwl_version="v1.0",
doc=cls.__doc__,
)
if log_dir is not None:
cwl_tool.stdout = f"{cls.name}.out"
cwl_tool.stderr = f"{cls.name}.err"
# Adds the first input binding with the name of the module and pipeline stage
input_arg = cwlgen.CommandLineBinding(position=-1, value_from=f"-m{module}")
cwl_tool.arguments.append(input_arg)
input_arg = cwlgen.CommandLineBinding(position=0, value_from=f"{cls.name}")
cwl_tool.arguments.append(input_arg)
type_dict = {int: "int", float: "float", str: "string", bool: "boolean"}
# Adds the parameters of the tool
for opt, def_val in cls.config_options.items():
# Handles special case of lists:
if isinstance(def_val, list):
v = def_val[0]
param_type = {
"type": "array",
"items": type_dict[v] if isinstance(v, type) else type_dict[type(v)],
}
default = def_val if not isinstance(v, type) else None
input_binding = cwlgen.CommandLineBinding(
prefix=f"--{opt}=", item_separator=",", separate=False
)
else:
param_type = (
type_dict[def_val]
if isinstance(def_val, type)
else type_dict[type(def_val)]
)
default = def_val if not isinstance(def_val, type) else None
if param_type == "boolean":
input_binding = cwlgen.CommandLineBinding(prefix=f"--{opt}")
else:
input_binding = cwlgen.CommandLineBinding(
prefix=f"--{opt}=", separate=False
)
input_param = cwlgen.CommandInputParameter(
opt,
label=opt,
param_type=param_type,
input_binding=input_binding,
default=default,
doc="Some documentation about this parameter",
)
# We are bypassing the cwlgen builtin type check for the special case
# of arrays until that gets added to the standard
if isinstance(def_val, list):
input_param.type = param_type
cwl_tool.inputs.append(input_param)
# Add the inputs of the tool
for i, inp in enumerate(cls.input_tags()):
input_binding = cwlgen.CommandLineBinding(prefix=f"--{inp}")
input_param = cwlgen.CommandInputParameter(
inp,
label=inp,
param_type="File",
param_format=cls.inputs[i][1].format, #pylint: disable=no-member
input_binding=input_binding,
doc="Some documentation about the input",
)
cwl_tool.inputs.append(input_param)
# Adds the overall configuration file
input_binding = cwlgen.CommandLineBinding(prefix="--config")
input_param = cwlgen.CommandInputParameter(
"config",
label="config",
param_type="File",
param_format="http://edamontology.org/format_3750",
input_binding=input_binding,
doc="Configuration file",
)
cwl_tool.inputs.append(input_param)
# Add the definition of the outputs
for i, out in enumerate(cls.output_tags()):
output_name = cls.outputs[i][1].make_name(out) #pylint: disable=no-member
output_binding = cwlgen.CommandOutputBinding(glob=output_name)
output = cwlgen.CommandOutputParameter(
out,
label=out,
param_type="File",
output_binding=output_binding,
param_format=cls.outputs[i][1].format, #pylint: disable=no-member
doc="Some results produced by the pipeline element",
)
cwl_tool.outputs.append(output)
if log_dir is not None:
output = cwlgen.CommandOutputParameter(
f"{cls.name}@stdout",
label="stdout",
param_type="stdout",
doc="Pipeline elements standard output",
)
cwl_tool.outputs.append(output)
error = cwlgen.CommandOutputParameter(
f"{cls.name}@stderr",
label="stderr",
param_type="stderr",
doc="Pipeline elements standard output",
)
cwl_tool.outputs.append(error)
# Potentially add more metadata
# This requires a schema however...
# metadata = {'name': cls.name,
# 'about': 'Some additional info',
# 'publication': [{'id': 'one_doi'}, {'id': 'another_doi'}],
# 'license': ['MIT']}
# cwl_tool.metadata = cwlgen.Metadata(**metadata)
return cwl_tool
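# A minimal subclass sketch (hypothetical, not part of this module; "TextFile"
# stands in for one of the file-type classes the PipelineStage docstring
# refers to):
#
# class MyStage(PipelineStage):
#     name = "MyStage"
#     inputs = [("eggs", TextFile)]
#     outputs = [("spam", TextFile)]
#     config_options = {"factor": 2}
#
#     def run(self):
#         with open(self.get_input("eggs")) as f_in:
#             text = f_in.read()
#         with open(self.get_output("spam"), "w") as f_out:
#             f_out.write(text * self.config["factor"])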
| 35.912598
| 118
| 0.568967
| 5,561
| 45,609
| 4.566445
| 0.146736
| 0.010396
| 0.016894
| 0.005671
| 0.240175
| 0.18847
| 0.15862
| 0.123691
| 0.103371
| 0.092069
| 0
| 0.002534
| 0.342476
| 45,609
| 1,269
| 119
| 35.940898
| 0.844243
| 0.341161
| 0
| 0.269592
| 0
| 0.003135
| 0.134274
| 0.005551
| 0
| 0
| 0
| 0.000788
| 0
| 1
| 0.073668
| false
| 0
| 0.03605
| 0
| 0.178683
| 0.020376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eff99e10986bd9b8e0f53017db77d82913562ddf
| 1,102
|
py
|
Python
|
topology.py
|
Patatone/ryu-static-load-balancing
|
7f3508ff8b135736150ad5c38b544d6e6ba90509
|
[
"Apache-2.0"
] | null | null | null |
topology.py
|
Patatone/ryu-static-load-balancing
|
7f3508ff8b135736150ad5c38b544d6e6ba90509
|
[
"Apache-2.0"
] | null | null | null |
topology.py
|
Patatone/ryu-static-load-balancing
|
7f3508ff8b135736150ad5c38b544d6e6ba90509
|
[
"Apache-2.0"
] | null | null | null |
from mininet.topo import Topo
from mininet.link import TCLink
class Topology(Topo):
def build(self):
# Hosts and switches
host1 = self.addHost('H1')
host2 = self.addHost('H2')
host3 = self.addHost('H3')
host4 = self.addHost('H4')
host5 = self.addHost('H5')
server1 = self.addHost('SRV1', ip='10.0.1.1/8', mac="00:00:00:00:01:01")
server2 = self.addHost('SRV2', ip='10.0.1.2/8', mac="00:00:00:00:01:02")
switch1 = self.addSwitch('SW1')
# Links
self.addLink(server1, switch1, port2=1, cls=TCLink, bw=1000, delay='1ms')
self.addLink(server2, switch1, port2=2, cls=TCLink, bw=1000, delay='1ms')
self.addLink(host1, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host2, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host3, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host4, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host5, switch1, cls=TCLink, bw=1000, delay='5ms')
topos = { 'topology': ( lambda: Topology() ) }
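# Run sketch (assumes a Mininet install; the topo key matches the dict above,
# and the remote controller is an assumption based on the ryu controller this
# repo targets):
#   sudo mn --custom topology.py --topo topology --controller remote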
| 39.357143
| 81
| 0.607985
| 156
| 1,102
| 4.294872
| 0.339744
| 0.114925
| 0.114925
| 0.156716
| 0.432836
| 0.432836
| 0.432836
| 0.346269
| 0.244776
| 0
| 0
| 0.121951
| 0.218693
| 1,102
| 27
| 82
| 40.814815
| 0.656214
| 0.021779
| 0
| 0
| 0
| 0
| 0.096744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eff9cec3835ce08f6cdd64396a53993ba845ce23
| 5,155
|
py
|
Python
|
JFJB.py
|
stevevai/JFJB-crawler
|
182c8930e5e979ea9176452764e9494a17574b1f
|
[
"Apache-2.0"
] | 1
|
2019-04-14T16:28:28.000Z
|
2019-04-14T16:28:28.000Z
|
JFJB.py
|
stevevai/JFJB-crawler
|
182c8930e5e979ea9176452764e9494a17574b1f
|
[
"Apache-2.0"
] | null | null | null |
JFJB.py
|
stevevai/JFJB-crawler
|
182c8930e5e979ea9176452764e9494a17574b1f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 23:00:28 2018
@author: wangshuai
"""
import urllib
import urllib.request as urllib2
import http.cookiejar as cookielib
import io
import re
import gzip
from selenium import webdriver
import datetime
def get_Time():
begin = datetime.date(2016,1,1)
end = datetime.date(2018,4,23)
time_list = []
for i in range((end - begin).days+1):
day = begin + datetime.timedelta(days=i)
time_list.append(day.strftime("%Y-%m/%d"))
return time_list
class Config:
def __init__(self):
self.config = {}
self.config["headers"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
self.config["outputPath"] = "./"
self.config["keywords"] = ["习近平","习主席","中央军委主席","中共中央总书记","国家主席"]
self.config["base_url"] = "http://www.81.cn/jfjbmap/content/"
def get(self, key, parent=None):
if key and key in self.config.keys():
return self.config[key]
def get_Html(url, js = False, time = 0):
config = Config()
if js:
try:
driver = webdriver.PhantomJS()
driver.get(url)
except Exception as err:
print (err)
print ("=== 网络不稳定,再次连接 ...")
if time==0:
return -1
time -= 1
return get_Html(url, js=True, time=time)
html = driver.page_source
driver.close()
return html
else:
try:
cj = cookielib.CookieJar()
            # NOTE: this proxy handler is created but never passed to
            # build_opener below, so requests do not actually use the proxy.
            proxy = urllib2.ProxyHandler({'https': '127.0.0.1:1080'})
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [("User-agent", config.get("headers"))]
urllib2.install_opener(opener)
req=urllib2.Request(url)
con=urllib2.urlopen(req)
html=con.read()
if con.getheader('Content-Encoding') == "gzip":
buf = io.BytesIO(html)
gf = gzip.GzipFile(fileobj=buf)
html = gf.read()
html = html.decode('utf-8')
except Exception as err:
print (err)
print ("=== 网络不稳定,再次连接 ...")
if time==0:
return -1
time -= 1
return get_Html(url, js=False, time=time)
return html
def save(info, handler):
for i in range(len(info["time"])):
for ss in ["time","title"]:
txt = info[ss][i].strip(" ")
if ss=="time":
txt+="->"
handler.write(txt)
handler.write("\r\n")
class GetArticle:
def __init__(self, config, handler = None):
self.config = config
self.url = self.config.get("base_url")
self.handler = handler
self.article={}
self.article["url"] = []
self.article["title"] = []
self.article["detail"] = []
self.article["time"] = []
def index_detail(self):
pattern_index = re.compile('<li><a href="(.*?)">(.*?)</a></li>')
pattern_detail = re.compile('<P>(.*?)</P>')
time_list = get_Time()
# ifile = open("detail_info.txt","w",encoding='utf-8')
for i in range(len(time_list)):
url_loop = self.url+time_list[i]+"/node_2.htm"
try:
index = pattern_index.findall(get_Html(url_loop,js=False,time=3))
url = urllib.parse.urljoin(url_loop,index[0][0])
title = index[0][1]
# detail_list = pattern_detail.findall(get_Html(url,js=False,time=3))
# detail = ""
# for j in range(len(detail_list)):
# detail += detail_list[j]
key_flag = 0
for key in self.config.get("keywords"):
if key in title:
key_flag = 1
if key_flag:
self.article["time"].append(time_list[i])
self.article["title"].append(title)
self.article["url"].append(url)
# self.article["detail"].append(detail)
# ifile.write(time_list[i]+": "+title+"\r\n"+url+"\r\n"+detail+"\r\n")
if i%30 == 0:
print(str(i)+"->"+time_list[i]+": "+title)
print(url)
else:
continue
except Exception as err:
print(err)
print("...网址: "+url_loop+" 获取|解析 错误...")
continue
# ifile.close()
save(self.article, self.handler)
if __name__ == '__main__':
config = Config()
ifile = open(config.get("outputPath")+"rough_info.txt","w",encoding='utf-8')
getArticle = GetArticle(config, handler = ifile)
getArticle.index_detail()
ifile.close()
| 31.625767
| 156
| 0.49098
| 582
| 5,155
| 4.249141
| 0.314433
| 0.04448
| 0.020218
| 0.01941
| 0.125354
| 0.114032
| 0.077234
| 0.06389
| 0.06389
| 0.06389
| 0
| 0.029132
| 0.36741
| 5,155
| 162
| 157
| 31.820988
| 0.729224
| 0.098157
| 0
| 0.213675
| 0
| 0.008547
| 0.109001
| 0.005828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059829
| false
| 0
| 0.068376
| 0
| 0.213675
| 0.068376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
effded4514a6e107993718820a8e681baef231bd
| 4,743
|
py
|
Python
|
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | 1
|
2019-04-23T04:32:35.000Z
|
2019-04-23T04:32:35.000Z
|
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | null | null | null |
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
import gym
from gym.spaces import Discrete, Box
class MLP(nn.Module):
def __init__(self, obs_dim, sizes, activation=nn.Tanh, output_activation=None):
super(MLP, self).__init__()
sizes = [obs_dim] + sizes
layers = nn.ModuleList()
for i in range(len(sizes)-2):
layers.append(nn.Linear(sizes[i], sizes[i+1]))
if activation is not None:
layers.append(activation())
layers.append(nn.Linear(sizes[-2], sizes[-1]))
if output_activation is not None:
layers.append(output_activation())
self.mlp = nn.Sequential(*layers)
def forward(self, x):
out = self.mlp(x)
return out
def train(env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,
epochs=50, batch_size=5000, render=False):
# make environment, check spaces, get obs / act dims
env = gym.make(env_name)
assert isinstance(env.observation_space, Box), \
"This example only works for envs with continuous state spaces."
assert isinstance(env.action_space, Discrete), \
"This example only works for envs with discrete action spaces."
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
# make core of policy network
policy_network = MLP(obs_dim, sizes=hidden_sizes+[n_acts])
# make train optimizer
optimizer = torch.optim.Adam(policy_network.parameters(), lr=lr)
# for training policy
def train_one_epoch():
# make some empty lists for logging.
batch_obs = [] # for observations
batch_log_probs = [] # for log probabilities
batch_acts = [] # for actions
batch_weights = [] # for R(tau) weighting in policy gradient
batch_rets = [] # for measuring episode returns
batch_lens = [] # for measuring episode lengths
# reset episode-specific variables
obs = env.reset() # first obs comes from starting distribution
done = False # signal from environment that episode is over
ep_rews = [] # list for rewards accrued throughout ep
# render first episode of each epoch
finished_rendering_this_epoch = False
# collect experience by acting in the environment with current policy
while True:
# rendering
if (not finished_rendering_this_epoch) and render:
env.render()
# save obs
batch_obs.append(obs.copy())
# act in the environment
logits = policy_network(torch.tensor(obs).view(1,-1).float())
m = Categorical(logits=logits)
act = m.sample()
batch_log_probs.append(m.log_prob(act))
obs, rew, done, _ = env.step(act.item())
# save action, reward
batch_acts.append(act)
ep_rews.append(rew)
if done:
# if episode is over, record info about episode
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# the weight for each logprob(a|s) is R(tau)
batch_weights += [ep_ret] * ep_len
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
# won't render again this epoch
finished_rendering_this_epoch = True
# end experience loop if we have enough of it
if len(batch_obs) > batch_size:
break
# take a single policy gradient update step
optimizer.zero_grad()
batch_loss = torch.cat(batch_log_probs).mul(torch.tensor(batch_weights))
loss = -batch_loss.mean()
loss.backward()
optimizer.step()
return loss.detach(), batch_rets, batch_lens
# training loop
for i in range(epochs):
batch_loss, batch_rets, batch_lens = train_one_epoch()
print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f'%
(i, batch_loss, np.mean(batch_rets), np.mean(batch_lens)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print('\nUsing simplest formulation of policy gradient.\n')
train(env_name=args.env_name, render=args.render, lr=args.lr)
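# Run sketch (hypothetical invocation using the flags defined above):
#   python 1_simple_pg.py --env CartPole-v0 --lr 1e-2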
| 37.346457
| 83
| 0.609741
| 604
| 4,743
| 4.619205
| 0.344371
| 0.012545
| 0.011828
| 0.027957
| 0.107527
| 0.044444
| 0.022222
| 0
| 0
| 0
| 0
| 0.007452
| 0.292642
| 4,743
| 126
| 84
| 37.642857
| 0.824143
| 0.185958
| 0
| 0
| 0
| 0.011905
| 0.077244
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.178571
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
56027f5cae2f8100bbcabdb3f59b412acf2181e4
| 6,402
|
py
|
Python
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 3
|
2017-08-18T00:32:54.000Z
|
2017-11-18T02:25:51.000Z
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 3
|
2017-08-15T09:59:25.000Z
|
2018-08-22T17:28:13.000Z
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 1
|
2018-08-07T12:38:48.000Z
|
2018-08-07T12:38:48.000Z
|
import collections
from thegame.abilities import Ability
Vector = collections.namedtuple('Vector', ('x', 'y'))
Vector.__doc__ = '''
A 2D vector.
Used to represent a point and velocity in thegame
'''
class _EntityAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data.entity, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class _DataAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class Entity:
def __init__(self, data):
self.data = data
def __repr__(self):
return (
f'<{self.__class__.__name__}#{self.id} '
f'BD={self.body_damage} '
f'HP={self.health}/{self.max_health} '
f'@({self.position.x:.0f},{self.position.y:.0f})>'
)
id = _EntityAttribute('The id of the entity')
@property
def position(self):
'''
The position of the entity in a 2-tuple (x, y).
'''
p = self.data.entity.position
return Vector(p.x, p.y)
@property
def velocity(self):
'''
The velocity of the entity in a 2-tuple (x, y).
'''
v = self.data.entity.velocity
return Vector(v.x, v.y)
radius = _EntityAttribute('The radius of the entity')
health = _EntityAttribute(
'''
The health of the entity in a non-negative integer.
        When an entity's health is less than or equal to zero it dies.
And the one dealing the killing blow is rewarded with
``rewarding_experience``.
'''
)
body_damage = _EntityAttribute(
'''
The body damage of the entity.
When two entities collide, they reduce each other's health
with their body damage.
'''
)
rewarding_experience = _EntityAttribute(
'''
How much experience you will get if you kill this entity.
'''
)
max_health = _EntityAttribute(
'''
The maximum health of this entity.
'''
)
class Polygon(Entity):
'''
    The neutral polygons.
'''
@property
def edges(self):
'''
How many edges does the polygon have
'''
return self.data.edges
class Bullet(Entity):
'''
The bullet. Shot from a Hero.
'''
@property
def owner_id(self):
'''
The id of the hero owning the bullet
'''
return self.data.owner
HeroAbility = collections.namedtuple(
'HeroAbility',
['level', 'value']
)
HeroAbilityList = collections.namedtuple(
'HeroAbilityList',
[ab.as_camel for ab in Ability]
)
class _HeroAbilityShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.value``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].value
def __set__(self, obj, value):
        # note: unlike the entity attributes, this class never sets self.name,
        # so report the ability name instead
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroAbilityLevelShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.level``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].level
def __set__(self, obj, value):
        # note: as above, self.name is never set on this class
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroMeta(type):
@classmethod
def __prepare__(mcs, name, bases, **kwds):
return {
**{
ab.as_camel: _HeroAbilityShortcut(ab)
for ab in Ability
},
**{
ab.as_camel + '_level': _HeroAbilityLevelShortcut(ab)
for ab in Ability
}
}
class Hero(Entity, metaclass=_HeroMeta):
'''
A Hero is a player in thegame.
'''
def __init__(self, data):
super().__init__(data)
        # we're doing this so it will not be modified accidentally
# maybe not a good way, though.
self.__dict__['abilities'] = HeroAbilityList(
*[HeroAbility(*x) for x in zip(
self.data.ability_levels, self.data.ability_values)]
)
@property
def abilities(self):
'''
returns a tuple of abilities.
Example::
hero.abilities[MaxHealth].value # get the hero's max health
hero.abilities.max_health.value # the same thing
hero.abilities[MaxHealth].level # get the ability level
hero.abilities.max_health.level # the same thing again
'''
return self.__dict__['abilities']
orientation = _DataAttribute(
'''
The orientation of the hero; the direction the barrel is facing,
in radians.
'''
)
level = _DataAttribute('The level of the hero')
score = _DataAttribute('The score of the hero')
experience = _DataAttribute('The experience the hero has')
experience_to_level_up = _DataAttribute(
'The experience required for the hero to level up')
skill_points = _DataAttribute(
'Number of skill points available to level up abilities'
)
cooldown = _DataAttribute(
'''
How many ticks until a bullet is ready.
Increase the *reload* ability to reduce the cooldown.
``shoot`` and ``shoot_at`` can still be called when on cooldown, but
nothing will happen instead.
'''
)
health_regen_cooldown = _DataAttribute(
'''
How many ticks until the hero can start to regenerate health
'''
)
name = _DataAttribute(
'''
The name of the hero. Not guaranteed to be unique.
'''
)
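# Usage sketch for the metaclass shortcuts above. The `hero` and its `data`
# payload are hypothetical (they come from the game server at runtime), and
# `Ability.MaxHealth` is assumed to be a member of the `Ability` enum used
# throughout this module:
#
#     hero = Hero(data)
#     hero.abilities[Ability.MaxHealth].value   # via the HeroAbilityList
#     hero.max_health                           # same value, metaclass shortcut
#     hero.max_health_level                     # the ability's current level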
| 25.710843
| 76
| 0.592002
| 745
| 6,402
| 4.871141
| 0.252349
| 0.015156
| 0.018187
| 0.01984
| 0.303114
| 0.283825
| 0.262882
| 0.262882
| 0.262882
| 0.250758
| 0
| 0.00113
| 0.308654
| 6,402
| 248
| 77
| 25.814516
| 0.818798
| 0.097001
| 0
| 0.335821
| 0
| 0
| 0.16112
| 0.049372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171642
| false
| 0
| 0.014925
| 0.014925
| 0.477612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bc69e662f7af10d0c2438ee8ea0f1bb00d372e9
| 3,456
|
py
|
Python
|
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
import os
import json
# import wget
from flask import (
Flask,
jsonify,
send_from_directory,
request,
redirect,
url_for
)
from flask_sqlalchemy import SQLAlchemy
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from werkzeug.utils import secure_filename
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_restx import Api, Resource, fields, abort, reqparse
from celery import Celery
import celery.states as states
from . import api_functions
from . import topic_model_classifier
# global variables
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND')
celery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object("project.config.Config")
db = SQLAlchemy(app)
api = Api(app, version='1.0',
title='UGC API services',
description='REST APIs for processing user-generated content')
ns = api.namespace('comments_api', description='REST services API for news comments')
# input and output definitions
topic_model_single_input = api.model('TopicModelSingleInput', {
'text': fields.String(required=True, description='input text for topic')
})
topic_model_single_output = api.model('TopicModelSingleOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
topic_model_list_input = api.model('TopicModelListInput', {
'texts': fields.List(fields.String, required=True, description='input list of texts for topic')
})
topic_model_list_output = api.model('TopicModelListOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
@ns.route('/topic_model/')
class TopicModelClassifier(Resource):
@ns.doc('predict topic from single text')
@ns.expect(topic_model_single_input, validate=True)
@ns.marshal_with(topic_model_single_output)
def post(self):
topics = topic_model_classifier.predict([api.payload['text']])
return {'suggested_label':topics['suggested_label'],
'description':topics['description'],
'topic_words':topics['topic_words'] }
@ns.route('/topic_model_list/')
class TopicModelListClassifier(Resource):
@ns.doc('predict topic from list of texts')
@ns.expect(topic_model_list_input, validate=True)
@ns.marshal_with(topic_model_list_output)
def post(self):
topics = topic_model_classifier.predict(api.payload['texts'])
return {'suggested_label': topics['suggested_label'],
'description': topics['description'],
'topic_words': topics['topic_words']}
@app.route("/health/")
#@app.doc('get information about the health of this API')
def health():
return api_functions.health()
@app.route("/documentation/")
#@app.doc('get Swagger documentation about this API')
def documentation():
return api_functions.documentation()
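# Client-side sketch for the route defined above. The host and port are
# assumptions (a local dev server); the payload shape follows
# topic_model_single_input:
#
#     import requests
#     resp = requests.post(
#         'http://localhost:5000/comments_api/topic_model/',
#         json={'text': 'Primjer teksta za analizu tema.'},
#     )
#     print(resp.json()['suggested_label'])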
| 35.628866
| 109
| 0.739005
| 427
| 3,456
| 5.796253
| 0.259953
| 0.052525
| 0.064646
| 0.077576
| 0.416162
| 0.416162
| 0.360808
| 0.342626
| 0.310303
| 0.310303
| 0
| 0.000673
| 0.139757
| 3,456
| 96
| 110
| 36
| 0.83182
| 0.047743
| 0
| 0.246575
| 0
| 0
| 0.242996
| 0.025883
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0.164384
| 0.027397
| 0.30137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bc9a28e7931530bacfb9f635e9e8859c38140a3
| 1,460
|
py
|
Python
|
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | 1
|
2021-04-12T09:35:08.000Z
|
2021-04-12T09:35:08.000Z
|
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | null | null | null |
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
import subprocess
import psutil
def inspect_process(pid):
"""Determine:
1. whether the process is running in a container;
2. if so, output the container id and the user.
:return: (is_in_container, container_id, user_name)
"""
assert psutil.pid_exists(pid), "The process doesn't exist"
try:
result = subprocess.check_output(f'cat /proc/{pid}/cgroup', shell=True)
# print(result)
except subprocess.CalledProcessError as e:
return_code = e.returncode
print(f"Inspect Wrong Error Code{return_code}")
sys.exit(1)
line = result.decode('utf-8').split('\n')[0].strip()
is_in_container = 'docker' in line
container_id = ''
user_name = ''
if is_in_container:
container_id = line.split('/')[-1][:12]  # keep only the first 12 chars of the container id
container_info = subprocess.check_output(f'docker ps -a|grep {container_id}', shell=True).decode('utf-8')
user_name = container_info.strip().split()[-1]
return is_in_container, container_id, user_name
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Inspector for docker")
parser.add_argument("-p", type=int, help="the pid")
args = parser.parse_args()
is_in_container, container_id, user_name = inspect_process(args.p)
print(f"Is the process running in the container :{is_in_container}")
print(f"The container id {container_id}")
print(f"The user name {user_name}")
| 33.181818
| 113
| 0.678767
| 207
| 1,460
| 4.594203
| 0.410628
| 0.104101
| 0.068349
| 0.059937
| 0.18612
| 0.136698
| 0.136698
| 0
| 0
| 0
| 0
| 0.010274
| 0.2
| 1,460
| 44
| 114
| 33.181818
| 0.803938
| 0.116438
| 0
| 0
| 0
| 0
| 0.22573
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.206897
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bcbbf9c4a02cc75f67572b9d3e876126fc65c10
| 313
|
py
|
Python
|
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | null | null | null |
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | 12
|
2018-11-09T03:00:28.000Z
|
2019-01-02T05:39:55.000Z
|
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | null | null | null |
import sys
import dbops
from pathlib import Path
if len(sys.argv) < 2:
print("Bucephalus Remove File Script")
print("Usage: " + sys.argv[0] + " <identifier>")
sys.exit()
sys.argv.pop(0)
ident = sys.argv.pop(0)
if dbops.remove_record_by_id(ident) == None:
print("*** Error: failed to remove record.")
| 18.411765
| 50
| 0.677316
| 49
| 313
| 4.265306
| 0.571429
| 0.133971
| 0.095694
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015267
| 0.162939
| 313
| 16
| 51
| 19.5625
| 0.782443
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bcc388c3974bdfcd63888beb8ed71bb0fa61380
| 5,133
|
py
|
Python
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Chenger1/stellaris-trpack
|
5d85bbbc7374975b5da729899b5691ea77c16ea2
|
[
"MIT"
] | 3
|
2020-07-23T00:32:06.000Z
|
2020-10-09T18:05:56.000Z
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Chenger1/stellaris-trpack
|
5d85bbbc7374975b5da729899b5691ea77c16ea2
|
[
"MIT"
] | 105
|
2020-07-16T12:23:57.000Z
|
2021-01-18T18:11:40.000Z
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Letiso/Stellaris-True-Machine-Translation-Tool
|
b80431c1c9b49c2482cb9aefa02eb0de62d7cc56
|
[
"MIT"
] | 1
|
2020-07-15T13:30:57.000Z
|
2020-07-15T13:30:57.000Z
|
"""
↓ Инициализация данных ↓
"""
from PyQt5 import QtWidgets, QtCore
from GUI.GUI_windows_source import TranslationLanguage
from json import load, dump
from functools import partial
import copy
from scripts.stylesheets import choosen_lang_style, not_chosen_lang_style
class TranslationLanguageWindow(QtWidgets.QDialog, TranslationLanguage.Ui_Dialog):
def __init__(self, parent):
super().__init__(parent)
self.setupUi(self)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.FramelessWindowHint)
self.setModal(True)
self.parent = parent
self.oldPos = self.pos()
self.buttons_data = {
'RussianButton': 'ru', 'UkrainianButton': 'uk', 'PolishButton': 'pl',
'ChineseButton': 'zh-cn', 'ArabicButton': 'ar', 'BelarusianButton': 'be',
'BulgarianButton': 'bg', 'CroatianButton': 'hr', 'CzechButton': 'cs',
'DanishButton': 'da', 'DutchButton': 'nl', 'EstonianButton': 'et',
'FinnishButton': 'fi', 'FrenchButton': 'fr', 'GermanButton': 'de',
'GreekButton': 'el', 'HungarianButton': 'hu', 'ItalianButton': 'it',
'JapaneseButton': 'ja', 'KoreanButton': 'ko', 'LithuanianButton': 'lt',
'NorwegianButton': 'no', 'PortugueseButton': 'pt', 'SlovakButton': 'sk',
'SpanishButton': 'es', 'SwedishButton': 'sv', 'TurkishButton': 'tr'
}
self.string = self.LanguagesList.text().split()
self.buttons = self.prep_buttons()
self.init_handlers()
self.gridLayout.setColumnMinimumWidth(1, 50)
self.generator = copy.copy(self.buttons)
self.row_index = 0
self.column_index = -1
self.paint_elements()
def init_handlers(self):
self.WindowMoveButton.installEventFilter(self)
self.ExitButton.clicked.connect(self.close)
self.SearchLine.textChanged.connect(self.search_init)
self.ReferenceButton.clicked.connect(lambda: self.parent.reference_window('QLabel_5_TargetLanguage'))
def prep_buttons(self):
buttons = {}
index = 0
for button, lang in self.buttons_data.items():
buttons[button] = QtWidgets.QPushButton(self.string[index])
buttons[button].setObjectName(button)
buttons[button].clicked.connect(partial(self.set_target_language, target_language=lang))
index += 1
return buttons
def search_init(self, text):
self.clean()
self.search(text)
self.choose_lang()
def eventFilter(self, source, event):
"""
Данная функция предназначена для отслеживания позиции окна
и его перемещения кликом по шапке
"""
if source == self.WindowMoveButton:
if event.type() == QtCore.QEvent.MouseButtonPress:
self.oldPos = event.pos()
elif event.type() == QtCore.QEvent.MouseMove and self.oldPos is not None:
self.move(self.pos() - self.oldPos + event.pos())
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.oldPos = None
return super().eventFilter(source, event)
"""
↓ Рендер ↓
"""
def clean(self):
for i in reversed(range(self.gridLayout.count())):
self.gridLayout.itemAt(i).widget().setParent(None)
def search(self, text):
with open('Properties.json', 'r', encoding='utf-8') as prop:
properties = load(prop)
self.column_index = -1
self.generator = copy.copy(self.buttons)
for object_name, button in self.buttons.items():
if text not in button.text().lower():
if properties["target_language"] not in self.buttons_data[object_name]:
del self.generator[object_name]
self.paint_elements()
def paint_elements(self):
for object_name, button in self.generator.items():
if self.column_index < 2:
self.column_index += 1
else:
self.column_index = 0
self.row_index += 1
self.gridLayout.addWidget(button, self.row_index, self.column_index)
self.choose_lang()
"""
↓ Выбор языка, на который будут переводиться файлы ↓
"""
def choose_lang(self):
with open("Properties.json", 'r', encoding='utf-8') as prop:
properties = load(prop)
for object_name, button in self.buttons.items():
if self.buttons_data[object_name] == properties["target_language"]:
choosen_lang_style(button)
else:
not_chosen_lang_style(button)
def set_target_language(self, target_language=None):
with open("Properties.json", 'r', encoding='utf-8') as prop:
properties = load(prop)
properties["target_language"] = target_language
with open("Properties.json", 'w', encoding='utf-8') as prop:
dump(properties, prop)
self.choose_lang()
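# The dialog above reads and writes a Properties.json file in the working
# directory; the only key it touches is "target_language". A minimal sketch
# of that file (any other keys the application stores are omitted here):
#
#     {
#         "target_language": "en"
#     }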
| 37.742647
| 109
| 0.601403
| 543
| 5,133
| 5.574586
| 0.373849
| 0.03634
| 0.029732
| 0.029072
| 0.164519
| 0.113644
| 0.084242
| 0.084242
| 0.084242
| 0.058474
| 0
| 0.004861
| 0.27859
| 5,133
| 135
| 110
| 38.022222
| 0.810964
| 0.022794
| 0
| 0.183673
| 0
| 0
| 0.118743
| 0.004817
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.061224
| 0
| 0.204082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bcdc5c2dfab2675a93de75f43fee73049b1f7fb
| 1,347
|
py
|
Python
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 1
|
2020-06-25T16:39:35.000Z
|
2020-06-25T16:39:35.000Z
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 6
|
2020-03-06T12:31:38.000Z
|
2021-09-20T15:08:17.000Z
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | null | null | null |
from flask import (
Blueprint, request)#, flash, g, redirect, render_template, get_template_attribute, url_for, jsonify
# )
# from werkzeug.exceptions import abort
import requests
# from demosaurus.db import get_db
# import pandas as pd
# from nltk.metrics import distance
# import re
# import numpy as np
bp = Blueprint('subject_headings', __name__)
annif_url = 'https://kbresearch.nl/annif/v1/'
@bp.route('/annif-projects/')
def annif_projects():
response = requests.get(annif_url+'projects')
if response.status_code == 200:
return response.json()
else:
print('Unable to obtain Annif projects from', response.url)
@bp.route('/annif-suggestions/')
def annif_suggestions():
params = dict(request.args) # turn into a mutable dictionary
project = params.pop('project')
project_options = [proj['project_id'] for proj in annif_projects()['projects']]
print(project_options)
if project not in project_options:
print("Annif was called with non-existing project parameter:", project)
url = annif_url + "projects/" + project + "/suggest"
response = requests.post(url, data = params)
if response.status_code == 200:
return response.json()
else:
print('Unable to obtain Annif suggestions from', response.url)
print(response.status_code)
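# Client sketch for the blueprint above. The host is an assumption (a local
# dev server); 'project' is popped by annif_suggestions() and the remaining
# query parameters (e.g. 'text') are forwarded to Annif as POST data:
#
#     import requests
#     r = requests.get('http://localhost:5000/annif-suggestions/',
#                      params={'project': 'some-project-id', 'text': 'Some text.'})
#     print(r.json())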
| 32.071429
| 104
| 0.697105
| 171
| 1,347
| 5.356725
| 0.461988
| 0.056769
| 0.058952
| 0.043668
| 0.150655
| 0.150655
| 0.150655
| 0.150655
| 0.150655
| 0.150655
| 0
| 0.006428
| 0.191537
| 1,347
| 42
| 105
| 32.071429
| 0.834711
| 0.197476
| 0
| 0.222222
| 0
| 0
| 0.242311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.222222
| 0.259259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bd7fb5f5d36389c2c5a61d083613ef4ed377538
| 15,928
|
py
|
Python
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 3
|
2021-11-18T11:41:21.000Z
|
2022-02-08T22:01:20.000Z
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 20
|
2019-12-12T11:47:32.000Z
|
2021-06-02T07:55:18.000Z
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 2
|
2019-12-23T08:17:01.000Z
|
2022-02-08T22:01:21.000Z
|
"""
Generative language models.
Classes
-------
SMILESEncoderDecoder
A generative recurrent neural network to encode-decode SMILES strings.
SMILESEncoderDecoderFineTuner
The fine-tuner of SMILESEncoderDecoder model.
"""
__all__ = (
'SMILESEncoderDecoder',
'SMILESEncoderDecoderFineTuner',
)
import json
import warnings
from typing import Optional, Union
import mxnet as mx
from mxnet import gluon
from . import _gluon_common
from .base import SMILESEncoderDecoderABC
from ..description.common import OneHotEncoder
class SMILESEncoderDecoder(SMILESEncoderDecoderABC):
"""A generative recurrent neural network to encode-decode SMILES strings.
Parameters
----------
vocab_size : int
The vocabulary dimension, which will indicate the number of output
neurons of a decoder.
initialize : bool, default True
Whether to initialize model parameters.
When one decides to load parameters from a file, deferred
initialization is needless.
use_one_hot : bool, default False
Whether to use one-hot-encoding or an embedding layer.
embedding_dim : int, default 4
The output dimension of an embedding layer.
embedding_init : str or mxnet.init.Initializer,
default mxnet.init.Orthogonal()
The parameter initializer of an embedding layer.
embedding_prefix : str, default 'embedding_'
The prefix of an embedding block.
rnn : {'vanilla', 'lstm', 'gru'}, default 'lstm'
A recurrent layer.
n_rnn_layers : int, default 1
The number of layers of a (deep) recurrent layer.
n_rnn_units : int, default 64
The number of neurons in an RNN.
rnn_dropout : float, default 0.0
The dropout rate of a recurrent layer.
rnn_init : str or mxnet.init.Initializer,
default mxnet.init.Orthogonal()
The parameter initializer of a recurrent layer.
rnn_prefix : str, default 'encoder_'
The prefix of an encoder block.
n_dense_layers : int, default 1
The number of dense layers.
n_dense_units : int, default 128
The number of neurons in each dense layer.
dense_activation : str, default 'relu'
The activation function in a dense layer.
dense_dropout : float, default 0.0
The dropout rate of a dense layer.
dense_init : str or mxnet.init.Initializer,
default mxnet.init.Xavier()
The parameter initializer of a dense layer.
dense_prefix : str, default 'decoder_'
The prefix of a decoder block.
tie_weights : bool, default False
Whether to share the embedding block parameters w/ a decoder block.
dtype : str, default 'float32'
Data type.
ctx : mxnet.context.Context, default mxnet.context.cpu()
CPU or GPU.
prefix : str, default None
params : mxnet.gluon.ParameterDict, default None
Attributes
----------
ctx : mxnet.context.Context
The model's context.
embedding : OneHotEncoder or mxnet.gluon.nn.Embedding
An embedding layer.
encoder : mxnet.gluon.rnn.RNN or mxnet.gluon.rnn.LSTM
or mxnet.gluon.rnn.GRU
An RNN encoder.
decoder : mxnet.gluon.nn.Dense or mxnet.gluon.nn.Sequential
A Feed-Forward NN decoder.
"""
def __init__(
self,
vocab_size: int,
initialize: bool = True,
use_one_hot: bool = False,
embedding_dim: int = 4,
embedding_dropout: float = 0.,
embedding_init: Optional[
Union[str, mx.init.Initializer]] = mx.init.Uniform(),
embedding_prefix: str = 'embedding_',
rnn: str = 'lstm',
n_rnn_layers: int = 1,
n_rnn_units: int = 64,
rnn_dropout: float = 0.,
rnn_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Orthogonal(),
rnn_prefix: str = 'encoder_',
n_dense_layers: int = 1,
n_dense_units: int = 128,
dense_activation: str = 'relu',
dense_dropout: float = 0.,
dense_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
dense_prefix: str = 'decoder_',
tie_weights: bool = False,
dtype: Optional[str] = 'float32',
*,
ctx: mx.context.Context = mx.context.cpu(),
prefix: Optional[str] = None,
params: Optional[gluon.ParameterDict] = None,
):
warnings.warn(
message=(
f'{self.__class__.__name__} is deprecated; '
f'will be removed in 1.1.0; '
f'consider `moleculegen.estimation.SMILESRNN` instead.'
),
category=DeprecationWarning,
)
# Validate the formal parameters that are not explicitly sent into and
# validated in mxnet.gluon objects.
if not isinstance(use_one_hot, bool):
raise TypeError(
'`use_one_hot` must be either True for OneHotEncoder layer '
'or False for Embedding layer.'
)
if not isinstance(initialize, bool):
raise TypeError(
'`initialize` must be either True for deferred '
'initialization or False for no initialization.'
)
if rnn not in _gluon_common.RNN_MAP:
raise ValueError(
f'The recurrent layer must be one of '
f'{list(_gluon_common.RNN_MAP.keys())}.'
)
if n_dense_layers < 1:
raise ValueError(
'The number of dense layers must be positive non-zero.'
)
if (
tie_weights
and (
embedding_dim != n_rnn_units
or
n_dense_layers > 1
and embedding_dim != n_dense_units
)
):
raise ValueError(
f'When sharing weights, the number of hidden units must be equal to '
f'the embedding dimension.'
)
# Initialize mxnet.gluon.Block parameters.
super().__init__(ctx=ctx, prefix=prefix, params=params)
with self.name_scope():
# Define (and initialize) an embedding layer.
if use_one_hot:
self._embedding = OneHotEncoder(vocab_size)
else:
embedding_block = gluon.nn.Embedding(
input_dim=vocab_size,
output_dim=embedding_dim,
dtype=dtype,
prefix=embedding_prefix,
)
if embedding_dropout > 1e-3:
seq_prefix = f'{embedding_prefix.rstrip("_")}seq_'
self._embedding = gluon.nn.HybridSequential(prefix=seq_prefix)
self._embedding.add(embedding_block)
self._embedding.add(gluon.nn.Dropout(embedding_dropout))
shared_params = self._embedding[0].params if tie_weights else None
else:
self._embedding = embedding_block
shared_params = self._embedding.params if tie_weights else None
if initialize:
self._embedding.initialize(init=embedding_init, ctx=ctx)
# Select and initialize a recurrent block.
self._encoder = _gluon_common.RNN_MAP[rnn](
hidden_size=n_rnn_units,
num_layers=n_rnn_layers,
dropout=rnn_dropout,
dtype=dtype,
prefix=rnn_prefix,
)
if initialize:
self._encoder.initialize(init=rnn_init, ctx=ctx)
# Define and initialize a dense layer(s).
self._decoder = _gluon_common.mlp(
n_layers=n_dense_layers,
n_units=n_dense_units,
activation=dense_activation,
output_dim=vocab_size,
dtype=dtype,
dropout=dense_dropout,
prefix=dense_prefix,
params=shared_params,
)
if initialize:
self._decoder.initialize(init=dense_init, ctx=ctx)
@property
def embedding(self) -> Union[OneHotEncoder, gluon.nn.Embedding]:
"""Return the embedding layer.
"""
return self._embedding
@property
def encoder(self) -> Union[gluon.rnn.RNN, gluon.rnn.LSTM, gluon.rnn.GRU]:
"""Return the RNN encoder.
"""
return self._encoder
@property
def decoder(self) -> Union[gluon.nn.Dense, gluon.nn.Sequential]:
"""Return the Feed-Forward NN decoder.
"""
return self._decoder
@classmethod
def from_config(cls, config_file: str) -> 'SMILESEncoderDecoder':
"""Instantiate a model loading formal parameters from a JSON file `config_file`.
config_file : str
A JSON file to load formal parameters from.
model : SMILESEncoderDecoder
"""
with open(config_file) as fh:
raw_data = json.load(fh)
return cls(
vocab_size=raw_data['vocab_size'],
initialize=raw_data['initialize'],
tie_weights=raw_data['tie_weights'],
dtype=raw_data['dtype'],
ctx=_gluon_common.get_ctx(raw_data['ctx'].lower()),
prefix=raw_data['prefix'],
use_one_hot=raw_data['embedding']['use_one_hot'],
embedding_dim=raw_data['embedding']['dim'],
embedding_dropout=raw_data['embedding']['dropout'],
embedding_init=_gluon_common.INIT_MAP[raw_data['embedding']['init'].lower()],
embedding_prefix=raw_data['embedding']['prefix'],
rnn=raw_data['encoder']['rnn'],
n_rnn_layers=raw_data['encoder']['n_layers'],
n_rnn_units=raw_data['encoder']['n_units'],
rnn_dropout=raw_data['encoder']['dropout'],
rnn_init=_gluon_common.INIT_MAP[raw_data['encoder']['init'].lower()],
rnn_prefix=raw_data['encoder']['prefix'],
n_dense_layers=raw_data['decoder']['n_layers'],
n_dense_units=raw_data['decoder']['n_units'],
dense_activation=raw_data['decoder']['activation'],
dense_dropout=raw_data['decoder']['dropout'],
dense_init=_gluon_common.INIT_MAP[raw_data['decoder']['init'].lower()],
dense_prefix=raw_data['decoder']['prefix'],
)
@classmethod
def load_fine_tuner(
cls,
path: str,
update_features: bool = True,
decoder_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
) -> 'SMILESEncoderDecoder':
"""Create a new fine-tuner model: load model configuration and parameters, and
initialize decoder weights.
Parameters
----------
path : str
The path to the directory of model configuration and parameters.
path/config.json - the formal parameters of a model;
path/weights.params - the parameters of a model.
update_features : bool, default True
Whether to update embedding and encoder parameters during training.
decoder_init : str or mxnet.init.Initializer, default mxnet.init.Xavier()
A decoder initializer.
Returns
-------
model : SMILESEncoderDecoder
"""
model = cls.from_config(f'{path}/config.json')
model.load_parameters(f'{path}/weights.params', ctx=model.ctx)
if not update_features:
model.embedding.collect_params().setattr('grad_req', 'null')
model.encoder.collect_params().setattr('grad_req', 'null')
model.decoder.initialize(init=decoder_init, force_reinit=True, ctx=model.ctx)
return model
class SMILESEncoderDecoderFineTuner(SMILESEncoderDecoderABC):
"""The fine-tuner of SMILESEncoderDecoder model. Loads embedding and encoder blocks,
and trains a new decoder block.
Parameters
----------
model : SMILESEncoderDecoder
An encoder-decoder model to fine-tune.
output_dim : int
The number of output neurons.
initialize : bool, default True
Whether to initialize decoder's parameters.
update_features : bool, default True
Whether to update embedding and encoder parameters during training.
n_dense_layers : int, default 1
The number of dense layers.
n_dense_units : int, default 128
The number of neurons in each dense layer.
dense_activation : str, default 'relu'
The activation function in a dense layer.
dense_dropout : float, default 0.0
The dropout rate of a dense layer.
dense_init : str or mxnet.init.Initializer,
default mxnet.init.Xavier()
The parameter initializer of a dense layer.
dense_prefix : str, default 'decoder_'
The prefix of a decoder block.
dtype : str, default 'float32'
Data type.
ctx : mxnet.context.Context, default mxnet.context.cpu()
CPU or GPU.
prefix : str, default None
params : mxnet.gluon.ParameterDict, default None
Attributes
----------
ctx : mxnet.context.Context
The model's context.
embedding : OneHotEncoder or mxnet.gluon.nn.Embedding
An embedding layer.
encoder : mxnet.gluon.rnn.RNN or mxnet.gluon.rnn.LSTM
or mxnet.gluon.rnn.GRU
An RNN encoder.
decoder : mxnet.gluon.nn.Dense or mxnet.gluon.nn.Sequential
A Feed-Forward NN decoder.
"""
def __init__(
self,
model: SMILESEncoderDecoder,
output_dim: int,
initialize: bool = True,
update_features: bool = True,
n_dense_layers: int = 1,
n_dense_units: int = 128,
dense_activation: str = 'relu',
dense_dropout: float = 0.,
dense_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
dense_prefix: str = 'fine_tuner_decoder_',
dtype: Optional[str] = 'float32',
*,
ctx: mx.context.Context = mx.context.cpu(),
prefix: Optional[str] = None,
params: Optional[gluon.ParameterDict] = None,
):
warnings.warn(
message=(
f'{self.__class__.__name__} is deprecated; '
f'will be removed in 1.1.0; '
f'consider `moleculegen.estimation.SMILESRNN.load_fine_tuner` instead.'
),
category=DeprecationWarning,
)
super().__init__(ctx=ctx, prefix=prefix, params=params)
model.ctx = self.ctx
self._embedding = model.embedding
self._encoder = model.encoder
if not update_features:
self._embedding.collect_params().setattr('grad_req', 'null')
self._encoder.collect_params().setattr('grad_req', 'null')
self._decoder = _gluon_common.mlp(
n_layers=n_dense_layers,
n_units=n_dense_units,
activation=dense_activation,
output_dim=output_dim,
dtype=dtype,
dropout=dense_dropout,
prefix=dense_prefix,
params=None,
)
if initialize:
self._decoder.initialize(init=dense_init, ctx=self.ctx)
@property
def embedding(self) -> Union[OneHotEncoder, gluon.nn.Embedding]:
"""Return the embedding layer.
"""
return self._embedding
@property
def encoder(self) -> Union[gluon.rnn.RNN, gluon.rnn.LSTM, gluon.rnn.GRU]:
"""Return the RNN encoder.
"""
return self._encoder
@property
def decoder(self) -> Union[gluon.nn.Dense, gluon.nn.Sequential]:
"""Return the Feed-Forward NN decoder.
"""
return self._decoder
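# A sketch of the config.json layout consumed by
# SMILESEncoderDecoder.from_config, reconstructed from the keys it reads
# above; the values are illustrative, and the exact string vocabularies
# accepted by _gluon_common.INIT_MAP and get_ctx are not shown in this file:
#
#     {
#         "vocab_size": 32, "initialize": true, "tie_weights": false,
#         "dtype": "float32", "ctx": "cpu", "prefix": null,
#         "embedding": {"use_one_hot": false, "dim": 4, "dropout": 0.0,
#                       "init": "uniform", "prefix": "embedding_"},
#         "encoder": {"rnn": "lstm", "n_layers": 1, "n_units": 64,
#                     "dropout": 0.0, "init": "orthogonal", "prefix": "encoder_"},
#         "decoder": {"n_layers": 1, "n_units": 128, "activation": "relu",
#                     "dropout": 0.0, "init": "xavier", "prefix": "decoder_"}
#     }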
| 36.28246
| 89
| 0.597878
| 1,793
| 15,928
| 5.134412
| 0.123815
| 0.018249
| 0.011949
| 0.010428
| 0.516837
| 0.488051
| 0.47393
| 0.429503
| 0.42103
| 0.389746
| 0
| 0.004836
| 0.311966
| 15,928
| 438
| 90
| 36.365297
| 0.835204
| 0.338398
| 0
| 0.405172
| 0
| 0
| 0.121981
| 0.025771
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043103
| false
| 0
| 0.034483
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4be3c4c8872c7fe3765bcf529106a1cedf839f7c
| 7,008
|
py
|
Python
|
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | null | null | null |
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | 21
|
2020-08-19T05:05:45.000Z
|
2021-02-07T23:21:17.000Z
|
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | 1
|
2020-09-05T03:40:45.000Z
|
2020-09-05T03:40:45.000Z
|
"""PostDB class definition.
PostDB encapsulates interactions (lookup, scan, insert) with the posts table.
Typical usage example:
from post import Post
from post_db import PostDB
post_db = PostDB(mode = "dev")
post = Post(
post_url = "https://www.example.com/",
title = "Test",
main_image_url = "https://www.example.com/foo.png",
description = "Bar")
post_db.insert(post)
"""
import logging
import sqlalchemy
from util.database import Database
from util.post import Post
# Max post index to return in scan().
MAX_POSTS_TO_START = 1000
logger = logging.getLogger()
class PostDB:
"""PostDB class to interact with the posts table.
PostDB provides lookup, scan, insert operations for posts.
Attributes:
...
"""
def __init__(self, mode="dev"):
self.db_instance = Database.get_instance().connection
self.mode = mode
def lookup(self, key):
"""Looks up a post from posts table with the input key.
Args:
key: A hash of a post URL.
Returns:
A Post instance with retrieved data from posts table or None.
"""
post = None
with self.db_instance.connect() as conn:
# Execute the query and fetch all results
# bind the key as a parameter instead of interpolating it into the SQL
returned_posts = conn.execute(sqlalchemy.text("""
SELECT post_url_hash, post_url, title, post_author, post_author_hash,
post_published_date, submission_time,
main_image_url, description, user_display_name,
user_email, user_photo_url, user_id, user_provider_id
FROM {mode}_posts_serving
where post_url_hash = :key
""".format(mode=self.mode)), key=key
).fetchall()
if len(returned_posts) > 0:
row = returned_posts[0]
post = Post(
post_url=row[1], title=row[2], author=row[3],
author_hash=row[4], published_date=row[5],
submission_time=row[6], main_image_url=row[7],
description=row[8], user_display_name=row[9],
user_email=row[10], user_photo_url=row[11],
user_id=row[12], user_provider_id=row[13])
return post
def scan(self, author_key="", start_idx=0, count=10):
"""Scans posts table and resturns a list of Post instances.
Posts of [start_idx, start_idx + count) records will be returned.
Args:
author_key: return posts written by the 'author' if not empty.
start_idx: The start index of the scan.
count: The number of posts to return.
Returns:
A list of posts.
"""
# pylint: disable=fixme
# TODO: Can we change 'start' as an absolute position e.g. timestamp
# to make the result consistent even when there is a new item
# to posts_serving db.
posts = []
if start_idx < 0 or start_idx > MAX_POSTS_TO_START:
logger.warning("start_idx is out of range: %d", start_idx)
return posts # Empty list
if count < 0 or count > MAX_POSTS_TO_START:
logger.warning("count is out of range: %d", count)
return posts # Empty list
with self.db_instance.connect() as conn:
where_str = ""
if author_key:
where_str = "where post_author_hash = '" + author_key + "'"
sql_str = """
SELECT post_url_hash, post_url, title, post_author,
post_author_hash, post_published_date, submission_time,
main_image_url, description, user_display_name, user_email,
user_photo_url, user_id, user_provider_id
FROM {mode}_posts_serving
{where_clause}
ORDER BY submission_time DESC LIMIT {limit:d}
""".format(
mode=self.mode, where_clause=where_str,
limit=start_idx + count)
# Execute the query and fetch all results
recent_posts = conn.execute(sqlalchemy.text(sql_str), **params).fetchall()
if len(recent_posts) > start_idx:
for row in recent_posts[start_idx:]:
posts.append(
Post(
post_url=row[1], title=row[2], author=row[3],
author_hash=row[4], published_date=row[5],
submission_time=row[6], main_image_url=row[7],
description=row[8], user_display_name=row[9],
user_email=row[10], user_photo_url=row[11],
user_id=row[12], user_provider_id=row[13]
)
)
return posts
def insert(self, post):
"""Insert a post record into posts table.
Args:
post: A Post instance.
"""
if not post.is_valid():
logger.error("Invalid post.")
return
stmt = sqlalchemy.text("""
INSERT INTO {mode}_posts_serving
(post_url_hash, post_url, post_author, post_author_hash,
post_published_date, submission_time, title, main_image_url,
description, user_id, user_display_name, user_email,
user_photo_url, user_provider_id)
VALUES
(:url_hash, :url, :author, :author_hash, :published_date,
:submission_time, :title, :main_image_url, :description,
:user_id, :user_display_name, :user_email, :user_photo_url,
:user_provider_id)
""".format(mode=self.mode)
)
logger.info(stmt)
try:
with self.db_instance.connect() as conn:
conn.execute(
stmt, url_hash=post.post_url_hash, url=post.post_url,
author=post.author, author_hash=post.author_hash,
published_date=post.published_date,
submission_time=post.submission_time,
title=post.title, main_image_url=post.main_image_url,
description=post.description, user_id=post.user_id,
user_display_name=post.user_display_name,
user_email=post.user_email,
user_photo_url=post.user_photo_url,
user_provider_id=post.user_provider_id)
except self.db_instance.Error as ex:
logger.exception(ex)
return
def delete(self, key):
"""Deletes a post from posts table with the input key.
Args:
key: A hash of a post URL.
"""
with self.db_instance.connect() as conn:
conn.execute("""
DELETE FROM {mode}_posts_serving
where post_url_hash = '{key}'
""".format(mode=self.mode, key=key)
)
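# Usage sketch, extending the module docstring's example above ("dev" mode
# targets the dev_posts_serving table; the lookup key is a post-URL hash):
#
#     post_db = PostDB(mode="dev")
#     post = post_db.lookup(key="<post_url_hash>")
#     recent = post_db.scan(start_idx=0, count=10)  # newest first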
| 36.884211
| 85
| 0.558219
| 841
| 7,008
| 4.413793
| 0.209275
| 0.028287
| 0.029095
| 0.036369
| 0.462284
| 0.412985
| 0.390894
| 0.356412
| 0.356412
| 0.333782
| 0
| 0.00996
| 0.355308
| 7,008
| 189
| 86
| 37.079365
| 0.811642
| 0.207192
| 0
| 0.227273
| 0
| 0
| 0.30577
| 0
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.045455
| false
| 0
| 0.036364
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4be4aa437d26726d4e8976afdb8dcefd45f45a42
| 9,491
|
py
|
Python
|
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from slackbot.bot import respond_to
from slacker import Slacker
import slackbot_settings
# @respond_to("疲れた")
# @respond_to("つかれた")
# def cheer(message):
# message.reply("ファイト!")
import MeCab
import random
import ChatBotScript
import SentenceGenerator
import datetime
import webbrowser
import time
import sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
import requests
from requests.exceptions import Timeout
import os
def count(f_count):
f_count += 1  # NOTE: no effect for the caller; ints are passed by value
# count_talk = 0
def weather(message, something, number):
try: citycode = sys.argv[1]
except: citycode = '130010'  # Tokyo
resp = urllib2.urlopen('http://weather.livedoor.com/forecast/webservice/json/v1?city=%s'%citycode).read().decode('utf-8')
# convert the loaded JSON data into a dictionary
resp = json.loads(resp)
# tomorrow's weather
if number == 1:
message.reply("私の住んでいるところ" + resp['title'][7:] + "は" + resp['forecasts'][1]['telop'] + "になると思います。")
# today's weather
else:
message.reply("私の住んでいるところ" + resp['title'][7:] + "は" + resp['forecasts'][0]['telop'] + "です。")
# current time
def time_now(message, something):
todaydetail = datetime.datetime.today()
message.reply("現在時刻は" + str(todaydetail.hour) + ":" + str(todaydetail.minute) + "です。")
# greeting
# def greeting():
# todaydetail = datetime.datetime.today()
# if 4 <= todaydetail.hour <= 10:
# message.reply(ChatBotScript.greeting[0] + symbol[random.randrange(2)])
# elif 11 <= todaydetail.hour <= 17:
# message.reply(ChatBotScript.greeting[1] + symbol[random.randrange(2)])
# else:
# message.reply(ChatBotScript.greeting[2])
# weather conversation
def weather_talk():
# keep the counters on the function object: the nested handler below reads
# weather_talk.count_weather, so plain locals would raise AttributeError
weather_talk.count_weather = 0
weather_talk.count = 0
# input
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--weather--")
# pattern matching
if ("天気" in sentence or "晴れ" in sentence or "曇り" in sentence or "雨" in sentence) and ("?" in sentence or "?" in sentence or "何" in sentence) and ("明日" not in sentence):
weather_talk.count_weather = 1
weather(message, something, 0)
elif ("天気" in sentence or "晴れ" in sentence or "曇り" in sentence or "雨" in sentence) and ("?" in sentence or "?" in sentence or "何" in sentence) and ("明日" in sentence):
weather_talk.count_weather = 1
weather(message, something, 1)
elif ("どこに" in sentence and "住んで" in sentence) or ("どこ住み" in sentence):
message.reply("どこかです。")
elif "リセット" in sentence:
count_talk = 0
main_talk()
elif "晴れ" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.sunny))
elif "曇" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.cloudy))
elif "雨" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.rainy))
elif ("風" in sentence and "強い" in sentence) or ("強風" in sentence):
message.reply("吹き飛ばされないように気をつけてくださいね")
elif "台風" in sentence:
message.reply(random.choice(ChatBotScript.typhoon))
elif "元気" in sentence:
message.reply(random.choice(ChatBotScript.physical_condition))
elif "本当" in sentence and ("?" in sentence or "?" in sentence):
message.reply(random.choice(ChatBotScript.response2))
elif "今何時" in sentence:
time_now(message, something)
elif "元気" in sentence or ("本当" in sentence and ("?" in sentence or "?" in sentence)) or "朝食" in sentence or "昼食" in sentence or "晩飯" in sentence or "夜食" in sentence or "食事" in sentence or "ご飯" in sentence or "ランチ" in sentence or "ディナー" in sentence or "かっこいい" in sentence or "かっこ良い" in sentence or "かわいい" in sentence or "高い" in sentence or "安い" in sentence or "難しい" in sentence or "簡単" in sentence or "面白" in sentence or "おもしろ" in sentence or "おいし" in sentence or "美味し" in sentence or (("体重" in sentence or "身長" in sentence or "スリーサイズ" in sentence) and ("?" in sentence or "?" in sentence)):
weather_talk.count = 1
main_talk()
else:
if weather_talk.count_weather == 1:
weather_talk.count_weather += 1
message.reply("今週の天気は安定しそうですか?")
elif weather_talk.count_weather == 3:
if "はい" in sentence or "よろ" in sentence or "お願い" in sentence or "調べて" in sentence:
message.reply("http://weather.yahoo.co.jp/weather/")
weather_talk.count = 1
main_talk()
else:
message.reply("わかりました。何か別の話をしませんか?")
weather_talk.count = 1
count_talk = 2  # module-level counter ('talk' is local to main_talk and not visible here)
main_talk()
else:
weather_talk.count_weather = 3
message.reply("天気を調べられるページのリンク載せましょうか?")
def food_talk():
global f_count
# input
@respond_to("(.*)")
def sentence(message, something):
global f_count
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--food--")
if "ない" in sentence or "いや" in sentence:
message.reply("では、おすすめの食べ物ありますか?")
food_talk()
elif "リセット" in sentence:
count_talk = 0
main_talk()
elif "元気" in sentence or ("本当" in sentence and ("?" in sentence or "?" in sentence)) or "かっこいい" in sentence or "かっこ良い" in sentence or "かわいい" in sentence or "高い" in sentence or "安い" in sentence or "難しい" in sentence or "簡単" in sentence or "面白" in sentence or "おもしろ" in sentence or (("体重" in sentence or "身長" in sentence or "スリーサイズ" in sentence) and ("?" in sentence or "?" in sentence)):
main_talk()
else:
if f_count == 0:
message.reply("では、5つ質問をするので答えてください。答えていただいた条件から当てます。")
message.reply("晩御飯の種類は?(スープ系・どんぶり系・定食系・パン系など)")
f_count = 1
elif f_count == 1:
message.reply("晩御飯の味は?")
f_count = 2
elif f_count == 2:
message.reply("晩御飯の色は?")
f_count = 3
elif f_count == 3:
message.reply("晩御飯は温かいもの?冷たいもの?")
f_count = 4
elif f_count == 4:
message.reply("晩御飯の食感は?")
f_count = 5
elif f_count == 5:
message.reply("予測したメニューを送ります。正解ですか?")
f_count = 0
c_name = "guided_bot_test"
f_path = "food_result.pdf"
slacker = Slacker(slackbot_settings.API_TOKEN)
def upload():
try:
slacker.files.upload(f_path, channels=[c_name], title="晩御飯の予測結果")
except requests.exceptions.Timeout:
print("Timeout occurred")
upload()
upload()
main_talk()
def work_talk():
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
if "いい" in sentence or "送って" in sentence or "確認" in sentence or "大丈夫" in sentence or "わか" in sentence:
message.reply("ありがとうございます。確認よろしくお願いします。")
c_name = "guided_bot_test"
f_path = "work.pdf"
slacker = Slacker(slackbot_settings.API_TOKEN)
def upload():
try:
slacker.files.upload(f_path, channels=[c_name], title="議事録")
except requests.exceptions.Timeout:
print("Timeout occurred")
upload()
upload()
main_talk()
elif "リセット" in sentence:
count_talk = 0
main_talk()
else:
message.reply("了解しました。別の機会にお願いします。")
main_talk()
def main_talk():
# topic selection
@respond_to("(.*)")
def talk(message, something):
global count_talk
if count_talk == 0:
message.reply("何のお話をしましょうか?")
count_talk = 2
elif count_talk == 1:
message.reply("何の話ですか?")
else:
pass
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--main--")
if "天気" in sentence:
message.reply("あなたの地域の今日の天気はどうですか?")
weather_talk()
count_talk = 1
elif "食" in sentence or "飯" in sentence:
message.reply("昨日の晩御飯が何か当てましょうか?")
food_talk()
count_talk = 1
elif "仕事" in sentence or "職場" in sentence:
message.reply("急な連絡ですみません。前回の会議の件で少し気になったことがあったので、今晩確認してもらいたいのですがよろしいでしょうか?よろしければ、気になった部分の資料をすぐに送りますので確認してください。")
work_talk()
count_talk = 1
#--------------
#----- Main -----
#--------------
t_count = 0
f_count = 0
count_talk = 0
# count()
symbol = ["", "!", "?"]
main_talk()
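# The weather() helper above reads only these fields of the livedoor
# forecast JSON; the values are illustrative and the full payload contains
# many more keys:
#
#     {
#         "title": "...の天気",              # sliced with resp['title'][7:]
#         "forecasts": [
#             {"telop": "晴れ"},             # index 0: today's weather
#             {"telop": "曇り"}              # index 1: tomorrow's weather
#         ]
#     }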
| 38.425101
| 599
| 0.565272
| 1,085
| 9,491
| 4.852535
| 0.213825
| 0.193732
| 0.14359
| 0.0585
| 0.490218
| 0.459829
| 0.453941
| 0.409307
| 0.40057
| 0.376068
| 0
| 0.011006
| 0.310715
| 9,491
| 246
| 600
| 38.581301
| 0.793335
| 0.083658
| 0
| 0.391534
| 0
| 0
| 0.110059
| 0.026677
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0.005291
| 0.095238
| 0
| 0.169312
| 0.010582
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4be5a05c40ee31ef9f187f13c41d25d878a65ca6
| 7,099
|
py
|
Python
|
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | 2
|
2021-10-04T05:53:29.000Z
|
2022-01-21T12:53:43.000Z
|
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | null | null | null |
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | 1
|
2021-10-04T05:53:32.000Z
|
2021-10-04T05:53:32.000Z
|
from PIL import Image
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from Streamlit_Pix2Pix_Generator import Generator
import numpy as np
import urllib.request
from keras.preprocessing.image import load_img
from keras.models import load_model
import requests
# Page intro
st.title('Pix2Pix – See Your Sketches Brought to Life!')
st.text('')
st.markdown('Sketch out an object using the canvas below, and let your computer do the rest of the heavy lifting.')
st.text('')
st.text('')
# Links and FAQ section
st.sidebar.markdown("### [SRGANs Web Page](https://share.streamlit.io/nb094/easy-gans/main/SRGAN/Streamlit_SRGAN_Main.py)")
st.sidebar.markdown("### [NumGen Web Page](https://share.streamlit.io/nb094/easy-gans/main/NumGen/Streamlit_NumGen_Main.py)")
st.sidebar.text('')
expander = st.sidebar.expander("Pix2Pix Frequently-Asked Questions", expanded=True)
expander.write("**What type of machine learning is being used?** \n\n \
The model's architecture is based on solving image-to-image translation with a Conditional Generative Adversarial Network, or cGAN. \n\n   \n\n \
**How do GANs work?** \n\n \
There are two main components to GAN models: a *discriminator* and a *generator*. \n\n \
The purpose of the discriminator is to classify images presented to it as real or fake. \
The purpose of the generator is to create plausible images to fool the discriminator. \n\n \
After many cycles of training, the skill of the generator improves enough to produce some impressive results! \n\n   \n\n \
**What is the difference between a GAN and a cGAN?** \n\n \
The basic idea behind cGANs is the same. The primary difference is way the model improves after each cycle, which is based on \
a *loss* calculation. For cGANs, this calculation optimizes the structure or joint configuration of the output. \n\n   \n\n \
**What are the possible applications of cGANs?** \n\n \
cGANs have been used in self-driving cars, creating maps from satellite images, colorizing black and white photos, and much more. \n\n   \n\n \
**Where can I read more about cGANs?** \n\n \
For more information on cGANs, check out [this paper.](https://arxiv.org/abs/1611.07004) \n\n   \n\n \
**Who developed this web page?** \n\n \
This web page and the underlying models were developed by Niklas Bergen with the help of some additional resources. \
Check out the [GitHub repo](https://github.com/NB094/Easy-GANs) for more information.")
##### CODE FOR Pix2Pix #####
# Define page layout
left_column, right_column = st.columns([2,1])
# Create selection box and logic for various sketch subjects.
subject_selection = left_column.selectbox(label = 'Select what you wish to draw...', options = ['Human', 'Shoe', 'Handbag'], index = 0)
if subject_selection == 'Human':
stroke_color = '#F44F36'
background_color='#000000'
else:
stroke_color = '#F44F36'
background_color='#FFFFFF'
# Initialize a random number in the session state. Used to randomize examples shown.
if 'random_num' not in st.session_state:
st.session_state.random_num = 1
# Change the random example number whenever the radio buttons are changed.
def random_num():
st.session_state.random_num = np.random.randint(1,5+1)
return
# Retrieve a randomly-selected example image
urllib.request.urlretrieve(f'https://github.com/NB094/Easy-GANs/raw/main/Pix2Pix/example_images_streamlit/example_{str.lower(subject_selection)}{st.session_state.random_num}.jpg?raw=true', \
'example_img.jpg')
# Create more options menus
canvas_mode = st.radio(label = 'Select canvas mode...', options = ('Draw on a blank canvas', 'View an example sketch', 'Try tracing an example sketch'), \
index = 1, help='Example sketches are chosen randomly out of 5 options.', on_change=random_num)
drawing_mode = right_column.selectbox(label = "Drawing tool:", options = ("freedraw", "line", "rect", "circle", "polygon", "transform"), index = 0)
# Create the drawing canvas
if canvas_mode == 'View an example sketch':
st.image('example_img.jpg')
else:
canvas_result = st_canvas(
fill_color="rgba(255, 255, 255, 0.0)", # Fill colors from shape objects have full transparency
stroke_width=1,
stroke_color=stroke_color,
background_color=background_color,
background_image=Image.open('example_img.jpg') if canvas_mode == 'Try tracing an example sketch' else None,
height=256,
width=256,
drawing_mode=drawing_mode,
key="canvas")
##### SKETCH PROCESSING #####
if canvas_mode == 'View an example sketch':
drawn_image = load_img('example_img.jpg')
else:
# Store canvas sketch data into a variable
drawn_image = canvas_result.image_data
# try/except guard to prevent the website from temporarily throwing an error when unchecking the box.
try:
# Convert sketch data into parseable numpy array
drawn_image = np.array(Image.fromarray((drawn_image * 255).astype(np.uint8)).resize((256, 256)).convert('RGB'))
drawn_image = (drawn_image * 255).astype(np.uint8)
# If needed, convert black background to white before passing image to generator.
if subject_selection != 'Human':
drawn_image[drawn_image == 0] = 255
except:
pass
# Download and load model files; cache them due to large file sizes
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def cache_all_models():
st.text('Downloading models...')
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211074&authkey=AKxNvSc7K-dVn9k')
with open('humans_fully_trained.h5', 'wb') as f:
f.write(r.content)
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211076&authkey=AOXgLqS3bQIuwbU')
with open('shoes_fully_trained.h5', 'wb') as f:
f.write(r.content)
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211075&authkey=AAtjUZTrsNbE2zk')
with open('handbags_fully_trained.h5', 'wb') as f:
f.write(r.content)
humans_model = load_model('humans_fully_trained.h5', compile=False)
shoes_model = load_model('shoes_fully_trained.h5', compile=False)
handbags_model = load_model('handbags_fully_trained.h5', compile=False)
st.text('Download complete')
return humans_model, shoes_model, handbags_model
humans_model, shoes_model, handbags_model = cache_all_models()
if subject_selection=='Human':
model = humans_model
elif subject_selection=='Shoe':
model = shoes_model
elif subject_selection=='Handbag':
model = handbags_model
# try/except guard to prevent the website from temporarily throwing an error when unchecking the box.
try:
# Pass numpy array into generator, and predict
gen = Generator(drawn_image, subject_selection)
gen_image = gen.generate_image(model)
# Display prediction
st.image(gen_image)
except:
pass
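# To try the page locally (standard Streamlit usage; the model files above
# are downloaded on first run and cached):
#
#     $ streamlit run Pix2Pix/Streamlit_Pix2Pix_Main.py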
| 41.273256
| 190
| 0.720947
| 1,022
| 7,099
| 4.900196
| 0.347358
| 0.007189
| 0.016773
| 0.006989
| 0.228834
| 0.171925
| 0.132388
| 0.120008
| 0.120008
| 0.120008
| 0
| 0.031649
| 0.172137
| 7,099
| 172
| 191
| 41.273256
| 0.820316
| 0.136357
| 0
| 0.196262
| 0
| 0.149533
| 0.250123
| 0.022977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018692
| false
| 0.018692
| 0.084112
| 0
| 0.121495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4be8b0689a8d30b24d0eb351d73f642c1be6c5a9
| 4,584
|
py
|
Python
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 17
|
2017-05-30T13:21:18.000Z
|
2022-03-27T13:08:17.000Z
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 16
|
2017-06-11T12:55:06.000Z
|
2019-02-20T21:00:59.000Z
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 17
|
2017-05-03T16:09:46.000Z
|
2020-05-13T21:19:37.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import os
from __main__ import send_cmd_help
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
import discord
LOOP_INTERVAL = 60
SERVER_DEFAULTS = {
'autorole': {
"role_name": "Guest",
"role_id": None,
"timer": 86400
}
}
PATH = os.path.join('data', 'rbs')
JSON = os.path.join(PATH, 'settings.json')
class RBS:
"""Reddit Band System (RBS) general utility cog.
Functionality:
# Autorole
Automatically convert users with no role-assignements to Guest
"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = dataIO.load_json(JSON)
self.task = bot.loop.create_task(self.loop_task())
async def loop_task(self):
"""Loop tasks.
- auto-role guests.
"""
await self.bot.wait_until_ready()
# wait between iterations so the task does not respawn in a busy loop
await asyncio.sleep(LOOP_INTERVAL)
if self is self.bot.get_cog('RBS'):
self.task = self.bot.loop.create_task(self.loop_task())
@checks.mod_or_permissions()
@commands.group(pass_context=True, no_pm=True)
async def setrbs(self, ctx):
"""Set RBS settings."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@checks.serverowner_or_permissions(manage_server=True)
@setrbs.command(name="initserver", pass_context=True, no_pm=True)
async def setrbs_initserver(self, ctx):
"""Initialize server settings to default values.
Requires confirmation as this is a destructive process.
"""
await self.bot.say(
'This is a destructive operation. '
'Are you sure that you want to continue? '
'Type **I agree** to execute.')
answer = await self.bot.wait_for_message(
timeout=30,
author=ctx.message.author)
if answer is not None and answer.content == 'I agree':  # wait_for_message returns a Message, or None on timeout
self.settings = SERVER_DEFAULTS
dataIO.save_json(JSON, self.settings)
await self.bot.say(
'Settings set to server defaults.')
else:
await self.bot.say(
'Operation aborted.')
@setrbs.command(name="autorolename", pass_context=True, no_pm=True)
async def setrbs_autorolename(self, ctx, role_name):
"""Set auto-role’s role name.
This is the role name automatically assigned to
users when they have been on the server for x amount of time.
The exact amount of time to use is also settable.
"""
if 'autorole' not in self.settings:
self.settings = SERVER_DEFAULTS
dataIO.save_json(JSON, self.settings)
server = ctx.message.server
role = discord.utils.get(server.roles, name=role_name)
if role is None:
await self.bot.say(
'{} is not a valid role on this server.'.format(
role_name))
return
self.settings['autorole']['role_name'] = role.name
self.settings['autorole']['role_id'] = role.id
await self.bot.say(
'Auto-role’s role set to {}'.format(
role.name))
dataIO.save_json(JSON, self.settings)
def check_folder():
"""Check folder."""
if not os.path.exists(PATH):
os.makedirs(PATH)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, SERVER_DEFAULTS)
def setup(bot):
"""Setup bot."""
check_folder()
check_file()
n = RBS(bot)
bot.add_cog(n)
| 29.960784 | 75 | 0.648778 | 619 | 4,584 | 4.707593 | 0.365105 | 0.026424 | 0.028826 | 0.025738 | 0.106726 | 0.106726 | 0.096431 | 0.076527 | 0.076527 | 0.038435 | 0 | 0.00382 | 0.257635 | 4,584 | 152 | 76 | 30.157895 | 0.852483 | 0.272688 | 0 | 0.12987 | 0 | 0 | 0.119691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051948 | false | 0.038961 | 0.077922 | 0 | 0.155844 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4beabadec3de979135423c3abb7be1e6a84c41ad | 2,845 | py | Python | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | ["Apache-2.0"] | 21 | 2017-05-01T10:15:41.000Z | 2022-01-25T07:02:44.000Z | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | ["Apache-2.0"] | 7 | 2017-02-09T03:36:37.000Z | 2017-08-22T11:23:03.000Z | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | ["Apache-2.0"] | 5 | 2017-05-30T01:56:31.000Z | 2020-10-05T08:21:43.000Z |
"""
.. module:: test_iterfunction
:synopsis: Unit tests for iterfunction module
"""
import time
import nutsflow.iterfunction as itf
from six.moves import range
def test_length():
assert itf.length(range(10)) == 10
assert itf.length([]) == 0
def test_interleave():
it1 = [1, 2]
it2 = 'abc'
it = itf.interleave(it1, it2)
assert list(it) == [1, 'a', 2, 'b', 'c']
assert list(itf.interleave([], [])) == []
assert list(itf.interleave('12', [])) == ['1', '2']
def test_take():
it = itf.take(range(10), 3)
assert list(it) == [0, 1, 2]
it = itf.take(range(10), 0)
assert list(it) == []
it = itf.take(range(0), 3)
assert list(it) == []
def test_nth():
assert itf.nth(range(10), 2) == 2
assert itf.nth(range(10), 100) is None
assert itf.nth(range(10), 100, -1) == -1
def test_unique():
assert list(itf.unique([1, 2, 3])) == [1, 2, 3]
assert list(itf.unique([2, 3, 1, 1, 2, 4])) == [2, 3, 1, 4]
assert list(itf.unique([])) == []
data = [(1, 'a'), (2, 'a'), (3, 'b')]
it = itf.unique(data, key=lambda t: t[1])
assert list(it) == [(1, 'a'), (3, 'b')]
def test_chunked():
it = itf.chunked(range(5), 2)
assert list(map(tuple, it)) == [(0, 1), (2, 3), (4,)]
it = itf.chunked(range(6), 3)
assert list(map(tuple, it)) == [(0, 1, 2), (3, 4, 5)]
assert list(itf.chunked([], 2)) == []
def test_consume():
it = iter(range(10))
itf.consume(it)
assert next(it, None) is None
it = iter(range(10))
itf.consume(it, 5)
assert next(it, None) == 5
def test_flatten():
assert list(itf.flatten([])) == []
iterable = [(1, 2), (3, 4, 5)]
assert list(itf.flatten(iterable)) == [1, 2, 3, 4, 5]
def test_flatmap():
f = lambda n: str(n) * n
it = itf.flatmap(f, [1, 2, 3])
assert list(it) == ['1', '2', '2', '3', '3', '3']
it = itf.flatmap(f, [])
assert list(it) == []
def test_partition():
pred = lambda x: x < 6
smaller, larger = itf.partition(range(10), pred)
assert list(smaller) == [0, 1, 2, 3, 4, 5]
assert list(larger) == [6, 7, 8, 9]
def test_prefetch_iterator_speed():
def sleep():
time.sleep(0.01)
def number_generator():
for i in range(10):
sleep()
yield i
start = time.time()
for _ in number_generator():
sleep()
duration1 = time.time() - start
start = time.time()
for _ in itf.PrefetchIterator(number_generator()):
sleep()
duration2 = time.time() - start
assert duration2 < duration1
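# Why the prefetching version should win, roughly: the plain loop pays for
# producer and consumer sleeps in series (about 10 * (0.01 + 0.01) = 0.2 s),
# while PrefetchIterator produces in a background thread so the two sleeps
# overlap (about 10 * 0.01 = 0.1 s). The figures are illustrative; real
# timings vary with scheduler jitter.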
def test_prefetch_iterator_thread_safe():
from multiprocessing.pool import ThreadPool
data = set(range(100))
prefetch_it = itf.PrefetchIterator(data)
pool = ThreadPool()
result = set(pool.map(lambda x: 2 * x - x, prefetch_it))
assert result == data
| 23.319672 | 63 | 0.555712 | 422 | 2,845 | 3.687204 | 0.208531 | 0.122108 | 0.066838 | 0.012853 | 0.26928 | 0.154242 | 0.125964 | 0.09383 | 0.07455 | 0.07455 | 0 | 0.06215 | 0.247803 | 2,845 | 121 | 64 | 23.512397 | 0.664953 | 0.027417 | 0 | 0.121951 | 0 | 0 | 0.007617 | 0 | 0 | 0 | 0 | 0 | 0.341463 | 1 | 0.170732 | false | 0 | 0.04878 | 0 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4beb4afba8d4e82f6ec0587a4a66ce29bdfa1be9 | 6,591 | py | Python | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | ["Apache-2.0"] | 11 | 2017-01-30T21:53:20.000Z | 2020-05-29T22:39:19.000Z | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | ["Apache-2.0"] | 139 | 2016-03-09T19:09:59.000Z | 2021-09-03T17:14:00.000Z | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | ["Apache-2.0"] | 10 | 2016-12-19T22:39:42.000Z | 2021-03-09T19:23:15.000Z |
"""
Upload convention tests.
"""
from io import BytesIO
from json import loads
from uuid import uuid4
from hamcrest import (
all_of,
anything,
assert_that,
contains,
equal_to,
has_entries,
has_entry,
has_item,
has_key,
is_,
is_not,
)
from marshmallow import Schema, fields
from microcosm.api import create_object_graph
from microcosm_flask.conventions.base import EndpointDefinition
from microcosm_flask.conventions.swagger import configure_swagger
from microcosm_flask.conventions.upload import configure_upload
from microcosm_flask.namespaces import Namespace
from microcosm_flask.operations import Operation
from microcosm_flask.swagger.definitions import build_path
from microcosm_flask.tests.conventions.fixtures import Person
class FileExtraSchema(Schema):
extra = fields.String(missing="something")
class FileResponseSchema(Schema):
id = fields.UUID(required=True)
class FileController:
def __init__(self):
self.calls = []
def upload(self, files, extra):
self.calls.append(
dict(
files=files,
extra=extra,
),
)
def upload_for_person(self, files, extra, person_id):
self.calls.append(
dict(
extra=extra,
files=files,
person_id=person_id,
),
)
return dict(
id=person_id,
)
class TestUpload:
def setup(self):
self.graph = create_object_graph(name="example", testing=True)
self.ns = Namespace(subject="file")
self.relation_ns = Namespace(subject=Person, object_="file")
self.controller = FileController()
UPLOAD_MAPPINGS = {
Operation.Upload: EndpointDefinition(
func=self.controller.upload,
request_schema=FileExtraSchema(),
),
}
UPLOAD_FOR_MAPPINGS = {
Operation.UploadFor: EndpointDefinition(
func=self.controller.upload_for_person,
request_schema=FileExtraSchema(),
response_schema=FileResponseSchema(),
),
}
configure_upload(self.graph, self.ns, UPLOAD_MAPPINGS)
configure_upload(self.graph, self.relation_ns, UPLOAD_FOR_MAPPINGS)
configure_swagger(self.graph)
self.client = self.graph.flask.test_client()
def test_upload_url_for(self):
with self.graph.app.test_request_context():
url = self.ns.url_for(Operation.Upload)
assert_that(url, is_(equal_to("http://localhost/api/file")))
def test_upload_for_url_for(self):
with self.graph.app.test_request_context():
url = self.relation_ns.url_for(Operation.UploadFor, person_id=1)
assert_that(url, is_(equal_to("http://localhost/api/person/1/file")))
def test_upload_swagger_path(self):
with self.graph.app.test_request_context():
path = build_path(Operation.Upload, self.ns)
assert_that(path, is_(equal_to("/api/file")))
def test_upload_for_swagger_path(self):
with self.graph.app.test_request_context():
path = build_path(Operation.UploadFor, self.relation_ns)
assert_that(path, is_(equal_to("/api/person/{person_id}/file")))
def test_swagger(self):
response = self.client.get("/api/swagger")
assert_that(response.status_code, is_(equal_to(200)))
data = loads(response.data)
upload = data["paths"]["/file"]["post"]
upload_for = data["paths"]["/person/{person_id}/file"]["post"]
# both endpoints return form data
assert_that(
upload["consumes"],
contains("multipart/form-data"),
)
assert_that(
upload_for["consumes"],
contains("multipart/form-data"),
)
# one endpoint gets an extra query string parameter (and the other doesn't)
assert_that(
upload["parameters"],
has_item(
has_entries(name="extra"),
),
)
assert_that(
upload_for["parameters"],
has_item(
is_not(has_entries(name="extra")),
),
)
# one endpoint gets a custom response type (and the other doesn't)
assert_that(
upload["responses"],
all_of(
has_key("204"),
is_not(has_key("200")),
has_entry("204", is_not(has_key("schema"))),
),
)
assert_that(
upload_for["responses"],
all_of(
has_key("200"),
is_not(has_key("204")),
has_entry("200", has_entry("schema", has_entry("$ref", "#/definitions/FileResponse"))),
),
)
def test_upload(self):
response = self.client.post(
"/api/file",
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
),
)
assert_that(response.status_code, is_(equal_to(204)))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="something",
),
))
def test_upload_for(self):
person_id = uuid4()
response = self.client.post(
"/api/person/{}/file".format(person_id),
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
),
)
assert_that(response.status_code, is_(equal_to(200)))
response_data = loads(response.get_data().decode("utf-8"))
assert_that(response_data, is_(equal_to(dict(
id=str(person_id),
))))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="something",
person_id=person_id,
),
))
def test_upload_multipart(self):
response = self.client.post(
"/api/file",
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
extra="special",
),
)
assert_that(response.status_code, is_(equal_to(204)))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="special",
),
))
| 29.823529 | 103 | 0.576847 | 708 | 6,591 | 5.141243 | 0.193503 | 0.052198 | 0.022253 | 0.018681 | 0.401374 | 0.308242 | 0.297253 | 0.297253 | 0.263736 | 0.231044 | 0 | 0.007679 | 0.308451 | 6,591 | 220 | 104 | 29.959091 | 0.790917 | 0.029586 | 0 | 0.405556 | 0 | 0 | 0.081441 | 0.012216 | 0 | 0 | 0 | 0 | 0.105556 | 1 | 0.066667 | false | 0 | 0.072222 | 0 | 0.177778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf119d7edb9acf18b1f1e428e435fcd728fc1f4 | 866 | py | Python | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | ["Apache-2.0"] | null | null | null | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | ["Apache-2.0"] | null | null | null | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import sys
import json
from flatten_dict import flatten as _flatten
try:
data = json.load(sys.stdin)['object']
except Exception as ex:
print("Missing or invalid test data:", ex)
sys.exit(1)
try:
results = json.load(open(sys.argv[1], "r"))['results']
except Exception as ex:
print("Missing or invalid test results:", ex)
sys.exit(1)
def flatten(d):
return _flatten(d, reducer='dot', keep_empty_types=(dict,), enumerate_types=(list,))
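# Illustrative example of the flattening scheme above (dot-joined keys,
# list indices enumerated):
#   flatten({'a': {'b': 1}, 'c': [2, 3]}) -> {'a.b': 1, 'c.0': 2, 'c.1': 3}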
data = flatten(data)
ok = True
for r in [ flatten(i) for i in results ]:
for k, v in r.items():
if k not in data:
print(f'{k} not found in {data}')
ok = False
elif v != data[k]:
            print(f'{k}={data[k]} does not match {k}={v}')
ok = False
else:
print(f"Match: {r}")
sys.exit(0 if ok else 1)
| 23.405405 | 88 | 0.590069 | 137 | 866 | 3.686131 | 0.430657 | 0.041584 | 0.067327 | 0.075248 | 0.174257 | 0.174257 | 0.174257 | 0.174257 | 0.174257 | 0 | 0 | 0.007825 | 0.262125 | 866 | 36 | 89 | 24.055556 | 0.782473 | 0.023095 | 0 | 0.285714 | 0 | 0 | 0.173965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0.035714 | 0.178571 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf224e8c8f4fa354c35d1431a9957707b55eb9b | 331 | py | Python | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | ["MIT"] | null | null | null | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | ["MIT"] | 5 | 2021-07-13T13:56:17.000Z | 2022-03-02T02:43:46.000Z | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | ["MIT"] | 2 | 2021-07-13T06:08:59.000Z | 2022-03-16T22:15:57.000Z |
__all__ = [
'make_aio_client',
'make_sync_client',
'TAsyncHTTPXClient',
'THTTPXClient',
]
from .aio import TAsyncHTTPXClient, make_client as make_aio_client
from .sync import THTTPXClient, make_client as make_sync_client
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 23.642857 | 66 | 0.770393 | 41 | 331 | 5.682927 | 0.341463 | 0.141631 | 0.111588 | 0.137339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148036 | 331 | 13 | 67 | 25.461538 | 0.826241 | 0 | 0 | 0 | 0 | 0 | 0.202417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf41bde14de2173375d4d1e4381757de1699557 | 3,553 | py | Python | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | ["Apache-2.0"] | null | null | null | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | ["Apache-2.0"] | null | null | null | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | ["Apache-2.0"] | null | null | null |
import sys
import random
from kalc.model.system.base import ModularKind
from typing import Set
from kalc.model.system.primitives import Label, StatusNode
from kalc.model.system.base import HasLabel
from kalc.misc.util import cpuConvertToAbstractProblem, memConvertToAbstractProblem
from kalc.misc.const import STATUS_NODE
from kalc.model.system.globals import GlobalVar
class Node(ModularKind, HasLabel):
# k8s attributes
metadata_ownerReferences__name: str
metadata_name: str
spec_priorityClassName: str
labels: Set[Label]
# pods: Set[mpod.Pod]
cpuCapacity: int
memCapacity: int
currentFormalCpuConsumption: int
currentFormalMemConsumption: int
currentRealMemConsumption: int
currentRealCpuConsumption: int
AmountOfPodsOverwhelmingMemLimits: int
isNull: bool
status: StatusNode
amountOfActivePods: int
searchable: bool
isSearched: bool
different_than: Set["Node"]
allocatedPodList: Set["Pod"]
allocatedPodList_length: int
directedPodList: Set["Pod"]
directedPodList_length: int
daemonset_podList: Set["Pod"]
    daemonset_podList_length: int
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metadata_name = "modelNode"+str(random.randint(100000000, 999999999))
# self.metadata_name = "model-default-name"
self.AmountOfPodsOverwhelmingMemLimits = 0
self.currentFormalCpuConsumption = 0
self.currentFormalMemConsumption = 0
self.currentRealCpuConsumption = 0
self.currentRealMemConsumption = 0
self.cpuCapacity = 0
self.memCapacity = 0
self.isNull = False
self.status = STATUS_NODE["Active"]
self.amountOfActivePods = 0
self.searchable = True
self.isSearched = False
self.allocatedPodList_length = 0
self.directedPodList_length = 0
        self.daemonset_podList_length = 0
def hook_after_create(self, object_space):
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), object_space))
globalVar.amountOfNodes += 1
nodes = filter(lambda x: isinstance(x, Node), object_space)
for node in nodes:
if node != self:
self.different_than.add(node)
node.different_than.add(self)
def hook_after_load(self, object_space):
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), object_space))
globalVar.amountOfNodes += 1
nodes = filter(lambda x: isinstance(x, Node), object_space)
for node in nodes:
if node != self:
self.different_than.add(node)
node.different_than.add(self)
@property
def status_allocatable_memory(self):
pass
@status_allocatable_memory.setter
def status_allocatable_memory(self, value):
self.memCapacity = memConvertToAbstractProblem(value)
@property
def status_allocatable_cpu(self):
pass
@status_allocatable_cpu.setter
def status_allocatable_cpu(self, value):
self.cpuCapacity = cpuConvertToAbstractProblem(value)
def __str__(self):
if str(self.metadata_name) == "None":
return "<unnamed node>"
return str(self.metadata_name)
# def __repr__(self):
# return 'Nodename : ' + str(self._get_value())
Node.NODE_NULL = Node("NULL")
Node.NODE_NULL.isNull = True
Node.NODE_NULL.status = STATUS_NODE["Inactive"]
Node.NODE_NULL.metadata_name = "Null-Node"
Node.NODE_NULL.searchable = False
| 33.838095 | 83 | 0.690684 | 386 | 3,553 | 6.170984 | 0.274611 | 0.020991 | 0.025189 | 0.031906 | 0.24937 | 0.201511 | 0.177162 | 0.177162 | 0.177162 | 0.177162 | 0 | 0.011632 | 0.225725 | 3,553 | 104 | 84 | 34.163462 | 0.854235 | 0.041092 | 0 | 0.204545 | 0 | 0 | 0.019712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.022727 | 0.102273 | 0 | 0.488636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf46aef0cec7975f957c42ac0e9212705e2eac4 | 6,154 | py | Python | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | ["MIT"] | 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | ["MIT"] | null | null | null | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | ["MIT"] | 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import os
from genomicode import filelib
from genomicode import sortlib
from Betsy import module_utils as mlib
# Should be a folder of fastqc results.
fastqc_path = in_data.identifier
# Find all the FASTQC results.
x = filelib.list_files_in_path(fastqc_path, endswith="summary.txt")
x = [os.path.split(x)[0] for x in x]
paths = x
assert paths, "No FASTQC files found."
# Read the results.
all_results = [read_fastqc_results(x) for x in paths]
assert all_results
# Make table where the rows are the samples and the columns
# are the statistics.
sample2results = {}
for x in all_results:
assert x.sample not in sample2results
sample2results[x.sample] = x
all_statistics = all_results[0].statistics_order
all_samples = sortlib.sort_natural(sample2results)
table = []
header = [
"Sample", "Total Sequences", "Filtered Sequences",
"Sequence length", "GC"] + all_statistics
table.append(header)
for sample in all_samples:
results = sample2results[sample]
x1 = [sample]
x2 = [
results.total_sequences, results.filtered_sequences,
results.sequence_length, results.percent_gc]
x3 = [results.statistics[x] for x in all_statistics]
x = x1 + x2 + x3
assert len(x) == len(header)
table.append(x)
# Write out the table as text file.
TXT_FILE = "fastqc_summary.txt"
handle = open(TXT_FILE, 'w')
for x in table:
print >>handle, "\t".join(map(str, x))
handle.close()
x = mlib.get_config("txt2xls", which_assert_file=True, quote=True)
os.system("%s -b %s > %s" % (x, TXT_FILE, outfile))
filelib.assert_exists_nz(outfile)
def name_outfile(self, antecedents, user_options):
return "fastqc_summary.xls"
class FastQCResults:
def __init__(self, sample, total_sequences, filtered_sequences,
sequence_length, percent_gc, statistics, statistics_order):
# statistics is a dictionary of name of statistic -> status
# statistics_order is the order that the statistics were given
# in the fastqc output.
assert sorted(statistics) == sorted(statistics_order)
self.sample = sample
self.total_sequences = total_sequences
self.filtered_sequences = filtered_sequences
self.sequence_length = sequence_length
self.percent_gc = percent_gc
self.statistics = statistics.copy()
self.statistics_order = statistics_order[:]
def read_fastqc_results(fastqc_path):
import os
from genomicode import filelib
summary_file = os.path.join(fastqc_path, "summary.txt")
data_file = os.path.join(fastqc_path, "fastqc_data.txt")
filelib.assert_exists_nz(summary_file)
filelib.assert_exists_nz(data_file)
summary = read_fastqc_summary(summary_file)
data = read_fastqc_data(data_file)
# Figure out the sample names from the filenames.
samples = sorted([x[-1] for x in summary])
assert samples[0] == samples[-1], "%s %s" % (samples[0], samples[-1])
sample = samples[0]
if sample.lower().endswith(".gz"):
sample = sample[:-3]
if sample.lower().endswith(".fq"):
sample = sample[:-3]
if sample.lower().endswith(".fastq"):
sample = sample[:-6]
# Make the statistics dictionary.
statistics = {}
statistics_order = []
for x in summary:
status, statistic, x = x
assert statistic not in statistics
statistics[statistic] = status
statistics_order.append(statistic)
x = FastQCResults(
sample, data["total_sequences"], data["filtered_sequences"],
data["sequence_length"], data["percent_gc"],
statistics, statistics_order)
return x
def read_fastqc_summary(filename):
# Return list of (<status>, <statistic>, <filename>)
import os
from genomicode import filelib
assert os.path.exists(filename)
data = []
for x in filelib.read_cols(filename):
assert len(x) == 3
status, statistic, filename = x
data.append((status, statistic, filename))
return data
def read_fastqc_data(filename):
# Return a dictionary of:
# total_sequences <int>
# filtered_sequences <int>
# sequence_length <str> "205", "15-205"
# percent_gc <float>
from genomicode import parselib
data = {}
for line in open(filename):
# Line seems to end with:
# 'Total Sequences\t1056547\t\n'
# Not enough just to strip \r\n.
#cols = line.rstrip("\r\n").split("\t")
cols = line.rstrip().split("\t")
if line.startswith("Total Sequences"):
assert len(cols) == 2, repr(line)
data["total_sequences"] = int(cols[1])
elif line.startswith("Filtered Sequences"):
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequences flagged as poor quality"):
# Seems to be alternative to "Filtered Sequences".
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequence length"):
assert len(cols) == 2
data["sequence_length"] = cols[1]
elif line.startswith("%GC"):
assert len(cols) == 2
data["percent_gc"] = float(cols[1])/100
expected = [
"total_sequences", "filtered_sequences", "sequence_length",
"percent_gc"]
x = [x for x in expected if x not in data]
assert not x, "Missing (%s) from fastqc_data: %s" % (
parselib.pretty_list(x), filename)
return data
| 34.573034 | 76 | 0.614722 | 738 | 6,154 | 4.968835 | 0.218157 | 0.055631 | 0.014726 | 0.019089 | 0.1958 | 0.157349 | 0.115626 | 0.071993 | 0.042542 | 0.042542 | 0 | 0.011773 | 0.282255 | 6,154 | 177 | 77 | 34.768362 | 0.818429 | 0.128372 | 0 | 0.125984 | 0 | 0 | 0.09399 | 0 | 0 | 0 | 0 | 0 | 0.149606 | 1 | 0.055118 | false | 0 | 0.07874 | 0.007874 | 0.181102 | 0.007874 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf674c2dd9e1aaac9f80a20682c800896278be3 | 792 | py | Python | propnet/models/__init__.py | nile0316/propnet | 3e1f1476c70a878c6eb43587c328d108b0e2a410 | ["BSD-3-Clause-LBNL"] | 57 | 2018-01-09T14:56:20.000Z | 2022-02-24T11:44:42.000Z | propnet/models/__init__.py | ruriboshi/propnet | 770703fb4fc344f785f89c02f26b31ea5733d2bd | ["BSD-3-Clause-LBNL"] | 214 | 2017-09-26T23:31:09.000Z | 2022-03-14T04:50:58.000Z | propnet/models/__init__.py | nile0316/propnet | 3e1f1476c70a878c6eb43587c328d108b0e2a410 | ["BSD-3-Clause-LBNL"] | 26 | 2017-10-29T21:34:22.000Z | 2022-01-12T05:59:12.000Z |
# noinspection PyUnresolvedReferences
import propnet.symbols
from propnet.models import serialized, python, composite
from propnet.core.registry import Registry
# Expose built-in models at module level so they can be imported directly from this module (e.g. for example code generation)
def _update_globals():
for name, model in Registry("models").items():
if model.is_builtin:
globals()[name] = model
def add_builtin_models_to_registry(register_symbols=True):
if register_symbols:
propnet.symbols.add_builtin_symbols_to_registry()
serialized.add_builtin_models_to_registry(register_symbols=False)
python.add_builtin_models_to_registry(register_symbols=False)
composite.add_builtin_models_to_registry(register_symbols=False)
_update_globals()
_update_globals()
| 33 | 98 | 0.792929 | 101 | 792 | 5.90099 | 0.376238 | 0.083893 | 0.107383 | 0.120805 | 0.300336 | 0.300336 | 0.300336 | 0.231544 | 0 | 0 | 0 | 0 | 0.141414 | 792 | 23 | 99 | 34.434783 | 0.876471 | 0.166667 | 0 | 0.133333 | 0 | 0 | 0.009132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf6a8cffebce41ae5095ad681541b2d2a477027 | 1,369 | py | Python | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null |
import csv
import re
regex = re.compile('[^a-zA-Z]')
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
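# f7 de-duplicates while preserving first-seen order, e.g.
# f7([3, 1, 3, 2, 1]) -> [3, 1, 2]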
def clean_dataset(screen_name, n_tweets=300):
# open CSV file
all_words = []
with open('%s_tweets.csv' % screen_name, 'r') as f:
reader = csv.reader(f)
c = 0
for row in reader:
if len(row) > 0:
c += 1
words = row[0].split()
for w in words:
s = regex.sub('', w.lower()).strip()
if(len(s) > 2 and len(s) < 13):
all_words.append(s)
if c >= n_tweets:
break
# Filter out repetition
    # but since we are building shingles, there is no need:
# final_words = f7(all_words)
#outtweets = [[word] for word in final_words]
outtweets = [[word] for word in all_words]
#print(final_words)
with open('%s_tweets_words.csv' % screen_name, 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(outtweets)
if __name__ == '__main__':
for user in ['katyperry', 'TheEllenShow', 'YouTube', 'realDonaldTrump', 'BillGates',
'nytimes', 'CNN', 'espn', 'NASA', 'aliciakeys']:
clean_dataset(user)
| 30.422222 | 89 | 0.519357 | 178 | 1,369 | 3.842697 | 0.477528 | 0.046784 | 0.038012 | 0.040936 | 0.137427 | 0.078947 | 0 | 0 | 0 | 0 | 0 | 0.013544 | 0.352812 | 1,369 | 44 | 90 | 31.113636 | 0.758465 | 0.127831 | 0 | 0 | 0 | 0 | 0.116462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bf9bd37e91a5feca68c63420808cdbf5f96022e | 6,736 | py | Python | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 27 | 2021-07-28T01:33:02.000Z | 2022-03-18T04:01:02.000Z | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 5 | 2021-11-13T05:58:51.000Z | 2022-02-13T09:07:44.000Z | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 1 | 2021-08-21T13:14:28.000Z | 2021-08-21T13:14:28.000Z |
#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5
import math
import torch.nn as nn
import torch
from .GDN import GDN
from .attention import Attention
# class Analysis_transform(nn.Module):
# def __init__(self, num_filters=128):
# super(Analysis_transform, self).__init__()
# self.conv_shortcut0 = nn.Conv2d(3, num_filters, 1, stride=2, padding=0)
# self.conv0 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
# self.conv1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu1 = nn.LeakyReLU()
# self.conv2 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu2 = nn.LeakyReLU()
# self.conv_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2, padding=0)
# self.conv3 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
# self.leaky_relu3 = nn.LeakyReLU()
# self.conv4 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.gdn = GDN(num_filters)
# # self.leaky_relu4 = nn.LeakyReLU()
# self.conv5 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1, bias=False)
# self.attention1 = Attention(num_filters)
# self.attention2 = Attention(num_filters)
#
#
# def forward(self, x):
# for i in range(4):
# if i > 0:
# x2 = self.conv1(x)
# x2 = self.leaky_relu1(x2)
# # print("a 3x3 1")
# # print("%d"%(i), x2.shape)
# x2 = self.conv2(x2)
# x2 = self.leaky_relu2(x2)
# # print("b 3x3 1")
# # print("%d"%(i), x2.shape)
# x = x + x2
# # print("resblock result: ", x.shape)
#
#
# if i == 0:
# shortcut_tensor = self.conv_shortcut0(x)
# x = self.conv0(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# elif i < 3:
# shortcut_tensor = self.conv_shortcut(x)
# x = self.conv3(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# if i == 1:
# # Attenation
# x = self.attention1(x)
#
# else:
# x = self.conv5(x)
# x = self.attention2(x)
#
# return x
class Analysis_transform(nn.Module):
def __init__(self, num_filters=128):
super(Analysis_transform, self).__init__()
# i = 0
self.b0_shortcut = nn.Conv2d(3, num_filters, 1, stride=2)
self.b0_layer2 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
self.b0_layer2_relu = nn.LeakyReLU()
self.b0_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b0_layer3_GDN = GDN(num_filters)
# i = 1
self.b1_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer0_relu = nn.LeakyReLU()
self.b1_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer1_relu = nn.LeakyReLU()
self.b1_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2)
self.b1_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
self.b1_layer2_relu = nn.LeakyReLU()
self.b1_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer3_GDN = GDN(num_filters)
self.attention1 = Attention(num_filters)
# i = 2
self.b2_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer0_relu = nn.LeakyReLU()
self.b2_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer1_relu = nn.LeakyReLU()
self.b2_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2)
self.b2_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
self.b2_layer2_relu = nn.LeakyReLU()
self.b2_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer3_GDN = GDN(num_filters)
# i = 3
self.b3_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b3_layer0_relu = nn.LeakyReLU()
self.b3_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b3_layer1_relu = nn.LeakyReLU()
self.b3_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1, bias=False)
self.attention2 = Attention(num_filters)
def forward(self, x):
# i = 0
shortcut0 = self.b0_shortcut(x)
b0 = self.b0_layer2(x)
b0 = self.b0_layer2_relu(b0)
b0 = self.b0_layer3(b0)
b0 = self.b0_layer3_GDN(b0)
b0 += shortcut0
# i = 1
b1 = self.b1_layer0(b0)
b1 = self.b1_layer0_relu(b1)
b1 = self.b1_layer1(b1)
b1 = self.b1_layer1_relu(b1)
b1 += b0
shortcut1 = self.b1_shortcut(b1)
b1 = self.b1_layer2(b1)
b1 = self.b1_layer2_relu(b1)
b1 = self.b1_layer3(b1)
b1 = self.b1_layer3_GDN(b1)
b1 += shortcut1
b1 = self.attention1(b1)
# i = 2
b2 = self.b2_layer0(b1)
b2 = self.b2_layer0_relu(b2)
b2 = self.b2_layer1(b2)
b2 = self.b2_layer1_relu(b2)
b2 += b1
shortcut2 = self.b2_shortcut(b2)
b2 = self.b2_layer2(b2)
b2 = self.b2_layer2_relu(b2)
b2 = self.b2_layer3(b2)
b2 = self.b2_layer3_GDN(b2)
b2 += shortcut2
# i = 3
b3 = self.b3_layer0(b2)
b3 = self.b3_layer0_relu(b3)
b3 = self.b3_layer1(b3)
b3 = self.b3_layer1_relu(b3)
b3 += b2
b3 = self.b3_layer2(b3)
b3 = self.attention2(b3)
return b3
if __name__ == "__main__":
analysis_transform = Analysis_transform()
input_image = torch.zeros([1,3,256,256])
feature = analysis_transform(input_image)
print(feature.shape)
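    # Quick shape check: four stride-2 stages halve 256 four times
    # (256 / 2**4 = 16) and num_filters defaults to 128, so this should
    # print torch.Size([1, 128, 16, 16]).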
| 38.936416 | 96 | 0.55478 | 911 | 6,736 | 3.899012 | 0.103183 | 0.152027 | 0.061937 | 0.101351 | 0.712838 | 0.530687 | 0.51661 | 0.497748 | 0.475788 | 0.449887 | 0 | 0.079393 | 0.315618 | 6,736 | 172 | 97 | 39.162791 | 0.691106 | 0.428593 | 0 | 0 | 0 | 0 | 0.002133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0 | 0.061728 | 0 | 0.111111 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bfb4d961bec58ff15fe5b25777f51138ea3c5dc | 1,516 | py | Python | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | null | null | null | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | null | null | null | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | 1 | 2020-12-14T13:56:50.000Z | 2020-12-14T13:56:50.000Z |
# -*- coding: utf-8 -*-
from src.utils.get_data import load_data
from src.utils.get_data import get_datasets
from src.utils.get_data import concatenate_datasets
from src.utils.dataset_balancer import balance_data
import os
import pandas as pd
import unittest
class TestDataBalancer(unittest.TestCase):
def setUp(self):
self.df = load_data(os.path.join(os.path.pardir, 'src', 'data', 'tweets.csv'))
self.df2, self.df3 = get_datasets(os.path.join(os.path.pardir, 'src', 'data', 'labeled_data.csv'),
os.path.join(os.path.pardir, 'src', 'data',
'hatespeech_text_label_vote_RESTRICTED_100K.csv'))
self.df_concatenated = concatenate_datasets(os.path.join(os.path.pardir, 'src', 'data', 'tweets.csv'),
self.df2,
self.df3)
def test_balance_data(self):
x_balanced, y_balanced = balance_data(self.df_concatenated[['text']],
self.df_concatenated[['hate_speech']])
self.assertIsInstance(y_balanced,
pd.core.frame.DataFrame)
self.assertIsInstance(x_balanced,
pd.core.frame.DataFrame)
self.assertEquals(x_balanced.shape, y_balanced.shape)
if __name__ == "__main__":
unittest.main()
| 35.255814 | 110 | 0.550792 | 164 | 1,516 | 4.859756 | 0.347561 | 0.060226 | 0.060226 | 0.060226 | 0.397742 | 0.397742 | 0.223338 | 0.223338 | 0.186951 | 0.130489 | 0 | 0.008065 | 0.345646 | 1,516 | 42 | 111 | 36.095238 | 0.795363 | 0.013852 | 0 | 0.076923 | 0 | 0 | 0.089082 | 0.03081 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.076923 | false | 0 | 0.269231 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4bfb89534390da200300df58f33c846fbb2cba39 | 12,695 | py | Python | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 28 | 2018-11-05T03:01:18.000Z | 2021-04-02T18:11:05.000Z | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 7 | 2019-06-04T21:43:40.000Z | 2021-11-04T04:19:26.000Z | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 8 | 2019-04-03T12:28:05.000Z | 2021-12-23T10:15:34.000Z |
#
# Yinhao Zhu, May 01, 2017
#
"""
Sparse GP regression, including variational GP and others.
"""
from __future__ import absolute_import
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
from torch.distributions.transforms import LowerCholeskyTransform
from ..model import Param
from ..functions import cholesky, trtrs
from ..mean_functions import Zero
from ..likelihoods import Gaussian
from ..util import TensorType, torch_dtype, as_tensor, kmeans_centers
from .gpr import GPR
from .base import GPModel
class _InducingPointsGP(GPModel):
"""
Parent class for GPs with inducing points
"""
def __init__(
self,
x,
y,
kernel,
num_inducing_points=None,
inducing_points=None,
mean_function=None,
likelihood=None,
):
"""
Assume Gaussian likelihood
        Args:
            x (np.ndarray): inputs, n x q
            y (np.ndarray): observations, n x p
            kernel (gptorch.Kernel):
            inducing_points (np.ndarray, optional): Z, m x q
            num_inducing_points (int, optional): number of inducing inputs
        Inputs, observations, and kernel must be specified.  If neither
        ``inducing_points`` nor ``num_inducing_points`` is set, 1/10th of the
        total points (up to 100) are used, with locations chosen by k-means
        on the inputs.
"""
super().__init__(x, y, kernel, likelihood, mean_function)
if inducing_points is None:
if num_inducing_points is None:
num_inducing_points = np.clip(x.shape[0] // 10, 1, 100)
inducing_points = kmeans_centers(x, num_inducing_points,
perturb_if_fail=True)
# indices = np.random.permutation(len(x))[:num_inducing_points]
# inducing_points = TensorType(x[indices])
# Z stands for inducing input points as standard in the literature
self.Z = Param(as_tensor(inducing_points))
@property
def num_inducing(self) -> int:
"""
Number of inducing points
"""
return self.Z.shape[0]
class FITC(_InducingPointsGP):
"""
Fully Independent Training Conditional approximation for GP
References:
Snelson, Edward, and Zoubin Ghahramani. "Sparse Gaussian processes
using pseudo-inputs." Advances in neural information processing
systems 18 (2006): 1257.
Quinonero-Candela, Joaquin, and Carl Edward Rasmussen. "A unifying
view of sparse approximate Gaussian process regression." Journal of
Machine Learning Research 6.Dec (2005): 1939-1959.
"""
# TODO: add FITC for sparse GP regression
pass
class VFE(_InducingPointsGP):
"""
Variational Free Energy approximation for GP
Reference:
Titsias, Michalis K. "Variational Learning of Inducing Variables
in Sparse Gaussian Processes." AISTATS. Vol. 5. 2009.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert isinstance(
self.mean_function, Zero
), "Mean functions not implemented for VFE yet."
def log_likelihood(self, x=None, y=None):
"""
Computes the variational lower bound of the true log marginal likelihood
Eqn (9) in Titsias, Michalis K. "Variational Learning of Inducing Variables
in Sparse Gaussian Processes." AISTATS. Vol. 5. 2009.
"""
x = x if x is not None else self.X
y = y if y is not None else self.Y
if not x.shape[0] == y.shape[0]:
raise ValueError("X and Y must have same # data.")
num_inducing = self.num_inducing
num_data = x.shape[0]
d_out = self.output_dimension
# TODO: add mean_functions
# err = self.Y - self.mean_function(x)
err = self.Y
Kff_diag = self.kernel.Kdiag(x)
Kuf = self.kernel.K(self.Z, x)
# add jitter
Kuu = self.kernel.K(self.Z)
L = cholesky(Kuu)
A = trtrs(Kuf, L)
AAT = A @ A.t() / self.likelihood.variance.transform().expand_as(Kuu)
B = AAT + torch.eye(num_inducing, dtype=torch_dtype).to(AAT.device)
LB = cholesky(B)
# divide variance at the end
c = trtrs(A @ err, LB) / self.likelihood.variance.transform()
# Evidence lower bound
elbo = TensorType([-0.5 * d_out * num_data * np.log(2 * np.pi)]).to(c.device)
elbo -= d_out * LB.diag().log().sum()
elbo -= (
0.5 * d_out * num_data * self.likelihood.variance.transform().log()
)
elbo -= (
0.5
* (err.pow(2).sum() + d_out * Kff_diag.sum())
/ self.likelihood.variance.transform()
)
elbo += 0.5 * c.pow(2).sum()
elbo += 0.5 * d_out * AAT.diag().sum()
return elbo[0]
def _predict(self, x_new: TensorType, diag=True, x=None):
"""
Compute posterior p(f*|y), integrating out induced outputs' posterior.
:return: (mean, var/cov)
"""
x = x if x is not None else self.X
z = self.Z
z.requires_grad_(False)
num_inducing = z.size(0)
# err = self.Y - self.mean_function(x)
err = self.Y
Kuf = self.kernel.K(z, x)
# add jitter
Kuu = self.kernel.K(z)
Kus = self.kernel.K(z, x_new)
L = cholesky(Kuu)
A = trtrs(Kuf, L)
AAT = A @ A.t() / self.likelihood.variance.transform().expand_as(Kuu)
B = AAT + torch.eye(num_inducing, dtype=torch_dtype).to(AAT.device)
LB = cholesky(B)
# divide variance at the end
c = trtrs(A @ err, LB) / self.likelihood.variance.transform()
tmp1 = trtrs(Kus, L)
tmp2 = trtrs(tmp1, LB)
mean = tmp2.t() @ c
if diag:
var = (
self.kernel.Kdiag(x_new)
- tmp1.pow(2).sum(0).squeeze()
+ tmp2.pow(2).sum(0).squeeze()
)[:, None].expand_as(mean)
else:
var = self.kernel.K(x_new) + tmp2.t() @ tmp2 - tmp1.t() @ tmp1
return mean, var
def minibatch(loss_func):
"""
Decorator to use minibatching for a loss function (e.g. SVGP)
"""
def wrapped(obj, x=None, y=None):
if x is not None:
assert y is not None
else:
# Get from model:
if obj.batch_size is not None:
i = np.random.permutation(obj.num_data)[: obj.batch_size]
x, y = obj.X[i, :], obj.Y[i, :]
else:
x, y = obj.X, obj.Y
return loss_func(obj, x, y)
return wrapped
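# Usage sketch: a loss decorated with @minibatch may be called either with
# explicit tensors, loss(model, x_batch, y_batch), or with no data
# arguments, loss(model), in which case a random batch of model.batch_size
# rows is drawn from model.X / model.Y (or the full data if batch_size is
# None).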
class SVGP(_InducingPointsGP):
"""
Sparse variational Gaussian process.
James Hensman, Nicolo Fusi, and Neil D. Lawrence,
"Gaussian processes for Big Data" (2013)
James Hensman, Alexander Matthews, and Zoubin Ghahramani,
"Scalable variational Gaussian process classification", JMLR (2015).
"""
def __init__(
self,
        x,
        y,
kernel,
num_inducing_points=None,
inducing_points=None,
mean_function=None,
likelihood=Gaussian(),
batch_size=None,
):
"""
:param batch_size: How many points to process in a minibatch of
training. If None, no minibatches are used.
"""
super().__init__(
            x,
            y,
kernel,
num_inducing_points=num_inducing_points,
inducing_points=inducing_points,
mean_function=mean_function,
likelihood=likelihood,
)
# assert batch_size is None, "Minibatching not supported yet."
self.batch_size = batch_size
# Parameters for the Gaussian variational posterior over the induced
# outputs.
# Note: induced_output_mean does NOT include the contribution due to the
# mean function.
self.induced_output_mean, self.induced_output_chol_cov = self._init_posterior()
@minibatch
def log_likelihood(self, x, y):
"""
Variational bound.
"""
if not x.shape[0] == y.shape[0]:
raise ValueError("X and Y must have same # data.")
chol_kuu = cholesky(self.kernel.K(self.Z))
# Marginal posterior q(f)'s mean & variance
f_mean, f_var = self._predict(x, diag=True, chol_kuu=chol_kuu)
marginal_log_likelihood = torch.stack(
[
self.likelihood.propagate_log(
torch.distributions.Normal(loc_i, torch.sqrt(v_i)), yi
)
for loc_i, v_i, yi in zip(f_mean.t(), f_var.t(), y.t())
]
).sum()
# Account for size of minibatch relative to the total dataset size:
marginal_log_likelihood *= self.num_data / x.shape[0]
mu_xu = self.mean_function(self.Z) # Prior mean
qu_mean = self.induced_output_mean + mu_xu
qu_lc = self.induced_output_chol_cov.transform()
# Each output dimension has its own Multivariate normal (different
# means, shared covariance); the joint distribution is the product
# across output dimensions.
qus = [
torch.distributions.MultivariateNormal(qu_i, scale_tril=qu_lc)
for qu_i in qu_mean.t()
]
# Each dimension has its own prior as well due to the mean function
# Being potentially different for each output dimension.
pus = [
torch.distributions.MultivariateNormal(mi, scale_tril=chol_kuu)
for mi in mu_xu.t()
]
kl = torch.stack(
[torch.distributions.kl_divergence(qu, pu) for qu, pu in zip(qus, pus)]
).sum()
return marginal_log_likelihood - kl
def _init_posterior(self):
"""
Get an initial guess at the variational posterior over the induced
outputs.
Just build a GP out of a few data and use its posterior.
This could be far worse than expected if the likelihood is non-Gaussian,
but we don't need this to be great--just good enough to get started.
"""
i = np.random.permutation(self.num_data)[0 : min(self.num_data, 100)]
x, y = self.X[i].data.numpy(), self.Y[i].data.numpy()
# Likelihood needs to be Gaussian for exact inference in GPR
likelihood = (
self.likelihood
if isinstance(self.likelihood, Gaussian)
else Gaussian(variance=0.01 * y.var())
)
model = GPR(
x, y, self.kernel, mean_function=self.mean_function, likelihood=likelihood
)
mean, cov = model.predict_f(self.Z, diag=False)
mean -= self.mean_function(self.Z)
chol_cov = cholesky(cov)
return Param(mean), Param(chol_cov, transform=LowerCholeskyTransform())
def _predict(self, x_new: TensorType, diag=True, chol_kuu=None, **kwargs):
"""
SVGP Prediction uses inducing points as sufficient statistics for the
posterior.
Could implement Marginalization of Gaussians (cf. PRML p. 93), but
something specific to (positive-definite) kernel matrices should
perform better.
Shapes of outputs are:
diag: both are [N x dy]
not diag: mean is [N x dy], cov is [N x N]
:param x_new: inputs to predict on.
:param diag: if True, return variance of prediction; False=full cov
:param chol_kuu: The Cholesky of the kernel matrix for the inducing
inputs (to enable reuse when computing the training loss)
:return: (torch.Tensor, torch.Tensor) mean & [co]variance
"""
chol_kuu = cholesky(self.kernel.K(self.Z)) if chol_kuu is None else chol_kuu
kuf = self.kernel.K(self.Z, x_new)
alpha = trtrs(kuf, chol_kuu).t()
# beta @ beta.t() = inv(L) @ S @ inv(L'), S=post cov of induced outs
beta = trtrs(self.induced_output_chol_cov.transform(), chol_kuu)
mu_x = self.mean_function(x_new)
# Remember: induced_output_mean doesn't include mean function, so no
# need to subtract it.
f_mean = alpha @ trtrs(self.induced_output_mean, chol_kuu) + mu_x
# gamma @ gamma.t() = Kfu @ inv(Kuu) @ S @ inv(Kuu) @ Kuf
gamma = alpha @ beta
if diag:
f_cov = (
self.kernel.Kdiag(x_new)
- torch.sum(alpha ** 2, dim=1)
+ torch.sum(gamma ** 2, dim=1)
)[:, None].expand_as(f_mean)
else:
f_cov = self.kernel.K(x_new) - alpha @ alpha.t() + gamma @ gamma.t()
return f_mean, f_cov
| 33.232984 | 87 | 0.59228 | 1,653 | 12,695 | 4.421658 | 0.23533 | 0.04214 | 0.01505 | 0.025448 | 0.245451 | 0.189492 | 0.159256 | 0.154057 | 0.128882 | 0.128882 | 0 | 0.01253 | 0.308468 | 12,695 | 381 | 88 | 33.32021 | 0.820025 | 0.330209 | 0 | 0.270408 | 0 | 0 | 0.013033 | 0 | 0 | 0 | 0 | 0.005249 | 0.010204 | 1 | 0.056122 | false | 0.005102 | 0.061224 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef0025261578f6f3b594dd1953fdfd38e1b064c9 | 10,015 | py | Python | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null |
import tkinter as tk
import tkinter.font as tf
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename, askdirectory
import time
import threading
from functools import wraps
from xyw_macro.utils import SingletonType
from xyw_macro.contants import SLEEP_TIME
class Notification(metaclass=SingletonType):
def __init__(self, text='xyw', fg='white', bg='black'):
self.__text = text
self.__fg = fg
self.__bg = bg
self.__visible = False
self.__vnum = 0
self.__window, self.__label, self.__width = self.__init__window()
self.set_visible(self.__visible)
def show(self):
if self.__vnum == 0:
self.set_visible(True)
self.__vnum = self.__vnum + 1
def hide(self):
self.__vnum = self.__vnum - 1
if self.__vnum == 0:
self.set_visible(False)
def __init__window(self):
window = tk.Tk()
window.wm_attributes('-topmost', True)
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
width = round(screen_width / 10)
height = round(screen_width / 10)
window.geometry('{}x{}+{}+{}'.format(width, height, (screen_width - width) // 2, (screen_height - height) // 2))
window.overrideredirect(True)
window.configure(background=self.__bg)
window.attributes('-alpha', 0.7)
font_size = self.__get_font_size(width)
outer_border_size = round(font_size * 0.08)
inner_border_size = round(font_size * 0.05)
font = tf.Font(size=font_size, weight=tf.BOLD)
label_border = tk.LabelFrame(window, background=self.__fg, relief='flat')
label = tk.Label(label_border, text=self.__text, font=font, bg=self.__bg, fg=self.__fg,
height=height, width=width, justify='center', anchor='center',
borderwidth=0, relief='flat')
label_border.pack(fill='both', expand=True, padx=outer_border_size, pady=outer_border_size)
label.pack(fill='both', expand=True, padx=inner_border_size, pady=inner_border_size)
return window, label, width
def get_text(self):
"""
        Get the label text.
:return:
"""
return self.__text
    def __get_font_size(self, width):
        # Split the text on newlines
        texts = self.__text.split('\n')
        # Half-width (ASCII) character set
        alnum = r'abcdefghijklmnopqrstuvwxyz0123456789+-*/=`~!@#$%^&*()_\|?><.,'
        # Find the longest line, measured in character widths
        length = [1]
        for item in texts:
            tem = 0
            for i in item:
                if i.lower() in alnum:
                    # ASCII half-width characters count as half a width
                    tem = tem + 0.5
                else:
                    # All other characters count as a full width
                    tem = tem + 1
            length.append(tem)
        length = max(length)
        # Scale the font size to the longest line
        font_size = round(width * 0.6 / length)
        return font_size
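    # Worked example for __get_font_size: width=200 and text "Hi!\nok" gives
    # line widths 1.5 and 1.0, so font_size = round(200 * 0.6 / 1.5) = 80.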
    def set_text(self, text):
        """
        Set the label text.
        :param text:
        :return:
        """
        self.__text = text
        font_size = self.__get_font_size(self.__width)
        # Re-render the label with the new text and fitted font size
        font = tf.Font(size=font_size, weight=tf.BOLD)
        self.__label.config(text=self.__text, font=font)
def get_visible(self):
"""
        Get window visibility.
:return:
"""
return self.__visible
def set_visible(self, visible):
"""
        Set window visibility.
:param visible:
:return:
"""
self.__visible = visible
if self.__visible:
self.__window.update()
self.__window.deiconify()
else:
self.__window.withdraw()
def run(self):
"""
        Start the window main loop.
:return:
"""
self.__window.mainloop()
text = property(get_text, set_text)
visible = property(get_visible, set_visible)
class InputField:
def __init__(self, name, type='entry', default=None, options=None, focus=False):
self.name = name
self.type = type
self.default = default
self.options = options
self.focus = focus
@staticmethod
def select_file(var):
filepath = askopenfilename()
var.set(filepath)
@staticmethod
def select_dir(var):
dirpath = askdirectory()
var.set(dirpath)
def draw_frame(self, window):
var = tk.StringVar()
frame = tk.Frame(window, takefocus=True)
frame.pack(fill=tk.X, padx=10, pady=2, expand=1)
tk.Label(frame, text=self.name).pack(side=tk.TOP, anchor=tk.W)
if self.type == 'entry':
widget = tk.Entry(frame, show=None, textvariable=var)
widget.pack(fill=tk.X, side=tk.TOP)
if self.default is not None:
var.set(self.default)
elif self.type == 'file':
widget = tk.Entry(frame, show=None, textvariable=var, state=tk.DISABLED)
widget.pack(fill=tk.X, side=tk.LEFT, expand=1)
tk.Button(frame, text='选择文件', command=lambda var=var: self.select_file(var)) \
.pack(fill=tk.X, side=tk.LEFT)
if self.default is not None:
var.set(self.default)
elif self.type == 'dir':
widget = tk.Entry(frame, show=None, textvariable=var, state=tk.DISABLED)
widget.pack(fill=tk.X, side=tk.LEFT, expand=1)
tk.Button(frame, text='选择文件夹', command=lambda var=var: self.select_dir(var)) \
.pack(fill=tk.X, side=tk.LEFT)
if self.default is not None:
var.set(self.default)
elif self.type == 'combobox':
widget = ttk.Combobox(frame, textvariable=var)
widget['values'] = self.options
widget.pack(fill=tk.X, side=tk.TOP)
if self.default is None:
widget.current(0)
else:
widget.current(self.default)
else:
            raise ValueError('unknown type; choose one of "entry", "file", "dir" or "combobox"')
if self.focus:
widget.focus_set()
return var
class InputBox:
"""
    Parameter input dialog.
"""
def __init__(self, title='输入框', *args):
"""
        Initialize the instance.
        :param title: dialog title
"""
self.title = title
self.__args = args
self.top = None
self.vars = []
self.values = []
def show(self):
"""
        Show the input dialog.
        :return: list of entered values (None if cancelled)
"""
return self.top_window()
def clear_all(self):
for var in self.vars:
var.set('')
def close_window(self, flag=False):
if flag:
self.values = None
else:
self.values = [var.get() for var in self.vars]
self.top.destroy()
def top_window(self):
self.top = tk.Toplevel()
self.top.withdraw()
self.top.update()
self.top.wm_attributes('-topmost', True)
self.top.attributes('-toolwindow', True)
self.top.title(self.title)
self.top.grab_set()
screen_width = self.top.winfo_screenwidth()
screen_height = self.top.winfo_screenheight()
width = 300
height = (len(self.__args) * 2 + 1) * 30
self.top.geometry('{}x{}+{}+{}'
.format(width, height, (screen_width - width) // 2, (screen_height - height) // 2))
for field in self.__args:
if not isinstance(field, InputField):
raise TypeError('args must be <class InputField>')
self.vars.append(field.draw_frame(self.top))
frame = tk.Frame(self.top, takefocus=True)
frame.pack(fill=tk.X, padx=10, pady=2, expand=1)
button1 = tk.Button(frame, text='确定', command=lambda: self.close_window(False))
button1.pack(side=tk.LEFT, fill=tk.X, expand=1)
button2 = tk.Button(frame, text='清空', command=self.clear_all)
button2.pack(side=tk.LEFT, fill=tk.X, expand=1)
self.top.protocol("WM_DELETE_WINDOW", lambda: self.close_window(True))
self.top.bind('<Return>', lambda event: self.close_window(False))
self.top.bind('<Escape>', lambda event: self.close_window(True))
self.top.deiconify()
self.top.focus_force()
self.top.focus_set()
self.top.wait_window()
return self.values
def input_box(*ags, title='输入框'):
"""
    Decorator that collects arguments via an input dialog.
    :param title: input dialog title
:return:
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
time.sleep(SLEEP_TIME)
res = InputBox(title, *ags).show()
if res is not None:
return f(*res)
return decorated
return decorator
def confirm_box(message='确定执行此操作吗?'):
"""
    Decorator that asks for confirmation before running.
    :param message: prompt message
:return:
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
time.sleep(SLEEP_TIME)
if messagebox.askokcancel('提示', message):
return f(*args, **kwargs)
return decorated
return decorator
if __name__ == '__main__':
def sub():
time.sleep(2)
notify.text = 'xue'
notify.show()
time.sleep(2)
notify.hide()
# notify = Notification()
# threading.Thread(target=auto_hide).start()
# notify.start()
# thd = threading.Thread(target=sub)
# thd.start()
# def auto_hide():
# time.sleep(2)
# # notify.destroy()
# # flag = False
# notify.hide()
notify = Notification('xyw_macro\n已启动')
threading.Thread(target=sub).start()
notify.run()
# notify.show(0.2)
# print('end')
# time.sleep(2)
# notify.set_text('changed')
# notify.show()
# notify.start()
# print('xue')
# print(type(notify.get_window()))
# notify.start()
# flag = True
# while flag:
# # notify.get_window().update_idletasks()
# notify.get_window().update()
| 30.348485 | 120 | 0.563155 | 1,177 | 10,015 | 4.62192 | 0.203059 | 0.028309 | 0.012868 | 0.016176 | 0.270772 | 0.239154 | 0.192096 | 0.182904 | 0.175368 | 0.152574 | 0 | 0.009987 | 0.310135 | 10,015 | 329 | 121 | 30.440729 | 0.777392 | 0.081877 | 0 | 0.209302 | 0 | 0 | 0.042174 | 0.006879 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12093 | false | 0 | 0.046512 | 0 | 0.251163 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef0469d45705f95287d4ed042d4ea25304eabf8c | 3,217 | py | Python | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | ["MIT"] | null | null | null | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | ["MIT"] | 1 | 2019-04-13T10:15:48.000Z | 2019-04-13T10:15:48.000Z | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | ["MIT"] | null | null | null |
MOVIE1 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE2 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE_PREMIERES = [
{"released": "2014-08-01", "movie": MOVIE1},
{"released": "2014-08-01", "movie": MOVIE2},
]
MOVIES = [MOVIE1, MOVIE2]
TRENDING_MOVIES = [{"watchers": 21, "movie": MOVIE1}, {"watchers": 17, "movie": MOVIE2}]
PLAYED_MOVIES = [
{
"watcher_count": 66667,
"play_count": 109736,
"collected_count": 27584,
"movie": MOVIE1,
},
{
"watcher_count": 76254,
"play_count": 104242,
"collected_count": 31877,
"movie": MOVIE2,
},
]
ANTICIPATED_MOVIES = [
{"list_count": 5362, "movie": MOVIE1},
{"list_count": 4405, "movie": MOVIE2},
]
BOX_OFFICE = [
{"revenue": 48464322, "movie": MOVIE1},
{"revenue": 17728313, "movie": MOVIE2},
]
UPDATED_MOVIES = [{"updated_at": "2014-09-22T21:56:03.000Z", "movie": MOVIE1}]
EXTENDED_MOVIE = {
"title": "TRON: Legacy",
"year": 2010,
"ids": {
"trakt": 343,
"slug": "tron-legacy-2010",
"imdb": "tt1104001",
"tmdb": 20526,
},
"tagline": "The Game Has Changed.",
"overview": "Sam Flynn, the tech-savvy and daring son of Kevin Flynn, investigates his father's disappearance and is pulled into The Grid. With the help of a mysterious program named Quorra, Sam quests to stop evil dictator Clu from crossing into the real world.",
"released": "2010-12-16",
"runtime": 125,
"country": "us",
"updated_at": "2014-07-23T03:21:46.000Z",
"trailer": None,
"homepage": "http://disney.go.com/tron/",
"rating": 8,
"votes": 111,
"comment_count": 92,
"language": "en",
"available_translations": ["en"],
"genres": ["action"],
"certification": "PG-13",
}
ALIASES = [
{"title": "Batman 1 - Batman Begins", "country": "ca"},
{"title": "Batman 5 Begins", "country": "br"},
]
RELEASES = [
{
"country": "us",
"certification": "PG",
"release_date": "2010-12-16",
"release_type": "theatrical",
"note": None,
},
{
"country": "gb",
"certification": "PG",
"release_date": "2010-12-17",
"release_type": "theatrical",
"note": None,
},
]
TRANSLATIONS = [
{
"title": "Batman Begins",
"overview": "...",
"tagline": "Das Böse fürchtet den Ritter.",
"language": "de",
}
]
RATINGS = {
"rating": 7.33778,
"votes": 7866,
"distribution": {
"1": 298,
"2": 46,
"3": 87,
"4": 178,
"5": 446,
"6": 1167,
"7": 1855,
"8": 1543,
"9": 662,
"10": 1583,
},
}
RELATED_MOVIES = [MOVIE1, MOVIE2]
MOVIE_STATS = {
"watchers": 39204,
"plays": 51033,
"collectors": 27379,
"comments": 36,
"lists": 4561,
"votes": 7866,
}
| 22.496503
| 269
| 0.520361
| 337
| 3,217
| 4.893175
| 0.540059
| 0.040024
| 0.03396
| 0.048514
| 0.213463
| 0.15282
| 0.114008
| 0.114008
| 0.114008
| 0.114008
| 0
| 0.135512
| 0.286602
| 3,217
| 142
| 270
| 22.65493
| 0.583007
| 0
| 0
| 0.19685
| 0
| 0.007874
| 0.416226
| 0.039167
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef05389e99b6d9f3d5e451c4f3f4a586cd843bd5
| 7,580
|
py
|
Python
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 1
|
2020-01-13T19:38:50.000Z
|
2020-01-13T19:38:50.000Z
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 6
|
2017-09-19T17:46:03.000Z
|
2020-06-09T04:28:36.000Z
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 9
|
2017-06-30T16:01:48.000Z
|
2020-08-13T20:19:42.000Z
|
import json
import time
import uuid
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace as Workspace
def log(message, prefix_newline=False):
"""Logging function, provides a hook to suppress or redirect log messages."""
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
class AveExpressionMatrixBuilder:
def _validate_calculate_average_expression_matrix_params(self, params):
"""
_validate_calculate_average_expression_matrix_params:
validates params passed to calculate_average_expression_matrix method
"""
log('start validating calculate_average_expression_matrix params')
# check for required parameters
for p in ['expression_matrix_ref', 'output_suffix', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _generate_report(self, expression_matrix_ref, workspace_name):
"""
_generate_report: generate report
"""
objects_created = [{'ref': expression_matrix_ref,
'description': 'Average ExpressionMatrix'}]
report_params = {'message': '',
'workspace_name': workspace_name,
'objects_created': objects_created,
# 'html_links': output_html_files,
# 'direct_html_link_index': 0,
'html_window_height': 366,
'report_object_name': 'kb_ave_expr_matrix_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _save_expression_matrix(self, em_data, em_obj_name, workspace_name):
"""
_save_expression_matrix: saving ExpressionMatrix
"""
try:
log('saving ExpressionMatrix [{}]'.format(em_obj_name))
data_type = 'KBaseFeatureValues.ExpressionMatrix'
obj_info = self.dfu.save_objects({'id': self.dfu.ws_name_to_id(workspace_name),
'objects': [{'type': data_type,
'data': em_data,
'name': em_obj_name}]})[0]
except Exception as e:
log(e)
raise Exception('Failed Saving ExpressionMatrix to Workspace')
# KBase object reference in the form workspace_id/object_id/version
expression_matrix_ref = str(obj_info[6]) + '/' + str(obj_info[0]) + '/' + str(obj_info[4])
return expression_matrix_ref
def __init__(self, config):
self.ws_url = config["workspace-url"]
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.shock_url = config['shock-url']
self.ws = Workspace(self.ws_url, token=self.token)
self.dfu = DataFileUtil(self.callback_url)
self.scratch = config['scratch']
def calculate_average_expression_matrix(self, params):
"""
calculate_average_expression_matrix: create an average ExpressionMatrix object
from an ExpressionMatrix object
required params:
expression_matrix_ref: ExpressionMatrix object reference
output_suffix: output average ExpressionMatrix name suffix
workspace_name: the name of the workspace it gets saved to
return:
average_expression_matrix_ref: generated average ExpressionMatrix object reference
report_name: report name generated by KBaseReport
report_ref: report reference generated by KBaseReport
"""
log('--->\nrunning AveExpressionMatrixBuilder.calculate_average_expression_matrix\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self._validate_calculate_average_expression_matrix_params(params)
expression_matrix_ref = params.get('expression_matrix_ref')
expression_matrix = self.ws.get_objects2({'objects':
[{'ref':
expression_matrix_ref}]})['data'][0]
expression_matrix_data = expression_matrix['data']
expression_matrix_info = expression_matrix['info']
condition_map = expression_matrix_data['condition_mapping']
ori_data = expression_matrix_data['data']
ori_col_ids = ori_data['col_ids']
ori_row_ids = ori_data['row_ids']
ori_values = ori_data['values']
labels = list(condition_map.keys())
if set(labels) != set(ori_col_ids):
error_msg = 'available labels: {}\n'.format(ori_col_ids)
error_msg += 'labels in condition_mapping: {}'.format(labels)
raise ValueError(error_msg)
condition_pos = {}
for label, condition in condition_map.items():
if condition not in condition_pos:
condition_pos.update({condition: [ori_col_ids.index(label)]})
else:
condition_list = condition_pos[condition]
condition_list.append(ori_col_ids.index(label))
condition_pos.update({condition: condition_list})
conditions = list(condition_pos.keys())
ave_values = []
for ori_value in ori_values:
ave_value = [None] * len(conditions)
for condition, poss in condition_pos.items():
ave_pos = conditions.index(condition)
sum_value = 0.0
for pos in poss:
sum_value += round(float(ori_value[pos]), 3)
average = sum_value / len(poss)
ave_value[ave_pos] = round(average, 2)
ave_values.append(ave_value)
average_data = {}
average_data.update({'row_ids': ori_row_ids})
average_data.update({'col_ids': conditions})
average_data.update({'values': ave_values})
em_data = {}
genome_ref = expression_matrix_data.get('genome_ref')
if genome_ref:
em_data.update({'genome_ref': genome_ref})
em_data.update({'scale': expression_matrix_data.get('scale')})
em_data.update({'type': expression_matrix_data.get('type')})
em_data.update({'feature_mapping': expression_matrix_data.get('feature_mapping')})
em_data.update({'condition_mapping': expression_matrix_data.get('condition_mapping')})
em_data.update({'data': average_data})
expression_matrix_name = expression_matrix_info[1]
ave_expression_matrix_name = expression_matrix_name + params.get('output_suffix')
workspace_name = params.get('workspace_name')
ave_expression_matrix_ref = self._save_expression_matrix(em_data,
ave_expression_matrix_name,
workspace_name)
returnVal = {'average_expression_matrix_ref': ave_expression_matrix_ref}
report_output = self._generate_report(ave_expression_matrix_ref,
workspace_name)
returnVal.update(report_output)
return returnVal
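# Worked illustration (added, not part of the original module): with
# condition_mapping {'s1': 'condA', 's2': 'condA', 's3': 'condB'} and an
# original row [1.0, 2.0, 3.0] for columns ['s1', 's2', 's3'], the loop in
# calculate_average_expression_matrix yields the averaged columns
# ['condA', 'condB'] with row values [round((1.0 + 2.0) / 2, 2), 3.0] == [1.5, 3.0].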
| 41.648352
| 98
| 0.61504
| 797
| 7,580
| 5.501882
| 0.2133
| 0.15325
| 0.060661
| 0.058381
| 0.123603
| 0.031471
| 0
| 0
| 0
| 0
| 0
| 0.003543
| 0.292612
| 7,580
| 181
| 99
| 41.878453
| 0.814248
| 0.120844
| 0
| 0.018018
| 0
| 0
| 0.141445
| 0.03567
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.144144
| 0.009009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef071178a07b347765b3a959b7f835718f3934a3
| 588
|
py
|
Python
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 22
|
2018-03-13T18:46:33.000Z
|
2021-11-03T09:41:39.000Z
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 5
|
2018-06-26T21:39:06.000Z
|
2020-08-03T12:53:10.000Z
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 2
|
2019-09-04T06:40:09.000Z
|
2020-07-06T01:56:44.000Z
|
from multiprocessing import Pool
import logging
def multi_process(func, data, workers):
logging.warning('Consuming list with %s workers' % workers)
p = Pool(workers)
try:
# The timeout in .get(9999999) is a workaround so KeyboardInterrupt is
# delivered promptly; without it the interrupt is swallowed. Seems to be a
# bug in multiprocessing. Will investigate later.
p.map_async(func, data).get(9999999)
p.close()
except (KeyboardInterrupt, SystemExit):
print("Caught KeyboardInterrupt, terminating workers")
except Exception as e:
print(e)
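# Usage sketch (added for illustration). The worker is defined at module
# level so it stays picklable for multiprocessing:
def _show_square(x):
    print(x * x)

if __name__ == '__main__':
    # run _show_square over the list with 2 worker processes
    multi_process(_show_square, [1, 2, 3, 4], 2)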
| 32.666667
| 114
| 0.690476
| 75
| 588
| 5.386667
| 0.706667
| 0.039604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030905
| 0.229592
| 588
| 17
| 115
| 34.588235
| 0.860927
| 0.284014
| 0
| 0
| 0
| 0
| 0.179856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef07256f31589e2d434bffa64e958f93097dc4b3
| 11,290
|
py
|
Python
|
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
import os
import yaml
from HTMLScriptExtractor import HTMLScriptExtractor
MIME_TYPE_MAP = {
'.htm': 'text/html',
'.html': 'text/html',
'.js': 'text/javascript',
'.vbs': 'text/vbscript',
'.txt': 'text/plain',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg'
}
# input:
# a dictionary "mime_type_function_dict" (mime type -> f), where "f" is a function that accepts a TransformFunctionArgument and returns a TransformFunctionArgument or a list of them
# output:
# a function "g" that accepts a single argument: a list of TransformFunctionArgument
# for each tfarg in that list, mime_type_function_dict[tfarg.metadata.mime_type] is called with the tfarg as argument
def mime_type_based_transform(mime_type_function_dict):
def g(list_of_tfarg):
new_list_of_tfarg = []
for tfarg in list_of_tfarg:
f = mime_type_function_dict.get(tfarg.metadata.mime_type, None)
ret = None
if callable(f):
ret = f(tfarg)
# Transform functions mutate the tfarg in place, so re-appending the
# original object keeps the mutated result.
if isinstance(ret, TransformFunctionArgument):
new_list_of_tfarg.append(tfarg)
elif isinstance(ret, list):
new_list_of_tfarg += ret
else:
new_list_of_tfarg.append(tfarg)
return new_list_of_tfarg
return g
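# Usage sketch (added for illustration; relies on string_to_tfarg_function
# defined just below, which mutates the tfarg in place):
#
#   upper_html = mime_type_based_transform({
#       'text/html': string_to_tfarg_function(str.upper),
#   })
#   tfargs = upper_html([TransformFunctionArgument('<b>hi</b>', 'text/html')])
#   # tfargs[0].content is now '<B>HI</B>'; non-HTML tfargs pass through unchanged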
# for use with TransformFunctionArgument.content
# function(string) -> function(TransformFunctionArgument)
def string_to_tfarg_function(f):
def g(tfarg):
tfarg.content = f(tfarg.content)
return tfarg
return g
# for use with TransformFunctionArgument.metadata.http.normalized_headers
# function(list of headers) -> function(TransformFunctionArgument)
def normalized_headers_to_tfarg_function(f):
def g(tfarg):
is_list = isinstance(tfarg, list)
tfargs = tfarg if is_list else [tfarg]
for tfa in tfargs:
tfa.metadata.http.normalized_headers = f(tfa.metadata.http.normalized_headers)
if is_list:
return tfargs
else:
return tfarg
return g
# for use with TransformFunctionArgument.metadata.http.payload
# function(bytes) -> function(TransformFunctionArgument)
def http_payload_to_tfarg_function(f):
def g(tfarg):
is_list = isinstance(tfarg, list)
tfargs = tfarg if is_list else [tfarg]
for tfa in tfargs:
tfa.metadata.http.body = f(tfa.metadata.http.body)
if is_list:
return tfargs
else:
return tfarg
return g
def replace_apply_replace_back(f, s, sub):
def g(input):
output = input.replace(s, sub)
output = f(output)
output = output.replace(sub, s)
return output
return g
class TransformFunction():
def __init__(self, name=None, description=None, *args):
self._name = name
self._description = description
self._functions = args
self.parameters = {}
@property
def name(self):
return self._name
@property
def description(self):
if self._description:
return self._description
else:
return "; ".join(f.description for f in self._functions)
def __call__(self, *args, **kwargs):
ret = args[0]
for func in self._functions:
ret = func(ret)
return ret
def parameterize(self, **kwargs):
raise NotImplementedError
@staticmethod
# Clean up the descriptions of all TransformFunction objects in "transform_functions" using the name and description properties of TransformFunction objects with an index < "index".
def cleanup_descriptions(transform_functions, index=0):
for j in reversed(range(len(transform_functions))):
test_case = transform_functions[j]
description = test_case.description
pieces = set(description.split("; "))
used_pieces = set()
new_descriptions = []
for i in range(index):
if i == j:
continue
tc = transform_functions[i]
tc_description = tc.description
tc_pieces = set(tc_description.split("; "))
has_all_pieces = all(p in pieces for p in tc_pieces)
if has_all_pieces:
used_pieces.update(tc_pieces)
new_descriptions.append(tc.name)
missing_pieces = pieces - used_pieces
test_case._description = "; ".join(new_descriptions + list(missing_pieces))
class TransformFunctionArgument():
def __init__(self, content=None, content_type=None):
self.content = content
self.metadata = MetaData(data=self, mime_type=content_type)
def __str__(self):
return self.content
def __len__(self):
return len(str(self))
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class MetaData():
def __init__(self, data, mime_type=None):
self.data = data
self.mime_type = mime_type
self.http = HttpMetaData(data, mime_type=mime_type)
class HttpMetaData():
NEWLINE = "\r\n"
def __init__(self, data, type="response", version="1.1", mime_type=None, content_length_header=True, content_type_header=False, server_header=False, connection_header=False):
self._body = None
self.data = data
self.type = type
self.host = ""
self.path = "/"
self.is_launch_path = False
self.version = version
self.status_code = 200
self.status_message = "OK"
self.mime_type = mime_type if mime_type is not None else "text/html"
self._headers = None
self._normalized_headers = None
self.server_header = server_header
self.server_header_value = ""
self.content_type_header = content_type_header
self.connection_header = connection_header
self.connection_header_value = "close"
self.content_length_header = content_length_header
@property
def normalized_headers(self):
if self._normalized_headers is None:
self._normalized_headers = []
if self.server_header:
h = "Server: {}".format(self.server_header_value)
self._normalized_headers.append(h)
if self.content_type_header:
h = "Content-Type: {}".format(self.mime_type)
self._normalized_headers.append(h)
if self.connection_header:
h = "Connection: {}".format(self.connection_header_value)
self._normalized_headers.append(h)
return self._normalized_headers
@normalized_headers.setter
def normalized_headers(self, normalized_headers):
self._normalized_headers = normalized_headers
@property
def headers(self):
if self._headers:
return self._headers
else:
headers_bytes = ""
if self.type == "response":
headers_bytes += "HTTP/{} {} {}".format(self.version, self.status_code, self.status_message) + HttpMetaData.NEWLINE
else:
pass # TODO
# Assumption: the "headers" property will only be called after modifications to the payload are complete
# -> content-length will not be updated after accessing this property for the first time
self.normalized_headers
if self.content_length_header:
h = "Content-Length: {}".format(len(self.body))
self._normalized_headers.append(h)
for h in self._normalized_headers:
headers_bytes += h + HttpMetaData.NEWLINE
headers_bytes += HttpMetaData.NEWLINE
self._headers = headers_bytes
return self._headers
@headers.setter
def headers(self, headers):
self._headers = headers
# normalized body: before chunking, compression, etc.
@property
def payload(self):
return self.data.content
# raw body: after chunking, compression, etc.
@property
def body(self):
if self._body is None:
return self.payload
else:
return self._body
@body.setter
def body(self, value):
self._body = value
@staticmethod
def copy_server_headers(input_hmd, output_hmd):
output_hmd.server_header = input_hmd.server_header
output_hmd.server_header_value = input_hmd.server_header_value
output_hmd.content_type_header = input_hmd.content_type_header
output_hmd.content_length_header = input_hmd.content_length_header
output_hmd.connection_header = input_hmd.connection_header
def IsYaml(filepath):
return os.path.splitext(filepath)[-1].lower() == ".yaml"
# returns list of baseline
# baseline := dictionary of "host", "path", "filepath", "content"
def ParseBaselineYaml(filepath):
filepath = os.path.normpath(filepath.replace("\\", "/")) # normalize
baselines = []
with open(filepath) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if "include" in data:
for include_yaml in data["include"]:
baselines.extend(ParseBaselineYaml(os.path.join(os.path.abspath(os.path.dirname(filepath)), include_yaml)))
else:
if data['baselines'] is None:
return baselines
for baseline in data['baselines']:
normalized_filepath = os.path.normpath(baseline["filepath"].replace("\\", "/"))
bl = {
"host": baseline["host"] if "host" in baseline else "",
"path": baseline["path"] if "path" in baseline else normalized_filepath.replace("\\", "/"),
"filepath": normalized_filepath,
"content": open(os.path.join(os.path.abspath(os.path.dirname(filepath)), normalized_filepath), "r").read(),
}
if bl["path"][0] != "/":
bl["path"] = "/" + bl["path"]
baselines.append(bl)
return baselines
# returns list of testcase
# testcase := dictionary of "host", "path", "casename"
def ParseTestcaseYaml(filepath):
filepath = os.path.normpath(filepath.replace("\\", "/")) # normalize
baselines = []
with open(filepath) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if data is None:
return baselines
if "include" in data:
for include_yaml in data["include"]:
baselines.extend(ParseTestcaseYaml(os.path.join(os.path.abspath(os.path.dirname(filepath)), include_yaml)))
else:
if data['baselines'] is None:
return baselines
for baseline in data['baselines']:
bl = {
"host": baseline["host"] if "host" in baseline else "",
"path": baseline["path"] if "path" in baseline else "",
"casename": baseline["casename"]
}
if bl["path"] and bl["path"][0] != "/":
bl["path"] = "/" + bl["path"]
baselines.append(bl)
return baselines
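# Example baseline YAML accepted by the parsers above (added for illustration):
#
#   baselines:
#     - host: example.com
#       path: /index.html
#       filepath: pages/index.html
#
# ParseBaselineYaml resolves "filepath" relative to the YAML file and inlines
# the file content; ParseTestcaseYaml expects a "casename" key instead of
# "filepath". Both support an "include" list of further YAML files.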
| 35.84127
| 184
| 0.615766
| 1,288
| 11,290
| 5.184783
| 0.162267
| 0.023959
| 0.037736
| 0.010482
| 0.300988
| 0.22941
| 0.221923
| 0.193471
| 0.193471
| 0.193471
| 0
| 0.001363
| 0.285297
| 11,290
| 314
| 185
| 35.955414
| 0.826249
| 0.132418
| 0
| 0.310484
| 0
| 0
| 0.043518
| 0
| 0
| 0
| 0
| 0.003185
| 0
| 1
| 0.129032
| false
| 0.004032
| 0.012097
| 0.020161
| 0.298387
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef0a465c711275ee344dd982144bb689f29fa28c
| 4,409
|
py
|
Python
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 1
|
2019-07-24T12:56:16.000Z
|
2019-07-24T12:56:16.000Z
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 14
|
2019-03-12T08:49:34.000Z
|
2019-04-04T09:51:16.000Z
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 2
|
2019-10-13T14:45:11.000Z
|
2019-12-24T22:22:46.000Z
|
from annotatelib.models import (
models, class_from_filename,
table_name_from_filename, _get_column_description_from_object,
_get_indices_description_from_oject
)
import sqlite3
from orator import DatabaseManager
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '../')))
def test_models():
result = models('tests/fixture_models')
result.sort()
result = list(map(lambda x: os.path.split(x)[1], result))
assert result == ['fixture_model_1.py', 'fixture_model_2.py', 'tasks.py']
def test_class_from_filename():
assert class_from_filename('class_name.py') == 'ClassName'
def test_class_from_filename_multiple():
assert class_from_filename('class_name_sfsaa.py') == 'ClassNameSfsaa'
def test_table_name_from_filename():
assert table_name_from_filename(
'engine_model_names.py') == 'engine_model_names'
def test_get_column_description_from_object():
database = "test.db"
create_database(database)
config = {
'sqlite3': {
'driver': 'sqlite',
'database': database
}
}
db = DatabaseManager(config)
result = _get_column_description_from_object(
db.get_schema_manager(), 'tasks')
assert result == {
'id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 1, 'precision': 10, 'name': 'id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': False, 'fixed': False},
'status_id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'status_id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': True, 'fixed': False},
'project_id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'project_id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': True, 'fixed': False},
'name': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'name', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False},
'end_date': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'end_date', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False},
'priority': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'priority', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': False, 'fixed': False},
'begin_date': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'begin_date', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False}}
drop_database(database)
def test_get_indices_description_from_object():
database = "test.db"
create_database(database)
config = {
'sqlite3': {
'driver': 'sqlite',
'database': database
}
}
db = DatabaseManager(config)
result = _get_indices_description_from_oject(
db.get_schema_manager(), 'tasks')
assert result == {'primary': {'is_unique?': True,
'is_primary?': True, 'columns': ['id']}}
drop_database(database)
def create_database(database):
sql_create_tasks_table = """CREATE TABLE IF NOT EXISTS tasks (
id integer PRIMARY KEY,
name text NOT NULL,
priority integer,
status_id integer NOT NULL,
project_id integer NOT NULL,
begin_date text NOT NULL,
end_date text NOT NULL,
FOREIGN KEY (project_id) REFERENCES projects (id)
);"""
# create a database connection
conn = sqlite3.connect(database)
# create tasks table
c = conn.cursor()
c.execute(sql_create_tasks_table)
def drop_database(database):
os.remove(database)
def truncate_file(file_path):
with open(file_path, 'r+') as f:
f.truncate(0)
| 41.205607
| 148
| 0.577682
| 479
| 4,409
| 5.100209
| 0.227557
| 0.039296
| 0.074499
| 0.088825
| 0.557102
| 0.487106
| 0.460909
| 0.431437
| 0.431437
| 0.379042
| 0
| 0.011452
| 0.267181
| 4,409
| 106
| 149
| 41.59434
| 0.744661
| 0.01066
| 0
| 0.211765
| 0
| 0
| 0.344804
| 0.004818
| 0
| 0
| 0
| 0
| 0.070588
| 1
| 0.105882
| false
| 0
| 0.058824
| 0
| 0.164706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef0f95f25a14e3a1c31217d9a079a1f1c52c743d
| 541
|
py
|
Python
|
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
"""
messages
"""
from .color import ENDC, FAIL, OKBLUE, YELLOW
EXE_SCRIPT_ERR_MSG = '{0}[!]{1} An error occurred while executing script in Pipfile'.format(
FAIL, ENDC
)
KEYWORD_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} in {3}[scripts]{1} keyword not found!".format(
FAIL, ENDC, OKBLUE, YELLOW
)
FILE_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} not found!".format(
FAIL, ENDC, OKBLUE
)
KEYBOARD_INTERRUPT_MSG = "{0}[!]{1} KeyboardInterrupt".format(FAIL, ENDC)
INQUIRER_MSG = "{0}Select Pipfile script to run{1}".format(YELLOW, ENDC)
| 31.823529
| 95
| 0.685767
| 82
| 541
| 4.378049
| 0.439024
| 0.05571
| 0.05571
| 0.066852
| 0.278552
| 0.278552
| 0.122563
| 0.122563
| 0
| 0
| 0
| 0.034261
| 0.136784
| 541
| 16
| 96
| 33.8125
| 0.734475
| 0.014787
| 0
| 0
| 0
| 0.083333
| 0.413333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef1093497c62d32b5e459bb8bfbe26c27ca18a49
| 2,101
|
py
|
Python
|
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
import sys
import logging
import pymysql
import json
import os
#rds settings - Lambda role must have RDS access
rds_host = os.environ['RDS_HOST'] # Set in Lambda Dashboard
name = os.environ['DB_USERNAME']
password = os.environ['DB_PW']
db_name = os.environ['DB_NAME']
db_table = os.environ['DB_TABLE']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def connecttodb():
try:
conn = pymysql.connect(rds_host, user=name,
passwd=password, db=db_name, connect_timeout=5)
logger.info("SUCCESS: Connection to RDS mysql instance succeeded")
return conn
except Exception as e:
logger.error(
"ERROR: Unexpected error: Could not connect to MySql instance: %s", e)
sys.exit()
def writemessagetodb(event):
conn = connecttodb()
_eventid = str(event["event_id"])
_userid = str(event["user"])
_msgtext = event["text"]
_timestamp = str(event["event_time"])
insertstatement = 'INSERT INTO `' + db_table + \
r"""` (`eventid`, `userid`, `msgtxt`) VALUES (%s, %s, %s)"""
with conn.cursor() as cur:
cur.execute(insertstatement, (_eventid, _userid, _msgtext))
conn.commit()
print("Message successfully inserted into DB")
def handler(event, context):
"""
This function handles posts from Amazon SNS. Currently it:
1) Inserts the request into an RDS MySQL DB
Current Assumptions:
1) Messages don't contain special characters - i.e: '
2) Requests are correctly formatted (contain body and event, and event contains the expected values)
"""
print("In logevent: ", event)
try:
slackevent = json.loads(event["Records"][0]["Sns"]["Message"])
writemessagetodb(slackevent)
response = {
"statusCode": 200,
"body": event
}
except Exception as e:
''' Just a stub. Please make this better in real use :) '''
logger.error(f"ERROR: {e}")
response = {
"statusCode": 400,
"body": event
}
return response
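# Example SNS event shape consumed by handler() (added for illustration):
#
#   {"Records": [{"Sns": {"Message":
#       "{\"event_id\": \"Ev123\", \"user\": \"U042\", \"text\": \"hello\",
#         \"event_time\": 1500000000}"}}]}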
| 29.180556
| 107
| 0.619229
| 250
| 2,101
| 5.12
| 0.524
| 0.035156
| 0.034375
| 0.023438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007106
| 0.263208
| 2,101
| 71
| 108
| 29.591549
| 0.819767
| 0.174203
| 0
| 0.08
| 0
| 0
| 0.186827
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.04
| 0.1
| 0
| 0.2
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef12df78f36f2adabef28423fa54313ee1270534
| 1,707
|
py
|
Python
|
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
import requests
from elasticsearch import Elasticsearch, client
from elasticsearch.exceptions import RequestError
es = Elasticsearch()
# retrieve all QIDs from the populated reframe ES index
body = {
"_source": {
"includes": ["qid"],
},
"query": {
"query_string": {
"query": "Q*",
"fields": ['qid']
}
},
"from": 0, "size": 10000,
}
es.indices.refresh(index="reframe")
r = es.search(index="reframe", body=body)
bd = {
'mapping': {
'total_fields': {
'limit': 30000
}
}
}
c = client.IndicesClient(es)
# check if index exists, otherwise, create
if c.exists(index='wikidata'):
c.put_settings(index='wikidata', body=bd)
else:
c.create(index='wikidata', body=bd)
session = requests.Session()
for count, hit in enumerate(r['hits']['hits']):
qid = hit['_source']['qid']
header = {
'Accept': 'application/json'
}
resp = session.get('http://www.wikidata.org/entity/{}'.format(qid), headers=header).json()
obj = resp['entities'][qid]
del obj['descriptions']
for claim, value in obj['claims'].items():
for x in value:
if 'references' in x:
del x['references']
if es.exists(index='wikidata', doc_type='compound', id=qid):
es.update(index='wikidata', id=qid, doc_type='compound', body={'doc': obj})
else:
try:
res = es.index(index="wikidata", doc_type='compound', id=qid, body=obj)
except RequestError as e:
print(e)
if count % 100 == 0:
print('imported ', count)
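# Note (added): Elasticsearch caps "from" + "size" at index.max_result_window
# (10,000 by default), so the search above returns at most the first 10,000
# QIDs; a scroll or search_after query would be needed for larger indices.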
| 21.884615
| 91
| 0.565319
| 200
| 1,707
| 4.785
| 0.445
| 0.081505
| 0.047022
| 0.039707
| 0.068966
| 0.068966
| 0.068966
| 0
| 0
| 0
| 0
| 0.012048
| 0.27065
| 1,707
| 77
| 92
| 22.168831
| 0.756627
| 0.088459
| 0
| 0.038462
| 0
| 0
| 0.193798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef1825ce5af0c1bb4c24887ac8d1e612fd32ac97
| 5,383
|
py
|
Python
|
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | 7
|
2021-04-29T05:23:56.000Z
|
2022-03-23T02:26:55.000Z
|
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | null | null | null |
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | 4
|
2021-06-10T19:02:57.000Z
|
2021-12-06T01:31:06.000Z
|
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import re
from exception import VerifyFailure
"""
Generate Rst Test Result Report
Example:
import rst
rst.write_title("Test Case: " + test_case.__name__)
out = table.draw()
rst.write_text('\n' + out + '\n\n')
rst.write_result("PASS")
Result:
<copyright>
<Prerequisites>
Test Case: CASE
---------------
Result: PASS
"""
path2Plan = 'test_plans'
path2Result = 'output'
class RstReport(object):
def __init__(self, crbName, target, nic, suite, perf=False):
"""
copy desc from #Name#_test_plan.rst to TestResult_#Name#.rst
"""
try:
path = [path2Result, crbName, target, nic]
# ensure every level of the output folder hierarchy exists
for node in range(0, len(path)):
if not os.path.exists('/'.join(path[:node + 1])):
for level in range(node, len(path)):
os.mkdir('/'.join(path[:level + 1]))
break
self.rstName = "%s/TestResult_%s.rst" % ('/'.join(path), suite)
rstReport = open(self.rstName, 'w')
if perf is True:
self.rstAnnexName = "%s/TestResult_%s_Annex.rst" % (
'/'.join(path), suite)
rstAnnexReport = open(self.rstAnnexName, 'w')
f = open("%s/%s_test_plan.rst" % (path2Plan, suite), 'r')
for line in f:
if line[:13] == "Prerequisites":
break
rstReport.write(line)
if perf is True:
rstAnnexReport.write(line)
f.close()
rstReport.close()
except Exception as e:
raise VerifyFailure("RST Error: " + str(e))
def clear_all_rst(self, crbName, target):
path = [path2Result, crbName, target]
shutil.rmtree('/'.join(path), True)
def write_title(self, text):
"""
write case title Test Case: #Name#
-----------------
"""
line = "\n%s\n" % text
with open(self.rstName, "a") as f:
f.write(line)
f.write('-' * len(line) + '\n')
def write_annex_title(self, text):
"""
write annex to test case title Annex to #Name#
-----------------
"""
line = "\n%s\n" % text
with open(self.rstAnnexName, "a") as f:
f.write(line)
f.write('-' * len(line) + '\n')
def write_text(self, text, annex=False):
rstFile = self.rstAnnexName if annex else self.rstName
with open(rstFile, "a") as f:
f.write(text)
def write_frame(self, text, annex=False):
self.write_text("\n::\n\n", annex)
parts = re.findall(r'\S+', text)
text = ""
length = 0
for part in parts:
if length + len(part) > 75:
text = text + "\n" + " " + part
length = len(part)
else:
length = length + len(part)
text = text + " " + part
self.write_text(text, annex)
self.write_text("\n\n", annex)
def write_result(self, result):
with open(self.rstName, "a") as f:
f.write("\nResult: " + result + "\n")
def include_image(self, image, width=90):
"""
Includes an image in the RST file.
The argument must include path, name and extension.
"""
with open(self.rstName, "a") as f:
f.write(".. image:: %s\n :width: %d%%\n\n" % (image, width))
def report(self, text, frame=False, annex=False):
"""
Save report text into rst file.
"""
if frame:
self.write_frame(text, annex)
else:
self.write_text(text, annex)
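# Usage sketch (added for illustration; assumes test_plans/<suite>_test_plan.rst
# exists, since the constructor copies its preamble):
#
#   rst = RstReport('crb0', 'x86_64-native-linuxapp-gcc', 'ena', 'hello_world')
#   rst.write_title('Test Case: example')
#   rst.report('some long framed output', frame=True)
#   rst.write_result('PASS')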
| 33.228395
| 75
| 0.583132
| 668
| 5,383
| 4.646707
| 0.333832
| 0.020296
| 0.006443
| 0.008054
| 0.147874
| 0.105348
| 0.105348
| 0.105348
| 0.105348
| 0.06701
| 0
| 0.006107
| 0.30039
| 5,383
| 161
| 76
| 33.434783
| 0.818109
| 0.347576
| 0
| 0.22973
| 0
| 0
| 0.06638
| 0.008587
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121622
| false
| 0
| 0.054054
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef19d273749fc5c7cda4c1d9c7f1b0e4fb378f5e
| 30,467
|
py
|
Python
|
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
"""Mutation.
Usage:
mutation play [--verbose] [--exclude=<globs>] [--only-deadcode-detection] [--include=<globs>] [--sampling=<s>] [--randomly-seed=<n>] [--max-workers=<n>] [<file-or-directory> ...] [-- TEST-COMMAND ...]
mutation replay [--verbose] [--max-workers=<n>]
mutation list
mutation show MUTATION
mutation apply MUTATION
mutation (-h | --help)
mutation --version
Options:
--verbose Show more information.
-h --help Show this screen.
--version Show version.
"""
import asyncio
import fnmatch
import functools
import itertools
import os
import random
import re
import shlex
import sys
import time
from ast import Constant
from concurrent import futures
from contextlib import contextmanager
from copy import deepcopy
from datetime import timedelta
from difflib import unified_diff
from uuid import UUID
import lexode
import parso
import pygments
import pygments.formatters
import pygments.lexers
import zstandard as zstd
from aiostream import pipe, stream
from astunparse import unparse
from coverage import Coverage
from docopt import docopt
from humanize import precisedelta
from loguru import logger as log
from lsm import LSM
from pathlib3x import Path
from termcolor import colored
from tqdm import tqdm
from ulid import ULID
__version__ = (0, 4, 4)
MINUTE = 60 # seconds
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 31 * DAY
def humanize(seconds):
if seconds < 1:
precision = "seconds"
elif seconds // DAY != 0:
precision = "days"
elif seconds // HOUR != 0:
precision = "hours"
elif seconds // MINUTE != 0:
precision = "minutes"
else:
precision = "seconds"
return precisedelta(timedelta(seconds=seconds), minimum_unit=precision)
PRONOTION = "https://youtu.be/ihZEaj9ml4w?list=PLOSNaPJYYhrtliZqyEWDWL0oqeH0hOHnj"
log.remove()
if os.environ.get("DEBUG", False):
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="TRACE",
colorize=True,
enqueue=True,
)
else:
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="INFO",
colorize=True,
enqueue=True,
)
# The function patch was taken somewhere over the rainbow...
_hdr_pat = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
def patch(diff, source):
"""Apply unified diff patch to string s to recover newer string. If
revert is True, treat s as the newer string, recover older string.
"""
s = source.splitlines(True)
p = diff.splitlines(True)
t = ""
i = sl = 0
(midx, sign) = (1, "+")
while i < len(p) and p[i].startswith(("---", "+++")):
i += 1 # skip header lines
while i < len(p):
m = _hdr_pat.match(p[i])
if not m:
raise Exception("Cannot process diff")
i += 1
l = int(m.group(midx)) - 1 + (m.group(midx + 1) == "0")
t += "".join(s[sl:l])
sl = l
while i < len(p) and p[i][0] != "@":
if i + 1 < len(p) and p[i + 1][0] == "\\":
line = p[i][:-1]
i += 2
else:
line = p[i]
i += 1
if len(line) > 0:
if line[0] == sign or line[0] == " ":
t += line[1:]
sl += line[0] != sign
t += "\n" + "".join(s[sl:])
return t
def glob2predicate(patterns):
def regex_join(regexes):
"""Combine a list of regexes into one that matches any of them."""
return "|".join("(?:%s)" % r for r in regexes)
regexes = (fnmatch.translate(pattern) for pattern in patterns)
regex = re.compile(regex_join(regexes))
def predicate(path):
return regex.match(path) is not None
return predicate
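# Example (added): a predicate matching any of several glob patterns.
#   is_python = glob2predicate(['*.py', '*.pyi'])
#   is_python('mutation.py')   # True
#   is_python('README.md')     # False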
def node_iter(node, level=1):
yield node
for child in node.children:
if not getattr(child, "children", False):
yield child
continue
yield from node_iter(child, level + 1)
def node_copy_tree(node, index):
root = node.get_root_node()
root = deepcopy(root)
iterator = itertools.dropwhile(
lambda x: x[0] != index, zip(itertools.count(0), node_iter(root))
)
index, node = next(iterator)
return root, node
@contextmanager
def timeit():
start = time.perf_counter()
yield lambda: time.perf_counter() - start
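# Example (added): timeit() yields a callable reporting elapsed seconds, so
# the duration can be read after the block (do_work() stands in for any work):
#   with timeit() as elapsed:
#       do_work()
#   log.info('took {}', humanize(elapsed()))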
class Mutation(type):
ALL = set()
DEADCODE = set()
deadcode_detection = False
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
obj = cls()
type(cls).ALL.add(obj)
if cls.deadcode_detection:
type(cls).DEADCODE.add(obj)
class StatementDrop(metaclass=Mutation):
deadcode_detection = True
NEWLINE = "a = 42\n"
def predicate(self, node):
return "stmt" in node.type and node.type != "expr_stmt"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
index = new.parent.children.index(new)
passi = parso.parse("pass").children[0]
passi.prefix = new.get_first_leaf().prefix
new.parent.children[index] = passi
newline = parso.parse(type(self).NEWLINE).children[0].children[1]
new.parent.children.insert(index + 1, newline)
yield root, new
class DefinitionDrop(metaclass=Mutation):
deadcode_detection = True
def predicate(self, node):
# There is also node.type == 'lambdef', but lambdas are always part
# of an assignment statement, so that case is handled in StatementDrop.
return node.type in ("classdef", "funcdef")
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
new.parent.children.remove(new)
yield root, new
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable."""
it = iter(iterable)
while chunk := tuple(itertools.islice(it, n)):
yield chunk
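# Example (added): list(chunks(range(5), 2)) == [(0, 1), (2, 3), (4,)]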
class MutateNumber(metaclass=Mutation):
COUNT = 5
def predicate(self, node):
return node.type == "number"
def mutate(self, node, index):
value = eval(node.value)
if isinstance(value, int):
def randomize(x):
return random.randint(0, x)
else:
def randomize(x):
return random.random() * x
for size in range(8, 32):
if value < 2 ** size:
break
count = 0
while count != self.COUNT:
count += 1
root, new = node_copy_tree(node, index)
new.value = str(randomize(2 ** size))
if new.value == node.value:
continue
yield root, new
class MutateString(metaclass=Mutation):
def predicate(self, node):
# str or bytes.
return node.type == "string"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
value = eval(new.value)
if isinstance(value, bytes):
value = b"coffeebad" + value
else:
value = "mutated string " + value
value = Constant(value=value, kind="")
value = unparse(value).strip()
new.value = value
yield root, new
class MutateKeyword(metaclass=Mutation):
KEYWORDS = set(["continue", "break", "pass"])
SINGLETON = set(["True", "False", "None"])
# TODO: support the xor operator ^
BOOLEAN = set(["and", "or"])
TARGETS = KEYWORDS | SINGLETON | BOOLEAN
def predicate(self, node):
return node.type == "keyword" and node.value in type(self).TARGETS
def mutate(self, node, index):
value = node.value
for targets in [self.KEYWORDS, self.SINGLETON, self.BOOLEAN]:
if value in targets:
break
else:
raise NotImplementedError
for target in targets:
if target == value:
continue
root, new = node_copy_tree(node, index)
new.value = target
yield root, new
class Comparison(metaclass=Mutation):
def predicate(self, node):
return node.type == "comparison"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
not_test = parso.parse("not ({})".format(new.get_code()))
index = new.parent.children.index(new)
new.parent.children[index] = not_test
yield root, new  # mutate() must be a generator, like the other mutations
class MutateOperator(metaclass=Mutation):
BINARY = ["+", "-", "%", "|", "&", "//", "/", "*", "^", "**", "@"]
BITWISE = ["<<", ">>"]
COMPARISON = ["<", "<=", "==", "!=", ">=", ">"]
ASSIGNEMENT = ["="] + [x + "=" for x in BINARY + BITWISE]
# TODO support OPERATORS_CONTAINS = ["in", "not in"]
OPERATORS = [
BINARY,
BITWISE,
COMPARISON,
ASSIGNEMENT,
]
def predicate(self, node):
return node.type == "operator"
def mutate(self, node, index):
for operators in type(self).OPERATORS:
if node.value not in operators:
continue
for new_operator in operators:
if node.value == new_operator:
continue
root, new = node_copy_tree(node, index)
new.value = new_operator
yield root, new
def diff(source, target, filename=""):
lines = unified_diff(
source.split("\n"), target.split("\n"), filename, filename, lineterm=""
)
out = "\n".join(lines)
return out
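# Roundtrip sketch (added): patch() inverts diff() for the sources this tool
# mutates, e.g.
#   assert patch(diff('a = 1\n', 'a = 2\n', 'f.py'), 'a = 1\n') == 'a = 2\n'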
def mutate(node, index, mutations):
for mutation in mutations:
if not mutation.predicate(node):
continue
yield from mutation.mutate(node, index)
def interesting(new_node, coverage):
if getattr(new_node, "line", False):
return new_node.line in coverage
return new_node.get_first_leaf().line in coverage
def deltas_compute(source, path, coverage, mutations):
ast = parso.parse(source)
ignored = 0
for (index, node) in zip(itertools.count(0), node_iter(ast)):
for root, new_node in mutate(node, index, mutations):
if not interesting(new_node, coverage):
ignored += 1
continue
target = root.get_code()
delta = diff(source, target, path)
yield delta
if ignored > 1:
msg = "Ignored {} mutations from file at {}"
msg += " because there is no associated coverage."
log.trace(msg, ignored, path)
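# Note (added): the helper below keeps the executor saturated. It schedules
# `p` for each streamed item via run_in_executor, waits with FIRST_COMPLETED,
# applies `f` to every finished result on the event loop side, then tops the
# in-flight task set back up to pool._max_workers.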
async def pool_for_each_par_map(loop, pool, f, p, iterator):
zx = stream.iterate(iterator)
zx = zx | pipe.map(lambda x: loop.run_in_executor(pool, p, x))
async with zx.stream() as streamer:
limit = pool._max_workers
unfinished = []
while True:
tasks = []
for i in range(limit):
try:
task = await streamer.__anext__()
except StopAsyncIteration:
limit = 0
else:
tasks.append(task)
tasks = tasks + list(unfinished)
if not tasks:
break
finished, unfinished = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
for finish in finished:
out = finish.result()
f(out)
limit = pool._max_workers - len(unfinished)
def mutation_create(item):
path, source, coverage, mutation_predicate = item
if not coverage:
msg = "Ignoring file {} because there is no associated coverage."
log.trace(msg, path)
return []
log.trace("Mutating file: {}...", path)
mutations = [m for m in Mutation.ALL if mutation_predicate(m)]
deltas = deltas_compute(source, path, coverage, mutations)
# Return the compressed deltas to save some time in the main thread.
out = [(path, zstd.compress(x.encode("utf8"))) for x in deltas]
log.trace("There is {} mutations for the file `{}`", len(out), path)
return out
def install_module_loader(uid):
db = LSM(".mutation.okvslite")
mutation_show(uid.hex)
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path) as f:
source = f.read()
patched = patch(diff, source)
import imp
components = path[:-3].split("/")
while components:
for pythonpath in sys.path:
filepath = os.path.join(pythonpath, "/".join(components))
filepath += ".py"
ok = os.path.exists(filepath)
if ok:
module_path = ".".join(components)
break
else:
components.pop()
continue
break
if module_path is None:
raise Exception("sys.path oops!")
patched_module = imp.new_module(module_path)
try:
exec(patched, patched_module.__dict__)
except Exception:
# TODO: a SyntaxError here means the mutation is invalid; avoid producing such mutations upstream.
exec("", patched_module.__dict__)
sys.modules[module_path] = patched_module
def pytest_configure(config):
mutation = config.getoption("mutation", default=None)
if mutation is not None:
uid = UUID(hex=mutation)
install_module_loader(uid)
def pytest_addoption(parser, pluginmanager):
parser.addoption("--mutation", dest="mutation", type=str)
def for_each_par_map(loop, pool, inc, proc, items):
out = []
for item in items:
item = proc(item)
item = inc(item)
out.append(item)
return out
def mutation_pass(args): # TODO: rename
command, uid, timeout = args
command = command + ["--mutation={}".format(uid.hex)]
out = run(command, timeout=timeout, silent=True)
if out == 0:
msg = "no error with mutation: {} ({})"
log.trace(msg, " ".join(command), out)
with database_open(".") as db:
db[lexode.pack([2, uid])] = b"\x00"
return False
else:
# TODO: pass root path...
with database_open(".") as db:
del db[lexode.pack([2, uid])]
return True
PYTEST = "pytest --exitfirst --no-header --tb=no --quiet --assert=plain"
PYTEST = shlex.split(PYTEST)
def coverage_read(root):
coverage = Coverage(".coverage") # use pathlib
coverage.load()
data = coverage.get_data()
filepaths = data.measured_files()
out = dict()
root = root.resolve()
for filepath in filepaths:
key = str(Path(filepath).relative_to(root))
value = set(data.lines(filepath))
log.trace("coverage includes: {}", key)
out[key] = value
return out
def database_open(root, recreate=False):
root = root if isinstance(root, Path) else Path(root)
db = root / ".mutation.okvslite"
if recreate and db.exists():
log.trace("Deleting existing database...")
for file in root.glob(".mutation.okvslite*"):
file.unlink()
if not recreate and not db.exists():
log.error("No database, can not proceed!")
sys.exit(1)
db = LSM(str(db))
return db
def run(command, timeout=None, silent=True):
if timeout and timeout < 60:
timeout = 60
if timeout:
command.insert(0, "timeout {}".format(timeout))
command.insert(0, "PYTHONDONTWRITEBYTECODE=1")
if silent and not os.environ.get("DEBUG"):
command.append("> /dev/null 2>&1")
return os.system(" ".join(command))
def sampling_setup(sampling, total):
if sampling is None:
return lambda x: x, total
if sampling.endswith("%"):
# randomly choose percent mutations
cutoff = float(sampling[:-1]) / 100
def sampler(iterable):
for item in iterable:
value = random.random()
if value < cutoff:
yield item
total = int(total * cutoff)
elif sampling.isdigit():
# otherwise, it is the first COUNT mutations that are used.
total = int(sampling)
def sampler(iterable):
remaining = total
for item in iterable:
yield item
remaining -= 1
if remaining == 0:
return
else:
msg = "Sampling passed via --sampling option must be a positive"
msg += " integer or a percentage!"
log.error(msg)
sys.exit(2)
if sampling:
log.info("Taking into account sampling there is {} mutations.", total)
return sampler, total
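# Examples (added): --sampling=10% keeps each mutation with probability 0.1;
# --sampling=100 keeps only the first 100 mutations from the stream.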
# TODO: the `command` parameter is a hack; maybe there is a way to avoid
# the `if command is not None` branch below.
def check_tests(root, seed, arguments, command=None):
max_workers = arguments["--max-workers"] or (os.cpu_count() - 1) or 1
max_workers = int(max_workers)
log.info("Let's check that the tests are green...")
if arguments["<file-or-directory>"] and arguments["TEST-COMMAND"]:
log.error("<file-or-directory> and TEST-COMMAND are exclusive!")
sys.exit(1)
if command is not None:
command = list(command)
if max_workers > 1:
command.extend(
[
# Use pytest-xdist to make sure it is possible to run the
# tests in parallel
"--numprocesses={}".format(max_workers),
]
)
else:
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
if max_workers > 1:
command.append(
# Use pytest-xdist to make sure it is possible to run
# the tests in parallel
"--numprocesses={}".format(max_workers)
)
command.extend(
[
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
)
with timeit() as alpha:
out = run(command)
if out == 0:
log.info("Tests are green 💚")
alpha = alpha() * max_workers
else:
msg = "Tests are not green... return code is {}..."
log.warning(msg, out)
log.warning("I tried the following command: `{}`", " ".join(command))
# Same command without parallelization
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
command += [
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
with timeit() as alpha:
out = run(command)
if out != 0:
msg = "Tests are definitly red! Return code is {}!!"
log.error(msg, out)
log.error("I tried the following command: `{}`", " ".join(command))
sys.exit(2)
# Otherwise, it is possible to run the tests but without
# parallelization.
msg = "Setting max_workers=1 because tests do not pass in parallel"
log.warning(msg)
max_workers = 1
alpha = alpha()
msg = "Time required to run the tests once: {}..."
log.info(msg, humanize(alpha))
return alpha, max_workers
def mutation_only_deadcode(x):
return getattr(x, "deadcode_detection", False)
def mutation_all(x):
return True
async def play_create_mutations(loop, root, db, max_workers, arguments):
# Go through all files, and produce mutations, take into account
# include pattern, and exclude patterns. Also, exclude what has
# no coverage.
include = arguments.get("--include") or "*.py"
include = include.split(",")
include = glob2predicate(include)
exclude = arguments.get("--exclude") or "*test*"
exclude = exclude.split(",")
exclude = glob2predicate(exclude)
filepaths = root.rglob("*.py")
filepaths = (x for x in filepaths if include(str(x)) and not exclude(str(x)))
# setup coverage support
coverage = coverage_read(root)
only_dead_code = arguments["--only-deadcode-detection"]
if only_dead_code:
mutation_predicate = mutation_only_deadcode
else:
mutation_predicate = mutation_all
def make_item(filepath):
with filepath.open() as f:
content = f.read()
out = (
str(filepath),
content,
coverage.get(str(filepath), set()),
mutation_predicate,
)
return out
items = (make_item(x) for x in filepaths if coverage.get(str(x), set()))
# Start with biggest files first, because that is those that will
# take most time, that way, it will make most / best use of the
# workers.
items = sorted(items, key=lambda x: len(x[1]), reverse=True)
# prepare to create mutations
total = 0
log.info("Crafting mutations from {} files...", len(items))
with tqdm(total=len(items), desc="Files") as progress:
def on_mutations_created(items):
nonlocal total
progress.update()
total += len(items)
for path, delta in items:
# TODO: replace ULID with a content addressable hash.
uid = ULID().to_uuid()
# delta is a compressed unified diff
db[lexode.pack([1, uid])] = lexode.pack([path, delta])
with timeit() as delta:
with futures.ProcessPoolExecutor(max_workers=max_workers) as pool:
await pool_for_each_par_map(
loop, pool, on_mutations_created, mutation_create, items
)
log.info("It took {} to compute mutations...", humanize(delta()))
log.info("The number of mutation is {}!", total)
return total
async def play_mutations(loop, db, seed, alpha, total, max_workers, arguments):
# prepare to run tests against mutations
command = list(arguments["TEST-COMMAND"] or PYTEST)
command.append("--randomly-seed={}".format(seed))
command.extend(arguments["<file-or-directory>"])
eta = humanize(alpha * total / max_workers)
log.success("It will take at most {} to run the mutations", eta)
timeout = alpha * 2
uids = db[lexode.pack([1]) : lexode.pack([2])]
uids = ((command, lexode.unpack(key)[1], timeout) for (key, _) in uids)
# sampling
sampling = arguments["--sampling"]
sampler, total = sampling_setup(sampling, total)
uids = sampler(uids)
step = 10
gamma = time.perf_counter()
remaining = total
log.info("Testing mutations in progress...")
with tqdm(total=100) as progress:
def on_progress(_):
nonlocal remaining
nonlocal step
nonlocal gamma
remaining -= 1
if (remaining % step) == 0:
percent = 100 - ((remaining / total) * 100)
now = time.perf_counter()
delta = now - gamma
eta = (delta / step) * remaining
progress.update(int(percent))
progress.set_description("ETA {}".format(humanize(eta)))
msg = "Mutation tests {:.2f}% done..."
log.debug(msg, percent)
log.debug("ETA {}...", humanize(eta))
for speed in [10_000, 1_000, 100, 10, 1]:
if total // speed == 0:
continue
step = speed
break
gamma = time.perf_counter()
with timeit() as delta:
with futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
await pool_for_each_par_map(
loop, pool, on_progress, mutation_pass, uids
)
errors = len(list(db[lexode.pack([2]) : lexode.pack([3])]))
if errors > 0:
msg = "It took {} to compute {} mutation failures!"
log.error(msg, humanize(delta()), errors)
else:
msg = "Checking that the test suite is strong against mutations took:"
msg += " {}... And it is a success 💚"
log.info(msg, humanize(delta()))
return errors
async def play(loop, arguments):
root = Path(".")
seed = arguments["--randomly-seed"] or int(time.time())
log.info("Using random seed: {}".format(seed))
random.seed(seed)
alpha, max_workers = check_tests(root, seed, arguments)
with database_open(root, recreate=True) as db:
# store arguments used to execute command
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command += arguments["<file-or-directory>"]
command = dict(
command=command,
seed=seed,
)
value = list(command.items())
db[lexode.pack((0, "command"))] = lexode.pack(value)
# let's create mutations!
count = await play_create_mutations(loop, root, db, max_workers, arguments)
# Let's run tests against mutations!
await play_mutations(loop, db, seed, alpha, count, max_workers, arguments)
def mutation_diff_size(db, uid):
_, diff = lexode.unpack(db[lexode.pack([1, uid])])
out = len(zstd.decompress(diff))
return out
def replay_mutation(db, uid, alpha, seed, max_workers, command):
log.info("* Use Ctrl+C to exit.")
command = list(command)
command.append("--randomly-seed={}".format(seed))
    # Force serial execution while replaying; as written, the parallel
    # branch below is never taken.
    max_workers = 1
    if max_workers > 1:
        command.append("--numprocesses={}".format(max_workers))
timeout = alpha * 2
while True:
ok = mutation_pass((command, uid, timeout))
if not ok:
mutation_show(uid.hex)
msg = "* Type 'skip' to go to next mutation or just enter to retry."
log.info(msg)
skip = input().startswith("s")
if skip:
db[lexode.pack([2, uid])] = b"\x01"
return
# Otherwise loop to re-test...
else:
del db[lexode.pack([2, uid])]
return
def replay(arguments):
root = Path(".")
with database_open(root) as db:
command = db[lexode.pack((0, "command"))]
command = lexode.unpack(command)
command = dict(command)
seed = command.pop("seed")
random.seed(seed)
command = command.pop("command")
alpha, max_workers = check_tests(root, seed, arguments, command)
with database_open(root) as db:
while True:
uids = (
lexode.unpack(k)[1] for k, v in db[lexode.pack([2]) :] if v == b"\x00"
)
uids = sorted(
uids,
key=functools.partial(mutation_diff_size, db),
reverse=True,
)
if not uids:
log.info("No mutation failures 👍")
sys.exit(0)
while uids:
uid = uids.pop(0)
replay_mutation(db, uid, alpha, seed, max_workers, command)
def mutation_list():
with database_open(".") as db:
uids = ((lexode.unpack(k)[1], v) for k, v in db[lexode.pack([2]) :])
uids = sorted(uids, key=lambda x: mutation_diff_size(db, x[0]), reverse=True)
if not uids:
log.info("No mutation failures 👍")
sys.exit(0)
for (uid, type) in uids:
log.info("{}\t{}".format(uid.hex, "skipped" if type == b"\x01" else ""))
def mutation_show(uid):
uid = UUID(hex=uid)
log.info("mutation show {}", uid.hex)
log.info("")
with database_open(".") as db:
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
terminal256 = pygments.formatters.get_formatter_by_name("terminal256")
python = pygments.lexers.get_lexer_by_name("python")
print(diff)
for line in diff.split("\n"):
if line.startswith("+++"):
delta = colored("+++", "green", attrs=["bold"])
highlighted = pygments.highlight(line[3:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("---"):
delta = colored("---", "red", attrs=["bold"])
highlighted = pygments.highlight(line[3:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("+"):
delta = colored("+", "green", attrs=["bold"])
highlighted = pygments.highlight(line[1:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("-"):
delta = colored("-", "red", attrs=["bold"])
highlighted = pygments.highlight(line[1:], python, terminal256)
log.info(delta + highlighted.rstrip())
else:
highlighted = pygments.highlight(line, python, terminal256)
log.info(highlighted.rstrip())
def mutation_apply(uid):
uid = UUID(hex=uid)
with database_open(".") as db:
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path, "r") as f:
source = f.read()
patched = patch(diff, source)
with open(path, "w") as f:
f.write(patched)
def main():
arguments = docopt(__doc__, version=__version__)
if arguments.get("--verbose", False):
log.remove()
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="DEBUG",
colorize=True,
enqueue=True,
)
log.debug("Mutation at {}", PRONOTION)
log.trace(arguments)
if arguments["replay"]:
replay(arguments)
sys.exit(0)
if arguments.get("list", False):
mutation_list()
sys.exit(0)
if arguments.get("show", False):
mutation_show(arguments["MUTATION"])
sys.exit(0)
if arguments.get("apply", False):
mutation_apply(arguments["MUTATION"])
sys.exit(0)
# Otherwise run play.
loop = asyncio.get_event_loop()
loop.run_until_complete(play(loop, arguments))
loop.close()
if __name__ == "__main__":
main()
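# Illustrative sketch (standalone, not part of the tool): how the progress
# "step" in play_mutations above is picked -- the largest bucket in
# [10_000, 1_000, 100, 10, 1] that fits at least once into the total, so the
# progress bar is refreshed a bounded number of times regardless of how many
# mutations there are.
def pick_step(total):
    for speed in [10_000, 1_000, 100, 10, 1]:
        if total // speed:
            return speed
    return 1


assert pick_step(25_000) == 10_000
assert pick_step(42) == 10
assert pick_step(3) == 1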
| 28.961027
| 202
| 0.573046
| 3,612
| 30,467
| 4.764396
| 0.16113
| 0.018595
| 0.01046
| 0.007438
| 0.260096
| 0.224127
| 0.174908
| 0.157243
| 0.148004
| 0.12569
| 0
| 0.009253
| 0.301178
| 30,467
| 1,051
| 203
| 28.988582
| 0.798835
| 0.079397
| 0
| 0.280632
| 0
| 0
| 0.097855
| 0.004147
| 0
| 0
| 0
| 0.000951
| 0.001318
| 1
| 0.072464
| false
| 0.011858
| 0.046113
| 0.01581
| 0.201581
| 0.002635
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef1a0f68bf7e4627785fe119d1363f10a767d348
| 1,058
|
py
|
Python
|
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | 2
|
2018-03-29T12:15:04.000Z
|
2019-01-09T02:09:41.000Z
|
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from ColorDetector import ColorDetector
def main():
detector = ColorDetector()
parser = argparse.ArgumentParser()
# --k : number of clusters, --image: image path, --debug: debug level
parser.add_argument("--k", nargs=1, type=int, help='maximum number of colors to be identified. Default:10')
parser.add_argument("--n", nargs=1, type=int, help='number of top dominant colors to be displayed')
parser.add_argument("--image", nargs=1, required=True, help='full path of image to be processed')
parser.add_argument("--debug", nargs=1, type=int, help='debug level: 1 for debug mode, 0: no log messages')
args = parser.parse_args()
img_name = None
n = 4
if args.k:
detector.NUM_OF_CLUSTERS = int(args.k[0])
if args.image:
img_name = args.image[0]
if args.debug:
detector.log_level = int(args.debug[0])
if args.n:
n = int(args.n[0])
image = detector.readImage(img_name)
detector.getDominantColors(image, n)
if __name__ == "__main__":
main()
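# Illustrative sketch (standalone, hypothetical values): why the code above
# indexes args.k[0], args.image[0], and so on -- with nargs=1, argparse
# always wraps the parsed value in a one-element list.
import argparse

_parser = argparse.ArgumentParser()
_parser.add_argument("--k", nargs=1, type=int)
_args = _parser.parse_args(["--k", "8"])
assert _args.k == [8]  # a one-element list, hence the [0] indexing above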
| 30.228571
| 111
| 0.660681
| 152
| 1,058
| 4.473684
| 0.361842
| 0.052941
| 0.1
| 0.057353
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015495
| 0.206994
| 1,058
| 34
| 112
| 31.117647
| 0.794994
| 0.063327
| 0
| 0
| 0
| 0
| 0.211325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef1beeeb227406f72c9053a339254f85199fda6b
| 2,062
|
py
|
Python
|
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
from elasticapm.contrib.flask import ElasticAPM
import os
from flask import Flask, request, render_template
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
APP = Flask(__name__)
APP.config['ELASTIC_APM'] = {
}
apm = ElasticAPM(APP)
APP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
APP.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://%s:%s@%s/%s' % (
# ARGS.dbuser, ARGS.dbpass, ARGS.dbhost, ARGS.dbname
os.environ['DBUSER'], os.environ['DBPASS'], os.environ['DBHOST'], os.environ['DBNAME']
)
# initialize the database connection
DB = SQLAlchemy(APP)
# initialize database migration management
MIGRATE = Migrate(APP, DB)
from models import *
@APP.route('/')
def view_registered_guests():
guests = Guest.query.all()
return render_template('guest_list.html', guests=guests)
@APP.route('/register', methods = ['GET'])
def view_registration_form():
return render_template('guest_registration.html')
@APP.route('/register', methods = ['POST'])
def register_guest():
name = request.form.get('name')
email = request.form.get('email')
partysize = request.form.get('partysize')
if not partysize or partysize=='':
partysize = 1
guest = Guest(name, email, partysize)
DB.session.add(guest)
DB.session.commit()
return render_template('guest_confirmation.html',
name=name, email=email, partysize=partysize)
# bad query
@APP.route('/bad_query')
def view_registered_guests_bad_query():
for _ in range(20):
guests = Guest.query.all()
return render_template('guest_list.html', guests=guests)
# error message
@APP.route('/hello')
def apm_message_hello():
apm.capture_message('hello, world!')
return render_template('apm_hello.html')
# Error
@APP.route('/error')
def apm_error():
try:
1 / 0
except ZeroDivisionError:
apm.capture_exception()
return render_template('apm_error.html')
# Unhandled error
@APP.route('/fatal_error')
def apm_fatal_error():
1 / 0
return render_template('apm_error.html')
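# Illustrative smoke test (an assumption, not from the original repo): the
# routes can be exercised with Flask's test client, provided the DB* env
# vars point at a reachable Postgres instance.
if __name__ == "__main__":
    with APP.test_client() as client:
        print(client.get("/").status_code)        # guest list
        print(client.get("/hello").status_code)   # APM "hello, world!" demo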
| 25.775
| 90
| 0.70805
| 264
| 2,062
| 5.359848
| 0.306818
| 0.079152
| 0.09894
| 0.070671
| 0.135689
| 0.135689
| 0.090459
| 0.090459
| 0.090459
| 0.090459
| 0
| 0.004585
| 0.153734
| 2,062
| 80
| 91
| 25.775
| 0.806304
| 0.083414
| 0
| 0.145455
| 0
| 0
| 0.175252
| 0.070101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.127273
| false
| 0.018182
| 0.109091
| 0.018182
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef1e04b7ef6eaf43f6fa7d6f871605144e4d447e
| 8,836
|
py
|
Python
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 2
|
2015-04-11T12:22:41.000Z
|
2016-08-18T11:12:06.000Z
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 84
|
2015-01-22T14:33:49.000Z
|
2015-04-01T23:15:29.000Z
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 1
|
2015-04-16T03:10:39.000Z
|
2015-04-16T03:10:39.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime
import logging
import os.path
import requests
import time
import urllib
from bs4 import BeautifulSoup
from utils import mongo
class FetchMeetings:
def __init__(self, **kwargs):
# fetch the logger
self._logger = logging.getLogger("spud")
self.BASE_URL = "https://www.gov.uk"
# initial search query stuff
self.search_term = "meetings"
self.search_filter = "transparency-data"
# database stuff
self.db = mongo.MongoInterface()
self.COLLECTION_NAME = "meetings_fetch"
if kwargs["refreshdb"]:
self.db.drop(self.COLLECTION_NAME)
# local directory to save fetched files to
self.STORE_DIR = "store"
# get the current path
self.current_path = os.path.dirname(os.path.abspath(__file__))
# if True, avoid downloading where possible
self.dryrun = kwargs["dryrun"]
def fetch_all_publications(self):
self._logger.debug("Searching %s for '%s' with filter '%s' ..." % (self.BASE_URL, self.search_term, self.search_filter))
search_tmpl = "%s/government/publications?keywords=%s&publication_filter_option=%s&page=%%d" % (self.BASE_URL, urllib.quote_plus(self.search_term), self.search_filter)
page = 1
total_pages = "unknown"
collections = {}
publications = {}
while True:
if total_pages != "unknown" and page > total_pages:
# no more search results
break
# search gov.uk for results
self._logger.debug(" Fetching results page %d / %s ..." % (page, total_pages))
r = requests.get(search_tmpl % page)
time.sleep(0.5)
soup = BeautifulSoup(r.text)
if total_pages == "unknown":
total_pages = int(soup.find(class_="page-numbers").text[5:])
publication_soups = soup.find_all(class_="document-row")
for pub_soup in publication_soups:
# find collections (we'll use these to find more publications)
collection_soup = pub_soup.find(class_="document-collections")
if collection_soup:
collection_text = collection_soup.a.text
collection_url = "%s%s" % (self.BASE_URL, collection_soup.a["href"])
if collection_url not in collections and self.search_term in collection_text.lower():
collections[collection_url] = {
"url": collection_url,
"name": collection_text,
}
continue
# any remaining publications are not part of a collection
pub_title = pub_soup.h3.a
pub_url = "%s%s" % (self.BASE_URL, pub_title["href"])
if self.search_term in pub_title.text.lower() and pub_url not in publications:
department = pub_soup.find(class_="organisations")
if department.abbr is not None:
department = department.abbr["title"]
else:
department = department.text
publications[pub_url] = {
"source": {
"linked_from_url": pub_url,
},
"collection": None,
"title": pub_title.text,
"published_at": pub_soup.find(class_="public_timestamp").text.strip(),
"department": department,
}
page += 1
self._logger.debug("Found %d collections, and %d publications not part of collections." % (len(collections), len(publications)))
publications = self.fetch_pubs_from_collections(collections.values(), publications)
return publications.values()
def fetch_pubs_from_collections(self, collections, publications={}):
self._logger.debug("Searching %d collections for more publications ..." % len(collections))
for collection in collections:
r = requests.get(collection["url"])
time.sleep(0.5)
soup = BeautifulSoup(r.text)
department = soup.find(class_="organisation-link").text
publication_soups = soup.find_all(class_="publication")
for pub_soup in publication_soups:
pub_title = pub_soup.h3.a
pub_url = "%s%s" % (self.BASE_URL, pub_title["href"])
if self.search_term in pub_title.text.lower() and pub_url not in publications:
publications[pub_url] = {
"source": {
"linked_from_url": pub_url,
},
"collection": collection["name"],
"title": pub_title.text,
"published_at": pub_soup.find(class_="public_timestamp").text,
"department": department,
}
self._logger.debug("Done searching.")
return publications
def fetch_file(self, url, filename):
self._logger.debug(" Fetching: %s" % url)
full_path = os.path.join(self.current_path, self.STORE_DIR, filename)
urllib.urlretrieve(url, full_path)
time.sleep(0.5)
def save_to_db(self, publication):
publication["source"]["fetched"] = False
# existing = self.db.find_one(self.COLLECTION_NAME, {"url": publication["source"]["url"]})
# if existing is None:
self.db.save(self.COLLECTION_NAME, publication, manipulate=False)
def get_all_unfetched(self):
all_not_fetched = []
page = 1
while True:
not_fetched, meta = self.db.query(self.COLLECTION_NAME, query={"source.fetched": False}, page=page)
all_not_fetched += not_fetched
page += 1
if not meta["has_more"]:
return all_not_fetched
def run(self):
publications = self.fetch_all_publications()
self._logger.debug("Searching %d publication pages for attachments ..." % len(publications))
for pub in publications:
r = requests.get(pub["source"]["linked_from_url"])
time.sleep(0.5)
soup = BeautifulSoup(r.text)
attachment_soups = soup.find_all(class_="attachment")
for attachment_soup in attachment_soups:
attachment_title = attachment_soup.h2.text
if self.search_term not in attachment_title.lower():
continue
attachment = pub.copy()
attachment["title"] = attachment_title
download_soup = attachment_soup.find(class_="download")
if download_soup is not None:
# download link (usually to a csv) is available
rel_url = download_soup.a["href"]
attachment["file_type"] = rel_url.split(".")[-1].upper()
elif attachment_soup.h2.a is not None:
# heading link (usually to a pdf)
rel_url = attachment_soup.h2.a["href"]
attachment["file_type"] = attachment_soup.find(class_="type").text
else:
self._logger.error(attachment_soup)
raise Exception("Unknown attachment type.")
attachment["source"]["url"] = "%s%s" % (self.BASE_URL, rel_url)
attachment["filename"] = os.path.join("-".join(rel_url.split("/")[-2:]))
self.save_to_db(attachment)
if attachment_soups == []:
# the data is inline - embedded in the page.
# NB this is very unusual.
pub["source"]["url"] = pub["source"]["linked_from_url"]
pub["filename"] = os.path.join("%s.html" % pub["source"]["url"].split("/")[-1])
pub["file_type"] = "HTML"
self.save_to_db(pub)
self._logger.debug("Found %d attachments in total." % self.db.count(self.COLLECTION_NAME))
if not self.dryrun:
not_fetched = self.get_all_unfetched()
self._logger.debug("Fetching %d attachments ..." % len(not_fetched))
for pub in not_fetched:
self.fetch_file(pub["source"]["url"], pub["filename"])
pub["source"]["fetched"] = str(datetime.now())
self.db.update(self.COLLECTION_NAME, {"source.url": pub["source"]["url"]}, pub)
self._logger.debug("Attachments fetched.")
def fetch(**kwargs):
# TODO! this is temporary!
# import requests_cache
# requests_cache.install_cache("meetings")
FetchMeetings(**kwargs).run()
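# Usage sketch (illustrative): a dry run of the scraper. The kwargs mirror
# what FetchMeetings.__init__ reads (refreshdb, dryrun); dryrun=True avoids
# downloading the attachments.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    fetch(refreshdb=False, dryrun=True)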
| 46.26178
| 175
| 0.563943
| 964
| 8,836
| 4.973029
| 0.201245
| 0.025031
| 0.031289
| 0.012516
| 0.20776
| 0.175845
| 0.116187
| 0.116187
| 0.109303
| 0.094285
| 0
| 0.003855
| 0.324808
| 8,836
| 190
| 176
| 46.505263
| 0.799698
| 0.078542
| 0
| 0.228758
| 0
| 0
| 0.132619
| 0.009358
| 0
| 0
| 0
| 0.005263
| 0
| 1
| 0.052288
| false
| 0
| 0.052288
| 0
| 0.130719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef21cfd36477df2859e374f71d6a0bbf86ff8519
| 561
|
py
|
Python
|
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | null | null | null |
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | 9
|
2018-09-17T20:50:43.000Z
|
2018-12-07T21:19:56.000Z
|
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | null | null | null |
import os
import boto3
import dj_database_url
from mbq import env, metrics
SECRET_KEY = 'fake-key'
DEBUG = True
ATOMIQ = {
'env': 'Test',
'service': 'test-service',
}
database_url = os.environ.get('DATABASE_URL', 'mysql://root:@mysql:3306/atomiqdb')
DATABASES = {
'default': dj_database_url.parse(database_url),
}
INSTALLED_APPS = [
'mbq.atomiq',
]
USE_TZ = True
boto3.setup_default_session(
region_name='us-east-1',
)
ENV = env.get_environment("ENV_NAME")
metrics.init('mbq.atomiq', env=ENV, constant_tags={"env": ENV.long_name})
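# Illustrative sketch (not from the original repo): what dj_database_url.parse
# produces for the default URL above -- roughly {'ENGINE':
# 'django.db.backends.mysql', 'NAME': 'atomiqdb', 'USER': 'root',
# 'PASSWORD': '', 'HOST': 'mysql', 'PORT': 3306}, i.e. a ready-made Django
# DATABASES entry.
if __name__ == "__main__":
    from pprint import pprint
    pprint(dj_database_url.parse('mysql://root:@mysql:3306/atomiqdb'))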
| 16.5
| 82
| 0.695187
| 79
| 561
| 4.721519
| 0.544304
| 0.147453
| 0.069705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014644
| 0.14795
| 561
| 33
| 83
| 17
| 0.76569
| 0
| 0
| 0
| 0
| 0
| 0.224599
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef25471191ad1db593810b69150f45edb9dc331e
| 2,615
|
py
|
Python
|
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | 2
|
2021-08-03T17:32:09.000Z
|
2021-08-03T18:28:31.000Z
|
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | null | null | null |
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | null | null | null |
from collections import deque
class IndexedObject:
"""Container for an object that has indices
:param name: Name of the object
:param indices: Indices attached to it
"""
def __init__(self,name,indices):
"""Constructor
"""
self.name = name
self.indices = indices
def cyclic_permute_indices(self):
"""Return the object with it's indices cyclicly permuted once.
"""
tmp=deque(self.indices)
tmp.rotate(1)
self.indices=list(tmp)
def __str__(self):
"""String printer
"""
idx_str = ''
for i in range(len(self.indices)):
idx_str += self.indices[i]
if(i!=len(self.indices)-1):
idx_str += ' '
return self.name + '_{' + idx_str + '}'
def __eq__(self, other):
"""Equality comparison
"""
return (self.name == other.name) and (self.indices==other.indices)
def __lt__(self, other):
"""Less then operator
"""
if(self.name != other.name):
return (self.name < other.name)
else:
return (self.indices < other.indices)
class IndexedFunction(IndexedObject):
"""Container for an object with indices and arguments
:param name: Name of the object
:param indices: Indices attached to the argument
:param arguments: Arguments the object depends on
"""
def __init__(self, name, indices, arguments):
"""Constructor
"""
self.name = name
self.indices = indices
self.arguments = arguments
def __str__(self):
"""String printer
"""
idx_str = ''
for i in range(len(self.indices)):
idx_str += self.indices[i]
if(i!=len(self.indices)-1):
idx_str += ' '
arg_str = ''
for i in range(len(self.arguments)):
arg_str += self.arguments[i]
if(i!=len(self.arguments)-1):
arg_str += ','
return self.name + '(' + arg_str + ')_{' + idx_str + '}'
def __eq__(self, other):
"""Equality comparison
"""
        return (self.name == other.name) and (self.indices == other.indices) and (self.arguments == other.arguments)
def __lt__(self, other):
"""Less then operator
"""
if(self.name != other.name):
return (self.name < other.name)
else:
self_strings = self.indices + self.arguments
other_strings = other.indices + other.arguments
return (self_strings < other_strings)
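# Usage sketch (illustrative, hypothetical names): printing and cyclically
# permuting an IndexedObject.
if __name__ == "__main__":
    t = IndexedObject("T", ["a", "b", "c"])
    print(t)  # T_{a b c}
    t.cyclic_permute_indices()
    print(t)  # T_{c a b}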
| 30.406977
| 111
| 0.549522
| 293
| 2,615
| 4.726962
| 0.211604
| 0.111191
| 0.06065
| 0.073646
| 0.612274
| 0.52491
| 0.52491
| 0.450542
| 0.450542
| 0.450542
| 0
| 0.002278
| 0.328489
| 2,615
| 86
| 112
| 30.406977
| 0.786446
| 0.213002
| 0
| 0.530612
| 0
| 0
| 0.005658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.183673
| false
| 0
| 0.020408
| 0
| 0.408163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef29d7cb4df5849c15653808babb4473a2403757
| 874
|
py
|
Python
|
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
from SAGIRIBOT.basics.aio_mysql_excute import execute_sql
async def update_setting(group_id, setting_name, new_setting_value) -> None:
"""
Update setting to database
Args:
group_id: Group id
setting_name: Setting name
new_setting_value: New setting value
Examples:
await update_setting(12345678, "setu", True)
Return:
None
"""
str_key_word = ["speakMode", "switch", "music", "r18Process"]
sql_key_word = ["repeat", "real", "limit"]
if setting_name in sql_key_word:
setting_name = '`'+setting_name+'`'
if setting_name in str_key_word:
sql = "UPDATE setting SET %s='%s' WHERE groupId=%d" % (setting_name, new_setting_value, group_id)
else:
sql = "UPDATE setting SET %s=%s WHERE groupId=%d" % (setting_name, new_setting_value, group_id)
await execute_sql(sql)
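# Illustrative note (an assumption about execute_sql, which is defined in
# SAGIRIBOT.basics.aio_mysql_excute): if it can forward parameters to the
# driver, a parameterized form avoids interpolating values by hand, e.g.
#
#   sql = "UPDATE setting SET {} = %s WHERE groupId = %s".format(setting_name)
#   await execute_sql(sql, (new_setting_value, group_id))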
| 31.214286
| 105
| 0.662471
| 117
| 874
| 4.65812
| 0.393162
| 0.181651
| 0.137615
| 0.154128
| 0.341284
| 0.245872
| 0.245872
| 0.245872
| 0.245872
| 0.245872
| 0
| 0.014859
| 0.229977
| 874
| 27
| 106
| 32.37037
| 0.794948
| 0
| 0
| 0
| 0
| 0
| 0.21129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef3678c7e21e6c165bc6c6b597bc9cfc9cfa52bc
| 10,380
|
py
|
Python
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 97
|
2018-01-15T19:29:31.000Z
|
2022-03-11T00:27:34.000Z
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 10
|
2018-01-15T22:44:55.000Z
|
2022-02-18T09:44:10.000Z
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 33
|
2018-01-15T19:34:23.000Z
|
2022-03-05T22:39:33.000Z
|
from trumania.core import circus
import trumania.core.population as population
import trumania.core.random_generators as gen
import trumania.core.operations as ops
import trumania.core.story as story
import trumania.components.time_patterns.profilers as profilers
import trumania.core.util_functions as util_functions
import trumania.components.db as DB
import pandas as pd
# each step?() function below implement one step of the fourth example of the
# tutorial documented at
# https://realimpactanalytics.atlassian.net/wiki/display/LM/Data+generator+tutorial
# this is essentially a modification of example3, with some supplementary
# features demonstrating persistence
def build_music_repo():
# this time we create a "detached" population, not connected to a circus
repo = population.Population(
circus=None,
size=5,
ids_gen=gen.SequencialGenerator(prefix="GENRE_"))
repo.create_attribute(
name="genre_name",
init_values=["blues", "jazz", "electro", "pop", "rock"])
repo.create_relationship(name="songs", seed=18)
return repo
def add_song_to_repo(repo_population):
songs = population.Population(
circus=None,
size=0,
ids_gen=gen.SequencialGenerator(prefix="SONG_"))
# since the size of the population is 0, we can create attribute without
# providing any initialization
songs.create_attribute(name="artist_name")
songs.create_attribute(name="song_genre")
songs.create_attribute(name="title")
songs.create_attribute(name="duration_seconds")
songs.create_attribute(name="recording_year")
song_id_gen = gen.SequencialGenerator(prefix="S_")
# generate artist names from a list of randomly generated ones, so we have
# some redundancy in the generated dataset
artist_name_gen = gen.NumpyRandomGenerator(
method="choice",
a=gen.FakerGenerator(
method="name",
seed=1234).generate(size=200),
seed=5678)
title_gen = gen.FakerGenerator(method="sentence",
seed=78961,
nb_words=4,
variable_nb_words=True)
# generates recording years within a desired date range
year_gen = gen.FakerGenerator(
method="date_time_between_dates",
seed=184,
datetime_start=pd.Timestamp("1910-10-20"),
datetime_end=pd.Timestamp("2016-12-02")) \
.map(f=lambda d: d.year)
duration_gen = gen.ParetoGenerator(xmin=60,
seed=9874,
force_int=True,
a=1.2)
repo_genre_rel = repo_population.get_attribute("genre_name")
for genre_id, genre_name in repo_genre_rel.get_values().items():
# an operation capable of creating songs of that genre
init_attribute = ops.Chain(
artist_name_gen.ops.generate(named_as="artist_name"),
title_gen.ops.generate(named_as="title"),
year_gen.ops.generate(named_as="recording_year"),
duration_gen.ops.generate(named_as="duration_seconds"),
gen.ConstantGenerator(value=genre_name).ops.generate(named_as="song_genre")
)
        # dataframe of empty songs: with just one SONG_ID column for now
song_ids = song_id_gen.generate(size=1000)
        empty_songs = story.Story.init_story_data(
member_id_field_name="SONG_ID",
active_ids=song_ids
)
        # we can already add the generated songs to the music repo relationship
repo_population.get_relationship("songs").add_grouped_relations(
from_ids=[genre_id],
grouped_ids=[song_ids]
)
# here we generate all desired columns in the dataframe
        initialized_songs, _ = init_attribute(empty_songs)
initialized_songs.drop(["SONG_ID"], axis=1, inplace=True)
# this works because the columns of init_attribute match exactly the
# ones of the attributes of the populations
songs.update(initialized_songs)
# makes sure year and duration are handled as integer
songs.get_attribute("recording_year").transform_inplace(int)
songs.get_attribute("duration_seconds").transform_inplace(int)
return songs
def build_circus(name):
return circus.Circus(
name=name,
master_seed=12345,
start=pd.Timestamp("1 Jan 2017 00:00"),
step_duration=pd.Timedelta("1h"))
def add_listener(the_circus):
users = the_circus.create_population(
name="user", size=5,
ids_gen=gen.SequencialGenerator(prefix="user_"))
users.create_attribute(
name="FIRST_NAME",
init_gen=gen.FakerGenerator(method="first_name",
seed=next(the_circus.seeder)))
users.create_attribute(
name="LAST_NAME",
init_gen=gen.FakerGenerator(method="last_name",
seed=next(the_circus.seeder)))
def add_listen_and_share_stories_with_details(the_circus):
users = the_circus.populations["user"]
    # using this timer means users are more likely to trigger listen/share
    # events during day hours rather than at night.
timer_gen = profilers.HighWeekDaysTimerGenerator(
clock=the_circus.clock, seed=next(the_circus.seeder))
# this generate activity level distributed as a "truncated normal
# distribution", i.e. very high and low activities are prevented.
bounded_gaussian_activity_gen = gen.NumpyRandomGenerator(
method="normal",
seed=next(the_circus.seeder),
loc=timer_gen.activity(n=20, per=pd.Timedelta("1 day")),
scale=5
).map(ops.bound_value(lb=10, ub=30))
listen = the_circus.create_story(
name="listen_events",
initiating_population=users,
member_id_field="UID",
timer_gen=timer_gen,
activity_gen=bounded_gaussian_activity_gen
)
share = the_circus.create_story(
name="share_events",
initiating_population=users,
member_id_field="UID",
timer_gen=timer_gen,
activity_gen=bounded_gaussian_activity_gen
)
repo = the_circus.populations["music_repository"]
songs = the_circus.populations["songs"]
select_genre_and_song = ops.Chain(
users.ops.lookup(
id_field="UID",
select={
"FIRST_NAME": "USER_FIRST_NAME",
"LAST_NAME": "USER_LAST_NAME",
}
),
# picks a genre at random
repo.ops.select_one(named_as="GENRE"),
# picks a song at random for that genre
repo.get_relationship("songs").ops.select_one(
from_field="GENRE",
named_as="SONG_ID"),
# now also reporting details of listened or shared songs
songs.ops.lookup(
id_field="SONG_ID",
select={
"artist_name": "SONG_ARTIST",
"title": "SONG_TITLE",
"recording_year": "SONG_YEAR",
"duration_seconds": "SONG_DURATION",
}
),
)
listen.set_operations(
select_genre_and_song,
ops.FieldLogger("listen_events")
)
share.set_operations(
select_genre_and_song,
# picks a user this song is shared to
users.ops.select_one(named_as="SHARED_TO_UID"),
        # note we could post-check whether a user shared a song to their own uid
# here, in which case we can use DropRow to discard that share event
ops.FieldLogger("share_events")
)
def step1():
# this creates 2 populations: music_repo and songs
music_repo = build_music_repo()
songs = add_song_to_repo(music_repo)
# saves them to persistence
DB.remove_namespace(namespace="tutorial_example4")
DB.save_population(music_repo, namespace="tutorial_example4",
population_id="music_repository")
DB.save_population(songs, namespace="tutorial_example4",
population_id="songs")
# build a new circus then loads and attach the persisted population to it
example4_circus = build_circus(name="example4_circus")
example4_circus.load_population(namespace="tutorial_example4",
population_id="music_repository")
example4_circus.load_population(namespace="tutorial_example4",
population_id="songs")
add_listener(example4_circus)
def step2():
# this creates 2 populations: music_repo and songs
music_repo = build_music_repo()
songs = add_song_to_repo(music_repo)
# saves them to persistence
DB.remove_namespace(namespace="tutorial_example4")
DB.save_population(music_repo, namespace="tutorial_example4",
population_id="music_repository")
DB.save_population(songs, namespace="tutorial_example4",
population_id="songs")
# build a new circus then loads and attach the persisted population to it
example4_circus = build_circus(name="example4_circus")
example4_circus.load_population(namespace="tutorial_example4",
population_id="music_repository")
example4_circus.load_population(namespace="tutorial_example4",
population_id="songs")
add_listener(example4_circus)
# This saves the whole circus to persistence, with all its populations,
# relationships, generators,...
    # This is independent from the 2 populations saved above: this time we no
    # longer have direct control over the namespace: the persistence mechanism
    # uses the circus name as the namespace
example4_circus.save_to_db(overwrite=True)
# example4bis should be an exact deep copy of example4_circus
example4bis = circus.Circus.load_from_db(circus_name="example4_circus")
# Stories are not serialized to CSV but rather serialized in code,
# using humans as transducers
add_listen_and_share_stories_with_details(example4bis)
example4bis.run(
duration=pd.Timedelta("5 days"),
log_output_folder="output/example4",
delete_existing_logs=True)
if __name__ == "__main__":
util_functions.setup_logging()
step2()
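# Usage sketch (illustrative): step2(), invoked above, persists and reloads
# the whole circus before running the stories; step1() stops after saving and
# reloading the two populations. To try the lighter variant instead:
#
#   util_functions.setup_logging()
#   step1()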
| 34.832215
| 87
| 0.657225
| 1,251
| 10,380
| 5.213429
| 0.27498
| 0.019319
| 0.038332
| 0.042932
| 0.315547
| 0.25667
| 0.228458
| 0.205765
| 0.205765
| 0.205765
| 0
| 0.014798
| 0.257803
| 10,380
| 297
| 88
| 34.949495
| 0.831776
| 0.218593
| 0
| 0.245989
| 0
| 0
| 0.117749
| 0.002851
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037433
| false
| 0
| 0.048128
| 0.005348
| 0.101604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef3d18dad9fb4f3ea7850ca0af729153b0fd6bb6
| 1,828
|
py
|
Python
|
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | 1
|
2019-10-22T11:33:23.000Z
|
2019-10-22T11:33:23.000Z
|
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | null | null | null |
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | null | null | null |
from itertools import product
from hyperparameter_tuner.single_parameter_generator import single_parameter_generator as sgen
class run_command_generator():
def __init__(self, single_parameter_generator_list, command_prefix="python ../experiment.py",
output_path="./results"):
for gen in single_parameter_generator_list:
assert isinstance(gen, sgen)
self.single_parameter_generator_list = single_parameter_generator_list
self.run_command = command_prefix
self.output_path = output_path
def run_commands(self):
all_parrams_gennerator = self.single_parameter_generator_list[0].params()
for p in self.single_parameter_generator_list[1:]:
all_parrams_gennerator = product(all_parrams_gennerator, p.params())
for train_params in all_parrams_gennerator:
command = str(train_params).replace('(', '').replace(')', '').replace('\'', '').replace(',', '')
stripped_command = command.replace(' ', '_').replace('-', '').replace('.', '')
output_path = f"{self.output_path}/{stripped_command}"
command = f"{self.run_command} {command} >{output_path}.out 2>{output_path}.err"
yield command
def default_commands_generator(command_prefix="python experiment.py", output_path="./hyperparameter_tuner/results"):
return run_command_generator([sgen("name", ["vgg_16"]),
sgen("learning_rate", [0.001, 0.005, 0.01, 0.03, 0.07, 0.1, 0.5, 1]),
sgen("batch_size", [20, 25, 30, 35, 50, 75]),
], command_prefix=command_prefix, output_path=output_path).run_commands()
if __name__ == '__main__':
commands = default_commands_generator()
for c in commands:
print(c)
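# Illustrative sketch (standalone, hypothetical values): chaining product()
# pairwise, as run_commands() does above, yields *nested* tuples -- which is
# why the command string is flattened by replacing parentheses and commas.
from itertools import product

params = ["a1", "a2"]
for extra in (["b1"], ["c1", "c2"]):
    params = product(params, extra)
print([str(t) for t in params])
# ["(('a1', 'b1'), 'c1')", "(('a1', 'b1'), 'c2')", ...]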
| 46.871795
| 116
| 0.650438
| 213
| 1,828
| 5.211268
| 0.338028
| 0.09009
| 0.172973
| 0.151351
| 0.189189
| 0.073874
| 0.073874
| 0
| 0
| 0
| 0
| 0.027523
| 0.224836
| 1,828
| 38
| 117
| 48.105263
| 0.755822
| 0
| 0
| 0
| 0
| 0.068966
| 0.136214
| 0.036652
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.103448
| false
| 0
| 0.068966
| 0.034483
| 0.241379
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef41254ab69ff27661576195222b554a1c94e4da
| 6,158
|
py
|
Python
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 90
|
2016-01-29T15:09:21.000Z
|
2022-03-08T15:08:57.000Z
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 27
|
2016-01-14T10:30:10.000Z
|
2022-03-24T08:00:31.000Z
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 20
|
2016-01-14T12:50:55.000Z
|
2022-03-04T07:26:30.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""Classes used for rendering (parts) of the canvas.
Every parsed :class:`~inscriptis.model.html_element.HtmlElement` writes its
textual content to the canvas which is managed by the following three classes:
- :class:`Canvas` provides the drawing board on which the HTML page is
serialized and annotations are recorded.
- :class:`~inscriptis.model.canvas.block.Block` contains the current line to
which text is written.
- :class:`~inscriptis.model.canvas.prefix.Prefix` handles indentation
and bullets that prefix a line.
"""
from inscriptis.annotation import Annotation
from inscriptis.html_properties import WhiteSpace, Display
from inscriptis.model.canvas.block import Block
from inscriptis.model.html_element import HtmlElement
from inscriptis.model.canvas.prefix import Prefix
class Canvas:
r"""The text Canvas on which Inscriptis writes the HTML page.
Attributes:
margin: the current margin to the previous block (this is required to
ensure that the `margin_after` and `margin_before` constraints of
HTML block elements are met).
current_block: A :class:`~inscriptis.model.canvas.block.Block` which
merges the input text into a block (i.e., line).
blocks: a list of strings containing the completed blocks (i.e.,
text lines). Each block spawns at least one line.
annotations: the list of recorded
:class:`~inscriptis.annotation.Annotation`\s.
_open_annotations: a map of open tags that contain annotations.
"""
__slots__ = ('annotations', 'blocks', 'current_block', '_open_annotations',
'margin')
def __init__(self):
self.margin = 1000 # margin to the previous block
self.current_block = Block(0, Prefix())
self.blocks = []
self.annotations = []
self._open_annotations = {}
def open_tag(self, tag: HtmlElement) -> None:
"""Register that a tag is opened.
Args:
tag: the tag to open.
"""
if tag.annotation:
self._open_annotations[tag] = self.current_block.idx
if tag.display == Display.block:
self.open_block(tag)
def open_block(self, tag: HtmlElement):
"""Open an HTML block element."""
# write missing bullets, if no content has been written
if not self._flush_inline() and tag.list_bullet:
self.write_unconsumed_bullet()
self.current_block.prefix.register_prefix(tag.padding_inline,
tag.list_bullet)
# write the block margin
required_margin = max(tag.previous_margin_after, tag.margin_before)
if required_margin > self.margin:
required_newlines = required_margin - self.margin
self.current_block.idx += required_newlines
self.blocks.append('\n' * (required_newlines - 1))
self.margin = required_margin
def write_unconsumed_bullet(self):
"""Write unconsumed bullets to the blocks list."""
bullet = self.current_block.prefix.unconsumed_bullet
if bullet:
self.blocks.append(bullet)
self.current_block.idx += len(bullet)
self.current_block = self.current_block.new_block()
self.margin = 0
def write(self, tag: HtmlElement, text: str,
whitespace: WhiteSpace = None) -> None:
"""Write the given text to the current block."""
self.current_block.merge(text, whitespace or tag.whitespace)
def close_tag(self, tag: HtmlElement) -> None:
"""Register that the given tag tag is closed.
Args:
tag: the tag to close.
"""
if tag.display == Display.block:
# write missing bullets, if no content has been written so far.
if not self._flush_inline() and tag.list_bullet:
self.write_unconsumed_bullet()
self.current_block.prefix.remove_last_prefix()
self.close_block(tag)
if tag in self._open_annotations:
start_idx = self._open_annotations.pop(tag)
# do not record annotations with no content
if start_idx == self.current_block.idx:
return
for annotation in tag.annotation:
self.annotations.append(
Annotation(start_idx, self.current_block.idx, annotation))
def close_block(self, tag: HtmlElement):
"""Close the given HtmlElement by writing its bottom margin.
Args:
tag: the HTML Block element to close
"""
if tag.margin_after > self.margin:
required_newlines = tag.margin_after - self.margin
self.current_block.idx += required_newlines
self.blocks.append('\n' * (required_newlines - 1))
self.margin = tag.margin_after
def write_newline(self):
if not self._flush_inline():
self.blocks.append('')
self.current_block = self.current_block.new_block()
def get_text(self) -> str:
"""Provide a text representation of the Canvas."""
self._flush_inline()
return '\n'.join(self.blocks)
def _flush_inline(self) -> bool:
"""Attempt to flush the content in self.current_block into a new block.
Notes:
- If self.current_block does not contain any content (or only
whitespaces) no changes are made.
- Otherwise the content of current_block is added to blocks and a
new current_block is initialized.
Returns:
True if the attempt was successful, False otherwise.
"""
if not self.current_block.is_empty():
self.blocks.append(self.current_block.content)
self.current_block = self.current_block.new_block()
self.margin = 0
return True
return False
@property
def left_margin(self) -> int:
"""Return the length of the current line's left margin."""
return self.current_block.prefix.current_padding
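# Illustrative sketch (standalone): why open_block/close_block append
# '\n' * (required_newlines - 1). get_text() joins blocks with '\n', so a
# separator block of n - 1 newline characters yields n blank lines between
# its neighbours.
if __name__ == "__main__":
    n = 3
    blocks = ["first block", "\n" * (n - 1), "second block"]
    print("\n".join(blocks))  # three blank lines between the two blocks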
| 38.248447
| 79
| 0.636733
| 758
| 6,158
| 5.030343
| 0.228232
| 0.084972
| 0.092316
| 0.029898
| 0.278521
| 0.215578
| 0.169945
| 0.150538
| 0.140047
| 0.116968
| 0
| 0.002253
| 0.279149
| 6,158
| 160
| 80
| 38.4875
| 0.856724
| 0.37025
| 0
| 0.194805
| 0
| 0
| 0.016271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.064935
| 0
| 0.298701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef44efdf1df1a7a380310f517a87f13a57e2f804
| 1,832
|
py
|
Python
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 27
|
2021-07-14T17:12:29.000Z
|
2022-03-18T16:15:18.000Z
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 3
|
2021-08-29T11:22:04.000Z
|
2022-02-16T23:20:04.000Z
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 4
|
2021-07-25T09:55:09.000Z
|
2022-03-25T17:16:18.000Z
|
import json
import torch
from flask import Flask, request, jsonify
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from model.lofi2lofi_model import Decoder as Lofi2LofiDecoder
from model.lyrics2lofi_model import Lyrics2LofiModel
from server.lofi2lofi_generate import decode
from server.lyrics2lofi_predict import predict
device = "cpu"
app = Flask(__name__)
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=["30 per minute"]
)
lofi2lofi_checkpoint = "checkpoints/lofi2lofi_decoder.pth"
print("Loading lofi model...", end=" ")
lofi2lofi_model = Lofi2LofiDecoder(device=device)
lofi2lofi_model.load_state_dict(torch.load(lofi2lofi_checkpoint, map_location=device))
print(f"Loaded {lofi2lofi_checkpoint}.")
lofi2lofi_model.to(device)
lofi2lofi_model.eval()
lyrics2lofi_checkpoint = "checkpoints/lyrics2lofi.pth"
print("Loading lyrics2lofi model...", end=" ")
lyrics2lofi_model = Lyrics2LofiModel(device=device)
lyrics2lofi_model.load_state_dict(torch.load(lyrics2lofi_checkpoint, map_location=device))
print(f"Loaded {lyrics2lofi_checkpoint}.")
lyrics2lofi_model.to(device)
lyrics2lofi_model.eval()
@app.route('/')
def home():
return 'Server running'
@app.route('/decode', methods=['GET'])
def decode_input():
input = request.args.get('input')
number_list = json.loads(input)
json_output = decode(lofi2lofi_model, torch.tensor([number_list]).float())
response = jsonify(json_output)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/predict', methods=['GET'])
def lyrics_to_track():
input = request.args.get('input')
json_output = predict(lyrics2lofi_model, input)
response = jsonify(json_output)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
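# Usage sketch (illustrative): with the server running locally on the default
# Flask port, the decode endpoint json-loads its "input" query parameter into
# a list of numbers before handing it to the decoder, e.g.
#
#   curl 'http://localhost:5000/decode?input=[0.1,0.2,0.3]'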
| 29.548387
| 90
| 0.771288
| 227
| 1,832
| 6.013216
| 0.321586
| 0.082051
| 0.023443
| 0.026374
| 0.250549
| 0.215385
| 0.175824
| 0.118681
| 0.118681
| 0.118681
| 0
| 0.017802
| 0.110808
| 1,832
| 61
| 91
| 30.032787
| 0.820135
| 0
| 0
| 0.166667
| 0
| 0
| 0.158843
| 0.088428
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0.020833
| 0.3125
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef473c6a7f8ab89bcd75652de804e2198dfb2d97
| 1,153
|
py
|
Python
|
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | 1
|
2021-08-09T07:22:25.000Z
|
2021-08-09T07:22:25.000Z
|
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | null | null | null |
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
Cryptowat.ch API
https://cryptowat.ch/docs/api
https://api.cryptowat.ch/markets/prices '''
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file_price = Path(__file__).parents[0] / 'data' / 'cryptowatch-bitcoin-price2.csv'
def request(url):
    # avoid shadowing the url argument with the response object
    with urllib.request.urlopen(url) as resp:
        data = json.loads(resp.read().decode())
        print(data)
        return data['result']['price']['last'], data['result']['volume']
def main():
current_time = datetime.datetime.now(datetime.timezone.utc)
unix_timestamp = current_time.timestamp()
print(int(unix_timestamp))
url = 'https://api.cryptowat.ch/markets/prices'
try:
price, volume = request(url)
except Exception as e:
print(e)
#with open(csv_file_price, 'a') as f:
# f.write(str(int(unix_timestamp)) + ',' + price + '\n')
if __name__ == '__main__':
#main()
while True:
now = datetime.datetime.now()
        # poll until the wall clock reaches a multiple of 5 seconds
        while now.second % 5:
now = datetime.datetime.now()
print(now.second)
time.sleep(0.5)
main()
| 26.813953
| 86
| 0.633998
| 150
| 1,153
| 4.733333
| 0.446667
| 0.061972
| 0.080282
| 0.053521
| 0.090141
| 0.090141
| 0
| 0
| 0
| 0
| 0
| 0.006593
| 0.210755
| 1,153
| 42
| 87
| 27.452381
| 0.773626
| 0.180399
| 0
| 0.076923
| 0
| 0
| 0.115508
| 0.032086
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.230769
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef488748bc20e35c68916d75dae55ef743e1069d
| 6,145
|
py
|
Python
|
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: UTF-8
import os
import struct
from .sta import *
import json
import copy
import base64
from collections import OrderedDict
class Stream:
def __init__(self, byte):
self.byte = byte
self.index = 0
def read(self, size=None):
data = ''
if size is None:
data = self.byte[self.index:]
else:
data = self.byte[self.index:self.index+size]
self.index += len(data)
return data
def unpack_nil(stream, **kwargs):
stream.read(1)
return None
def unpack_int(stream, **kwargs):
return struct.unpack('=i', stream.read(4))[0]
def unpack_float(stream, **kwargs):
return struct.unpack('=f', stream.read(4))[0]
def unpack_string(stream, **kwargs):
length = struct.unpack('=i', stream.read(4))[0]
s = struct.unpack('=%ds' % length, stream.read(length))[0].decode()
return s
def unpack_binary(stream, **kwargs):
length = struct.unpack('=i', stream.read(4))[0]
s = struct.unpack('=%ds' % length, stream.read(length))[0]
mode = 0
if 'binary_mode' in kwargs:
mode = kwargs['binary_mode']
if mode == 0:
        return '@base64@%s' % base64.b64encode(s).decode()
elif mode == 1:
# save file
if 'getway' not in kwargs:
raise Exception("getway must be set.")
if 'workshop' not in kwargs:
raise Exception("workshop must be set.")
filename_ext = kwargs['getway'] + '.bin'
binary_filename = os.path.join(kwargs['workshop'], filename_ext)
        # bytes objects are immutable; use a bytearray so the flag byte at
        # index 8 can be set before writing the file
        s = bytearray(s)
        s[8] = 1
with open(binary_filename, 'wb') as f:
f.write(s)
return '@file@%s' % filename_ext
elif mode == 2:
return '@binary@%d' % length
else:
return binary(s)
def unpack_list(stream, **kwargs):
local_kwargs = copy.copy(kwargs)
if 'getway' not in local_kwargs:
local_kwargs['getway'] = ''
getway = local_kwargs['getway']
obj = []
length = struct.unpack('=i', stream.read(4))[0]
for i in range(length):
local_kwargs['getway'] = getway + '_' + str(i)
obj.append(unpack_obj(stream, **local_kwargs))
return obj
def unpack_dict(stream, **kwargs):
local_kwargs = copy.copy(kwargs)
if 'getway' not in local_kwargs:
local_kwargs['getway'] = ''
getway = local_kwargs['getway']
obj = {}
length = struct.unpack('=i', stream.read(4))[0]
for i in range(length):
key = unpack_string(stream, **kwargs)
local_kwargs['getway'] = getway + '_' + key
value = unpack_obj(stream, **local_kwargs)
obj[key] = value
obj = OrderedDict(sorted(obj.items(), key=lambda item: item[0]))
return obj
def unpack_obj(stream, **kwargs):
"""
Convert an stream(sta format) to object(json format)
:param stream: Stream of binary sta file
:param workshop: path to write binary file
:param getway: the getway to all values
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return: unpacked object
"""
mark = struct.unpack('=b', stream.read(1))[0]
if mark == STA_NIL:
return unpack_nil(stream, **kwargs)
elif mark == STA_INT:
return unpack_int(stream, **kwargs)
elif mark == STA_FLOAT:
return unpack_float(stream, **kwargs)
elif mark == STA_STRING:
return unpack_string(stream, **kwargs)
elif mark == STA_BINARY:
return unpack_binary(stream, **kwargs)
elif mark == STA_LIST:
return unpack_list(stream, **kwargs)
elif mark == STA_DICT:
return unpack_dict(stream, **kwargs)
else:
raise Exception("Unsupported mark type: ", type(mark))
def sta2obj(sta_filename, **kwargs):
"""
Convert filename.sta to object
:param sta_filename: input sta filename
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
# kwargs = {}
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 0
obj = unpack_obj(stream, **kwargs)
return obj
def sta2json(sta_filename, json_filename=None, **kwargs):
"""
Convert filename.sta to filename.json.
:param sta_filename: input sta filename
:param json_filename: output json filename or path
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
filepath, filename_ext = os.path.split(sta_filename)
filename, ext = os.path.splitext(filename_ext)
if json_filename is None:
json_filename = os.path.join(filepath, filename + ".json")
if os.path.isdir(json_filename):
json_filename = os.path.join(json_filename, filename + ".json")
workshop, getway_ext = os.path.split(json_filename)
getway = os.path.splitext(getway_ext)[0]
if len(workshop) > 0 and not os.path.isdir(workshop):
raise Exception("%s/ is not a valid path." % workshop)
with open(json_filename, 'w') as ofile:
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
kwargs['workshop'] = workshop
kwargs['getway'] = getway
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 1
obj = unpack_obj(stream, **kwargs)
json.dump(obj, ofile, indent=2)
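# Usage sketch (illustrative, hypothetical filenames):
#
#   sta2json("model.sta", "out/model.json")    # binaries written as @file@...
#   obj = sta2obj("model.sta", binary_mode=0)  # binaries inlined as @base64@...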
| 28.449074
| 73
| 0.593979
| 794
| 6,145
| 4.492443
| 0.157431
| 0.060555
| 0.024671
| 0.026913
| 0.508831
| 0.367816
| 0.359406
| 0.325764
| 0.325764
| 0.303897
| 0
| 0.014189
| 0.277461
| 6,145
| 215
| 74
| 28.581395
| 0.789189
| 0.172823
| 0
| 0.285714
| 0
| 0
| 0.075556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090226
| false
| 0
| 0.052632
| 0.015038
| 0.293233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef4888a9795dbbe5df0abc36429c88521fbd3e99
| 1,494
|
py
|
Python
|
872 Leaf-Similar Trees.py
|
krishna13052001/LeetCode
|
cd6ec626bea61f0bd9e8493622074f9e69a7a1c3
|
[
"MIT"
] | 872
|
2015-06-15T12:02:41.000Z
|
2022-03-30T08:44:35.000Z
|
872 Leaf-Similar Trees.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 8
|
2015-06-21T15:11:59.000Z
|
2022-02-01T11:22:34.000Z
|
872 Leaf-Similar Trees.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 328
|
2015-06-28T03:10:35.000Z
|
2022-03-29T11:05:28.000Z
|
#!/usr/bin/python3
"""
Consider all the leaves of a binary tree. From left to right order, the values
of those leaves form a leaf value sequence.
For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9,
8).
Two binary trees are considered leaf-similar if their leaf value sequence is the
same.
Return true if and only if the two given trees with head nodes root1 and root2
are leaf-similar.
Note:
Both of the given trees will have between 1 and 100 nodes.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
"""
brute force, get all the leaf and then compare
to save space, use generator
O(lg n) space for the stack
"""
itr1 = self.dfs(root1)
itr2 = self.dfs(root2)
while True:
a = next(itr1, None)
b = next(itr2, None)
if a != b:
return False
if a is None and b is None:
break
return True
def dfs(self, node):
stk = [node]
# pre-order
while stk:
cur = stk.pop()
if not cur:
continue
if not cur.left and not cur.right:
yield cur.val
else:
stk.append(cur.right)
stk.append(cur.left)
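# Illustrative check (standalone): both trees below have the leaf value
# sequence (1, 3), so leafSimilar returns True.
if __name__ == "__main__":
    a = TreeNode(2)
    a.left, a.right = TreeNode(1), TreeNode(3)
    b = TreeNode(9)
    b.left, b.right = TreeNode(1), TreeNode(3)
    print(Solution().leafSimilar(a, b))  # True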
| 25.758621
| 80
| 0.566934
| 213
| 1,494
| 3.957746
| 0.469484
| 0.032028
| 0.060498
| 0.045077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020877
| 0.358768
| 1,494
| 57
| 81
| 26.210526
| 0.859081
| 0.419009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef53ba7f982e4f61582b4dfc595af89608ab9da3
| 3,695
|
py
|
Python
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for common.py."""

import warnings

from graphy import common
from graphy import graphy_test
from graphy.backends import google_chart_api


class CommonTest(graphy_test.GraphyTest):

  def setUp(self):
    self.chart = google_chart_api.LineChart()

  def tearDown(self):
    warnings.resetwarnings()

  def testDependentAxis(self):
    self.assertTrue(self.chart.left is self.chart.GetDependentAxis())
    self.assertTrue(self.chart.bottom is self.chart.GetIndependentAxis())

  def testAxisAssignment(self):
    """Make sure axis assignment works properly"""
    new_axis = common.Axis()
    self.chart.top = new_axis
    self.assertTrue(self.chart.top is new_axis)
    new_axis = common.Axis()
    self.chart.bottom = new_axis
    self.assertTrue(self.chart.bottom is new_axis)
    new_axis = common.Axis()
    self.chart.left = new_axis
    self.assertTrue(self.chart.left is new_axis)
    new_axis = common.Axis()
    self.chart.right = new_axis
    self.assertTrue(self.chart.right is new_axis)

  def testAxisConstruction(self):
    axis = common.Axis()
    self.assertTrue(axis.min is None)
    self.assertTrue(axis.max is None)
    axis = common.Axis(-2, 16)
    self.assertEqual(axis.min, -2)
    self.assertEqual(axis.max, 16)

  def testGetDependentIndependentAxes(self):
    c = self.chart
    self.assertEqual([c.left, c.right], c.GetDependentAxes())
    self.assertEqual([c.top, c.bottom], c.GetIndependentAxes())
    right2 = c.AddAxis(common.AxisPosition.RIGHT, common.Axis())
    bottom2 = c.AddAxis(common.AxisPosition.BOTTOM, common.Axis())
    self.assertEqual([c.left, c.right, right2], c.GetDependentAxes())
    self.assertEqual([c.top, c.bottom, bottom2], c.GetIndependentAxes())

  # TODO: remove once AddSeries is deleted
  def testAddSeries(self):
    warnings.filterwarnings('ignore')
    chart = common.BaseChart()
    chart.AddSeries(points=[1, 2, 3], style='foo',
                    markers='markers', label='label')
    series = chart.data[0]
    self.assertEqual(series.data, [1, 2, 3])
    self.assertEqual(series.style, 'foo')
    self.assertEqual(series.markers, 'markers')
    self.assertEqual(series.label, 'label')

  # TODO: remove once the deprecation warning is removed
  def testDataSeriesStyles(self):
    # Deprecated approach
    warnings.filterwarnings('error')
    self.assertRaises(DeprecationWarning, common.DataSeries, [1, 2, 3],
                      color='0000FF')
    warnings.filterwarnings('ignore')
    d = common.DataSeries([1, 2, 3], color='0000FF')
    self.assertEqual('0000FF', d.color)
    d.color = 'F00'
    self.assertEqual('F00', d.color)

  # TODO: remove once the deprecation warning is removed
  def testDataSeriesArgumentOrder(self):
    # Deprecated approach
    warnings.filterwarnings('error')
    self.assertRaises(DeprecationWarning, common.DataSeries, [1, 2, 3],
                      '0000FF', 'style')
    # New order
    style = common._BasicStyle('0000FF')
    d = common.DataSeries([1, 2, 3], 'label', style)
    self.assertEqual('label', d.label)
    self.assertEqual(style, d.style)


if __name__ == '__main__':
  graphy_test.main()
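# Minimal sketch (my addition) of the "new order" exercised by
# testDataSeriesArgumentOrder above: DataSeries takes the points, then a
# label, then a style object (_BasicStyle is graphy's internal style class
# used in that test). Assumes graphy is importable.
from graphy import common

style = common._BasicStyle('0000FF')
series = common.DataSeries([1, 2, 3], 'label', style)
assert series.label == 'label' and series.style is style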
| 33.899083
| 74
| 0.707984
| 481
| 3,695
| 5.380457
| 0.322245
| 0.048686
| 0.041731
| 0.053323
| 0.316461
| 0.316461
| 0.204791
| 0.188949
| 0.155719
| 0.078825
| 0
| 0.020541
| 0.169959
| 3,695
| 108
| 75
| 34.212963
| 0.82328
| 0.223545
| 0
| 0.144928
| 0
| 0
| 0.039112
| 0
| 0
| 0
| 0
| 0.009259
| 0.347826
| 1
| 0.130435
| false
| 0
| 0.057971
| 0
| 0.202899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef53e0e036cb078d36e154064142222b1dfe4d85
| 608
|
py
|
Python
|
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
# Download a dataset archive and extract it under datasets/
import os
import tarfile

import requests


def fetch_data(dataset_url):
    file_name = dataset_url.split('/')[-1]
    dataset_path = os.path.join("datasets", file_name.split('.')[0])
    print(dataset_path)
    print(f"File name: {file_name.split('.')[0]}")
    os.makedirs(dataset_path, exist_ok=True)
    # download the archive and store it next to the extracted files
    data = requests.get(dataset_url)
    tgz_path = os.path.join(dataset_path, f"{file_name}")
    with open(tgz_path, 'wb') as file:
        file.write(data.content)
    # unpack the .tgz into the dataset directory
    dataset_tgz = tarfile.open(tgz_path)
    dataset_tgz.extractall(path=dataset_path)
    dataset_tgz.close()
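# Usage sketch (my addition): fetch_data expects a URL ending in an archive
# file name. The housing URL below is an illustrative assumption, not a URL
# taken from this repository.
if __name__ == '__main__':
    fetch_data(
        "https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/housing/housing.tgz"
    )
    # -> writes datasets/housing/housing.tgz and extracts its contents there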
| 27.636364
| 68
| 0.692434
| 90
| 608
| 4.455556
| 0.4
| 0.099751
| 0.049875
| 0.069825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005906
| 0.164474
| 608
| 21
| 69
| 28.952381
| 0.783465
| 0.034539
| 0
| 0
| 0
| 0
| 0.100855
| 0.042735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef54bb20c88dda93a302698251aa2e77667dc8a2
| 4,526
|
py
|
Python
|
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
"""
A place to implement built-in functions.
We use the bytecode for these when doing cross-version interpreting
"""
from xpython.pyobj import Function, Cell, make_cell
from xdis import codeType2Portable, PYTHON_VERSION, IS_PYPY
def func_code(func):
if hasattr(func, "func_code"):
return func.func_code
else:
assert hasattr(func, "__code__"), "%s should be a function type; is %s" % (
func,
type(func),
)
return func.__code__
# This code was originally written by Darius Bacon,
# but follows code from PEP 3115 listed below.
# Rocky Bernstein did the xdis adaptions and
# added a couple of bug fixes.
def build_class(opc, func, name, *bases, **kwds):
"""
Like built-in __build_class__() in bltinmodule.c, but running in the
byterun VM.
See also: PEP 3115: https://www.python.org/dev/peps/pep-3115/ and
https://mail.python.org/pipermail/python-3000/2007-March/006338.html
"""
# Parameter checking...
if not (isinstance(func, Function)):
raise TypeError("func must be a PyVM function")
if not isinstance(name, str):
raise TypeError("name is not a string")
metaclass = kwds.pop("metaclass", None)
if metaclass is None:
metaclass = type(bases[0]) if bases else type
if isinstance(metaclass, type):
metaclass = calculate_metaclass(metaclass, bases)
if hasattr(metaclass, "__prepare__"):
prepare = metaclass.__prepare__
namespace = prepare(name, bases, **kwds)
else:
namespace = {}
python_implementation = "PyPy" if IS_PYPY else "CPython"
if not (
opc.version == PYTHON_VERSION
and python_implementation == opc.python_implementation
):
# convert code to xdis's portable code type.
class_body_code = codeType2Portable(func_code(func))
else:
class_body_code = func.func_code
# Execute the body of func. This is the step that would go wrong if
# we tried to use the built-in __build_class__, because __build_class__
# does not call func, it magically executes its body directly, as we
# do here (except we invoke our PyVM instead of CPython's).
#
# This behavior when interpreting bytecode that isn't the same as
# the bytecode using in the running Python can cause a SEGV, specifically
# between Python 3.5 running 3.4 or earlier.
frame = func._vm.make_frame(
code=class_body_code,
f_globals=func.func_globals,
f_locals=namespace,
closure=func.__closure__,
)
# rocky: cell is the return value of a function where?
cell = func._vm.eval_frame(frame)
# Add any class variables that may have been added in running class_body_code.
# See test_attribute_access.py for a simple example that needs the update below.
namespace.update(frame.f_locals)
# If metaclass is builtin "type", it can't deal with a xpython.pyobj.Cell object
# but needs a builtin cell object. make_cell() can do this.
if "__classcell__" in namespace and metaclass == type:
namespace["__classcell__"] = make_cell(namespace["__classcell__"].get())
try:
cls = metaclass(name, bases, namespace)
except TypeError:
# For mysterious reasons the above can raise a:
# __init__() takes *n* positional arguments but *n+1* were given.
# In particular for:
# class G(Generic[T]):
# pass
import types
cls = types.new_class(name, bases, kwds, exec_body=lambda ns: namespace)
pass
if isinstance(cell, Cell):
cell.set(cls)
return cls
# From Pypy 3.6
# def find_metaclass(bases, namespace, globals, builtin):
# if '__metaclass__' in namespace:
# return namespace['__metaclass__']
# elif len(bases) > 0:
# base = bases[0]
# if hasattr(base, '__class__'):
# return base.__class__
# else:
# return type(base)
# elif '__metaclass__' in globals:
# return globals['__metaclass__']
# else:
# try:
# return builtin.__metaclass__
# except AttributeError:
# return type
def calculate_metaclass(metaclass, bases):
"Determine the most derived metatype."
winner = metaclass
for base in bases:
t = type(base)
if issubclass(t, winner):
winner = t
elif not issubclass(winner, t):
raise TypeError("metaclass conflict", winner, t)
return winner
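# Minimal sketch (my addition) of what calculate_metaclass resolves: given an
# explicit metaclass and the metaclasses of the bases, the most derived one
# wins, and incompatible ones raise TypeError.
class MetaA(type):
    pass

class Base(metaclass=MetaA):
    pass

assert calculate_metaclass(type, (Base,)) is MetaA   # MetaA is more derived
assert calculate_metaclass(MetaA, (object,)) is MetaA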
| 32.328571
| 84
| 0.650685
| 588
| 4,526
| 4.807823
| 0.363946
| 0.019809
| 0.018394
| 0.012027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011367
| 0.261379
| 4,526
| 139
| 85
| 32.561151
| 0.834281
| 0.455148
| 0
| 0.047619
| 0
| 0
| 0.092257
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.047619
| false
| 0.015873
| 0.047619
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef58bac3885ae00f40f0903957d207828fe3e0c6
| 857
|
py
|
Python
|
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
# import the necessary packages
import os
# Set the dataset base path here
BASE_PATH = "/content/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--/dataset"
# build the path to the annotations and input images
ANNOT_PATH = os.path.sep.join([BASE_PATH, 'annotations'])
IMAGES_PATH = os.path.sep.join([BASE_PATH, 'images'])
# define the training/testing split
# If you have only training dataset then put here TRAIN_TEST_SPLIT = 1
TRAIN_TEST_SPLIT = 0.80
# build the path to the output training and test .csv files
TRAIN_CSV = os.path.sep.join([BASE_PATH, 'train.csv'])
TEST_CSV = os.path.sep.join([BASE_PATH, 'test.csv'])
# build the path to the output classes CSV files
CLASSES_CSV = os.path.sep.join([BASE_PATH, 'classes.csv'])
# build the path to the output predictions dir
OUTPUT_DIR = os.path.sep.join([BASE_PATH, 'predictions'])
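# Usage sketch (my addition): downstream scripts are assumed to consume this
# module as a plain namespace, e.g.:
#
#   from config import object_detection_retinanet_config as config
#   print(config.TRAIN_CSV)    # <BASE_PATH>/train.csv
#   print(config.CLASSES_CSV)  # <BASE_PATH>/classes.csv
#
# The import path is an assumption based on the file's location under config/.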
| 35.708333
| 97
| 0.757293
| 142
| 857
| 4.450704
| 0.34507
| 0.101266
| 0.085443
| 0.123418
| 0.371835
| 0.344937
| 0.275316
| 0
| 0
| 0
| 0
| 0.005355
| 0.128355
| 857
| 23
| 98
| 37.26087
| 0.840696
| 0.425904
| 0
| 0
| 0
| 0.111111
| 0.287785
| 0.171843
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef59c84efb2830bb4da68800485a32f52a474ab9
| 14,738
|
py
|
Python
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 20
|
2017-05-17T18:43:08.000Z
|
2021-02-13T16:20:53.000Z
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 8
|
2017-06-04T17:01:06.000Z
|
2022-03-17T12:43:32.000Z
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 1
|
2017-06-04T13:09:19.000Z
|
2017-06-04T13:09:19.000Z
|
import re
import os
from collections import OrderedDict as odict

from .conf import USER_DIR
from .util import cacheattr, setcwd, runsyscmd, logdbg
from . import util
from . import err

_cache_entry = r'^(.*?)(:.*?)=(.*)$'


def hascache(builddir):
    c = os.path.join(builddir, 'CMakeCache.txt')
    if os.path.exists(c):
        return c
    return None


def setcachevar(builddir, var, value):
    setcachevars(builddir, odict([(var, value)]))


def getcachevar(builddir, var):
    v = getcachevars(builddir, [var])
    return v[var]


def setcachevars(builddir, varvalues):
    with setcwd(builddir, silent=True):
        with open('CMakeCache.txt', 'r') as f:
            ilines = f.readlines()
        olines = []
        for l in ilines:
            for k, v in varvalues.items():
                if l.startswith(k + ':'):
                    n = re.sub(_cache_entry, r'\1\2=' + v, l)
                    l = n
            olines.append(l)
        with open('CMakeCache.txt', 'w') as f:
            f.writelines(olines)


def getcachevars(builddir, varlist):
    vlist = [v + ':' for v in varlist]
    values = odict()
    with setcwd(builddir, silent=True):
        with open('CMakeCache.txt') as f:
            for line in f:
                for v in vlist:
                    if line.startswith(v):
                        ls = line.strip()
                        vt = re.sub(_cache_entry, r'\1', ls)
                        values[vt] = re.sub(_cache_entry, r'\3', ls)
    return values


def loadvars(builddir):
    """if builddir does not exist or does not have a cache, returns an
    empty odict"""
    v = odict()
    if builddir is None or not os.path.exists(builddir):
        return v
    c = os.path.join(builddir, 'CMakeCache.txt')
    if os.path.exists(c):
        with open(c, 'r') as f:
            for line in f:
                # logdbg("loadvars0", line.strip())
                if not re.match(_cache_entry, line):
                    continue
                ls = line.strip()
                name = re.sub(_cache_entry, r'\1', ls)
                vartype = re.sub(_cache_entry, r'\2', ls)[1:]
                value = re.sub(_cache_entry, r'\3', ls)
                # logdbg("loadvars1", name, vartype, value)
                v[name] = CMakeCacheVar(name, value, vartype)
    return v


# -----------------------------------------------------------------------------
class CMakeCache(odict):

    def __init__(self, builddir=None):
        super().__init__(loadvars(builddir))
        self.dirty = False
        self.cache_file = None
        if builddir:
            self.cache_file = os.path.join(builddir, 'CMakeCache.txt')

    def __eq__(self, other):
        """code quality checkers complain that this class adds attributes
        without overriding __eq__. So just fool them!"""
        return super().__init__(other)

    def getvars(self, names):
        out = odict()
        for n in names:
            v = self.get(n)
            out[n] = v
        return out

    def b(self, name, val, **kwargs):
        """set a boolean"""
        return self.setvar(name, val, "BOOL", **kwargs)

    def s(self, name, val, **kwargs):
        """set a string"""
        return self.setvar(name, val, "STRING", **kwargs)

    def p(self, name, val, **kwargs):
        """set a path to a dir"""
        if util.in_windows():
            val = re.sub(r'\\', r'/', val)
        return self.setvar(name, val, "PATH", **kwargs)

    def f(self, name, val, **kwargs):
        """set a path to a file"""
        if util.in_windows():
            val = re.sub(r'\\', r'/', val)
        return self.setvar(name, val, "FILEPATH", **kwargs)

    def i(self, name, val, **kwargs):
        """set a cmake internal var"""
        return self.setvar(name, val, "INTERNAL", **kwargs)

    def setvar(self, name, val, vartype=None, **kwargs):
        v = self.get(name)
        if v is not None:
            changed = v.reset(val, vartype, **kwargs)
            self.dirty |= changed
            return changed
        else:
            v = CMakeCacheVar(name, val, vartype, dirty=True, **kwargs)
            self[name] = v
            self.dirty = True
            return True

    def commit(self, builddir):
        if (not self.dirty
                or builddir is None
                or not os.path.exists(builddir)
                or not os.path.exists(os.path.join(builddir, 'CMakeCache.txt'))):
            return False
        tmp = odict()
        for _, v in self.items():
            if not v.dirty:
                continue
            tmp[v.name] = v.val
        setcachevars(builddir, tmp)
        for _, v in self.items():
            v.dirty = False
        self.dirty = False
        return True


# -------------------------------------------------------------------------
class CMakeCacheVar:

    def __init__(self, name, val, vartype=None, dirty=False, from_input=False):
        self.name = name
        self.val = val
        self.vartype = self._guess_var_type(name, val, vartype)
        self.dirty = dirty
        self.from_input = from_input

    def _guess_var_type(self, name, val, vartype):
        """make an informed guess of the var type
        @todo: add a test for this"""
        if vartype is not None:
            return vartype
        elif val.upper() in ("ON", "OFF", "NO", "YES", "1", "0", "TRUE", "FALSE", "T", "F", "N", "Y"):
            # https://cmake.org/pipermail/cmake/2007-December/018548.html
            return "BOOL"
        elif os.path.isfile(val) or "PATH" in name.upper():
            return "FILEPATH"
        elif os.path.isdir(val) or "DIR" in name.upper() or os.path.isabs(val):
            return "PATH"
        else:
            return "STRING"

    def reset(self, val, vartype='', **kwargs):
        """
        :param val:
        :param vartype:
        :param kwargs:
            force_dirty, defaults to False
            from_input, defaults to None
        :return:
        """
        force_dirty = kwargs.get('force_dirty', False)
        from_input = kwargs.get('from_input')
        if from_input is not None:
            self.from_input = from_input
        if vartype == 'STRING' or (vartype is None and self.vartype == 'STRING'):
            candidates = (val, val.strip("'"), val.strip('"'))
            equal = False
            for c in candidates:
                if c == self.val:
                    equal = True
                    break
        else:
            equal = (self.val == val)
        if not equal or (vartype is not None and vartype != self.vartype):
            self.val = val
            self.vartype = vartype if vartype is not None else self.vartype
            self.dirty = True
            return True
        if force_dirty:
            self.dirty = True
        return force_dirty

    def __repr__(self):
        return self.name + ':' + self.vartype + '=' + self.val

    def __str__(self):
        return self.name + ':' + self.vartype + '=' + self.val


# -----------------------------------------------------------------------------
class CMakeSysInfo:
    """encapsulates the results returned from
    `cmake [-G <which_generator>][-T <toolset>][-A <architecture>] --system-information`.
    This is used for selecting default values for system, compiler,
    generator, etc."""

    @staticmethod
    def generator():
        return cacheattr(__class__, '_generator_default',
                         lambda: __class__._getstr('CMAKE_GENERATOR', 'default'))

    @staticmethod
    def system_name(which_generator="default"):
        return __class__.var('CMAKE_SYSTEM_NAME', which_generator, lambda v: v.lower())

    @staticmethod
    def architecture(which_generator="default"):
        return __class__.var('CMAKE_SYSTEM_PROCESSOR', which_generator, lambda v: v.lower())

    @staticmethod
    def cxx_compiler(which_generator="default"):
        return __class__.var('CMAKE_CXX_COMPILER', which_generator)

    @staticmethod
    def c_compiler(which_generator="default"):
        return __class__.var('CMAKE_C_COMPILER', which_generator)

    @staticmethod
    def var(var_name, which_generator="default", transform_fn=lambda x: x):
        gs = __class__._getstr
        return cacheattr(__class__, '_{}_{}'.format(var_name, _genid(which_generator)),
                         lambda: transform_fn(gs(var_name, which_generator)))

    @staticmethod
    def info(which_generator="default"):
        return cacheattr(__class__, '_info_' + _genid(which_generator),
                         lambda: __class__.system_info(which_generator))

    @staticmethod
    def _getstr(var_name, which_generator):
        regex = r'^{} "(.*)"'.format(var_name)
        for l in __class__.info(which_generator):
            # logdbg(l.strip("\n"), l.startswith(var_name), var_name)
            if l.startswith(var_name):
                l = l.strip("\n").lstrip(" ").rstrip(" ")
                # logdbg(var_name, "startswith :", l)
                if re.match(regex, l):
                    s = re.sub(regex, r'\1', l)
                    # logdbg(var_name, "result: '" + s + "'")
                    return s
        # logdbg("--------------------------------------\n", __class__.info(which_generator))
        msg = "could not find variable {} in the output of `cmake --system-information -G '{}'`"
        raise err.Error(msg, var_name, which_generator)

    @staticmethod
    def system_info(gen):
        """gen can be a string or a cmany.Generator object"""
        from .generator import Generator
        logdbg("CMakeSystemInfo: asked info for", gen)
        p = _genid(gen)
        d = os.path.join(USER_DIR, 'cmake_info', p)
        p = os.path.join(d, 'info')
        logdbg("CMakeSystemInfo: path=", p)
        # https://stackoverflow.com/questions/7015587/python-difference-of-2-datetimes-in-months
        if os.path.exists(p) and util.time_since_modification(p).months < 1:
            logdbg("CMakeSystemInfo: asked info for", gen, "... found", p)
            with open(p, "r") as f:
                i = f.readlines()
            if i:
                return i
            else:
                logdbg("CMakeSystemInfo: info for gen", gen, "is empty...")
        #
        if isinstance(gen, Generator):
            cmd = ['cmake'] + gen.configure_args() + ['--system-information']
            logdbg("CMakeSystemInfo: from generator! '{}' ---> cmd={}".format(gen, cmd))
        else:
            if gen == "default" or gen == "":
                logdbg("CMakeSystemInfo: default! '{}'".format(gen))
                cmd = ['cmake', '--system-information']
            else:
                logdbg("CMakeSystemInfo: assume vs! '{}'".format(gen))
                from . import vsinfo
                gen = vsinfo.to_gen(gen)
                if isinstance(gen, list):
                    cmd = ['cmake', '-G'] + gen + ['--system-information']
                else:
                    if not (gen.startswith('vs') or gen.startswith('Visual Studio')):
                        raise Exception("unknown generator: {}".format(gen))
                    cmd = ['cmake', '-G', gen, '--system-information']
        # remove export build commands as cmake reacts badly to it,
        # generating an empty info string
        _remove_invalid_args_from_sysinfo_cmd(cmd)
        print("\ncmany: CMake information for generator '{}' was not found. Creating and storing... cmd={}".format(gen, cmd))
        #
        if not os.path.exists(d):
            os.makedirs(d)
        with setcwd(d):
            out = runsyscmd(cmd, echo_output=False, capture_output=True)
            logdbg("cmany: finished generating information for generator '{}'\n".format(gen), out, cmd)
        out = out.strip()
        if not out:
            from .err import InvalidGenerator  # relative import (was `from err import ...`)
            raise InvalidGenerator(gen, "for --system-information. cmd='{}'".format(cmd))
        with open(p, "w") as f:
            f.write(out)
        i = out.split("\n")
        return i


def _remove_invalid_args_from_sysinfo_cmd(cmd):
    gotit = None
    # remove compile commands args
    for i, elm in enumerate(cmd):
        if 'CMAKE_EXPORT_COMPILE_COMMANDS' in elm:
            # can't strip out if compile commands is not given as one,
            # because the command will become malformed when we remove
            if elm not in ('-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF'):
                raise Exception("malformed command")
            gotit = i
    if gotit is not None:
        del cmd[gotit]
    # remove architecture args
    if '-A' in cmd:
        i = cmd.index('-A')
        del cmd[i + 1]
        del cmd[i]


# -----------------------------------------------------------------------------
def _genid(gen):
    from .generator import Generator
    p = gen.sysinfo_name if isinstance(gen, Generator) else gen
    if isinstance(gen, list):
        p = " ".join(p)
    p = re.sub(r'[() ]', '_', p)
    return p


# -----------------------------------------------------------------------------
# def get_toolchain_cache(toolchain):
#     d = os.path.join(USER_DIR, 'toolchains', re.sub(os.sep, '+', toolchain))
#     logdbg("toolchain cache: USER_DIR=", USER_DIR)
#     logdbg("toolchain cache: d=", d)
#     bd = os.path.join(d, 'build')
#     logdbg("toolchain cache: bd=", bd)
#     if not os.path.exists(d):
#         os.makedirs(d)
#     with setcwd(d):
#         with open('main.cpp', 'w') as f:
#             f.write("int main() {}")
#         with open('CMakeLists.txt', 'w') as f:
#             f.write("""
#                 cmake_minimum_required(VERSION 2.6)
#                 project(toolchain_test)
#                 add_executable(main main.cpp)
#                 """)
#     if not os.path.exists(bd):
#         os.makedirs(bd)
#     with setcwd(bd):
#         cmd = ['cmake', '-DCMAKE_TOOLCHAIN_FILE='+toolchain, '..']
#         runsyscmd(cmd, echo_output=True)
#     return loadvars(bd)


def extract_toolchain_compilers(toolchain):
    with open(toolchain) as f:
        lines = f.readlines()
    out = odict()
    for l in lines:
        res = re.search(r'(set|SET)\ ?\(\ ?(CMAKE_.*?_COMPILER) (.*?)\ ?\)', l)
        if res:
            res = res.groups()
            out[res[1]] = res[2]
    return out
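# Minimal sketch (my addition) of extract_toolchain_compilers on a throwaway
# toolchain file; only the regex-based parsing above is exercised.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.cmake', delete=False) as tc:
    tc.write('set(CMAKE_C_COMPILER /usr/bin/clang)\n')
    tc.write('SET(CMAKE_CXX_COMPILER /usr/bin/clang++)\n')

print(extract_toolchain_compilers(tc.name))
# -> odict([('CMAKE_C_COMPILER', '/usr/bin/clang'),
#           ('CMAKE_CXX_COMPILER', '/usr/bin/clang++')])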
| 36.937343
| 125
| 0.519677
| 1,670
| 14,738
| 4.449102
| 0.181437
| 0.016151
| 0.014536
| 0.012113
| 0.259219
| 0.189233
| 0.134051
| 0.11467
| 0.068371
| 0.037147
| 0
| 0.003567
| 0.296105
| 14,738
| 398
| 126
| 37.030151
| 0.712647
| 0.210544
| 0
| 0.212454
| 0
| 0
| 0.113199
| 0.014161
| 0
| 0
| 0
| 0.002513
| 0
| 1
| 0.120879
| false
| 0
| 0.040293
| 0.029304
| 0.311355
| 0.003663
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5b7b88dd380eec142de24fd5621ee02381ea01
| 3,744
|
py
|
Python
|
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | 4
|
2018-02-06T21:15:31.000Z
|
2018-07-28T14:00:17.000Z
|
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | null | null | null |
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | 2
|
2020-02-07T18:26:09.000Z
|
2020-10-16T15:52:56.000Z
|
import numpy as np
import cv2
import sys
import os

####################### RGB Image Data Analysis #######################
### Expected data layout: Genotype --> Replicates (Plants) --> Different Views --> Image captured by each Day ###

# mfold is the folder name that stores the data in the structure above
mfold = sys.argv[1]

# The pixel ratio between the further and the closer zoom level is 1:2.02;
# each pixel at the closer zoom level is 0.746 mm. This script generates
# values based on pixel counts.

# binary() extracts green pixels using the threshold (2*G)/(R+B) > 1.15,
# restricted to the crop window [upper:bottom, left:right]
def binary(pic, upper, bottom, left, right):
    mypic = []
    myl = np.shape(pic)[0]
    myw = np.shape(pic)[1]
    x1 = left
    x2 = right
    y1 = upper
    y2 = bottom
    for iind, i in enumerate(pic):
        if iind < y1 or iind > y2:
            n = [0] * myw
        else:
            n = []
            for jind, j in enumerate(i):
                if j > 1.15:
                    if jind < x1 or jind > x2:
                        t = 0
                    else:
                        t = 255
                else:
                    t = 0
                n.append(t)
        mypic.append(n)
    mypic = np.array(mypic)
    return mypic

# extract plant height, plant width and plant area pixel counts
def call_numeric(thresh):
    hh = 0
    ww = 0
    aa = 0
    areas = []
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    for c in contours:
        areas.append(cv2.contourArea(c))
    # take the contour with the largest area (equivalent to the original
    # argsort-and-take-last approach, without building a ragged np.array)
    cnt = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    hh = str(h)
    ww = str(w)
    aa = str(cv2.contourArea(hull))
    return hh, ww, aa, areas

whole = os.listdir(mfold)

# two zoom levels were applied to the RGB images on different days,
# so plant images are analyzed per zoom level
close = set([])
far = set([])
for i in range(1, 27):
    close.add('Day_' + str(i).zfill(3))
close.remove('Day_' + str(11).zfill(3))
for i in range(27, 33):
    far.add('Day_' + str(i).zfill(3))
far.add('Day_' + str(11).zfill(3))

# out is the file with the numeric values extracted from the RGB images
out = open('RGB_extraction.csv', 'w')
# trace image files that cannot load correctly, so the whole loop keeps going
error = open('RGB_extraction_error.csv', 'w')
out.write('PlantID' + '\t' + 'Date' + '\t' + 'View' + '\t' + 'Plant Height' + '\t' + 'Plant Width' + '\t' + 'Projected Plant Area' + '\n')
views = ['VIS SV 0', 'VIS SV 90']
for j1 in sorted(whole):
    if j1 == 'Genotype_ZL022':
        continue
    for i1 in os.listdir('{0}/{1}'.format(mfold, j1)):
        for v in views:
            for d1 in sorted(os.listdir('{0}/{1}/{2}/{3}/'.format(mfold, j1, i1, v))):
                nlist = [i1, d1.replace('.png', '')]
                myview = 'View' + v.replace('VIS SV ', '')
                na = [myview, 'NA', 'NA', 'NA']
                date = d1.replace('.png', '')
                try:
                    abc = cv2.imread('{0}/{1}/{2}/{3}/{4}'.format(mfold, j1, i1, v, d1))
                    abc = abc.astype(float)  # np.float is removed in recent numpy
                    imgreen = (2 * abc[:, :, 1]) / (abc[:, :, 0] + abc[:, :, 2])
                    if date in close:
                        thresh = binary(imgreen, 50, 1950, 335, 2280)
                    elif date in far:
                        thresh = binary(imgreen, 50, 1450, 815, 1780)
                    cv2.imwrite('test.jpg', thresh)
                    # cv2.CV_LOAD_IMAGE_GRAYSCALE is the OpenCV 1.x constant name
                    thresh = cv2.imread("test.jpg", cv2.IMREAD_GRAYSCALE)
                    h, w, area, areas0 = call_numeric(thresh)
                    total = max(areas0)
                    k = areas0.index(total)
                    del areas0[k]
                    for i in areas0:
                        total -= i
                    nlist.append(myview)
                    if date in far:
                        nlist.append(str(float(h) * 2.02))
                        nlist.append(str(float(w) * 2.02))
                        nlist.append(str(float(total)))
                    else:
                        nlist.append(h)
                        nlist.append(w)
                        nlist.append(str(total))  # str() added: join() needs strings
                except Exception:
                    nlist.extend(na)
                    error.write(j1 + ':' + i1 + ':' + v + ':' + d1 + '\n')
                out.write('\t'.join(nlist) + '\n')
out.close()
error.close()
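# Minimal sketch (my addition): binary() on a tiny synthetic greenness map.
# Rows/cols outside the window are zeroed; in-window values above 1.15
# become 255.
demo = np.array([[1.2, 1.0, 1.3],
                 [1.3, 1.2, 1.0],
                 [1.0, 1.3, 1.2]])
print(binary(demo, upper=0, bottom=1, left=0, right=1))
# -> [[255   0   0]
#     [255 255   0]
#     [  0   0   0]]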
| 32
| 174
| 0.626603
| 610
| 3,744
| 3.82459
| 0.362295
| 0.033005
| 0.007715
| 0.024432
| 0.058294
| 0.032576
| 0
| 0
| 0
| 0
| 0
| 0.04514
| 0.18937
| 3,744
| 116
| 175
| 32.275862
| 0.723558
| 0.236111
| 0
| 0.058824
| 0
| 0
| 0.091534
| 0.008683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5c0e5ff1790c1367e3395cb63ad1ddf91375ef
| 4,620
|
py
|
Python
|
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | 10
|
2019-05-02T14:08:32.000Z
|
2021-03-15T16:07:19.000Z
|
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | null | null | null |
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | 3
|
2019-05-02T14:08:33.000Z
|
2021-02-10T03:47:29.000Z
|
import numpy as np

from . import vector as V


def rbm_to_dualquat(rbm):
    import cgkit.cgtypes as cg
    q0 = cg.quat().fromMat(cg.mat3(rbm[:3, :3].T.tolist()))
    q0 = q0.normalize()
    q0 = np.array([q0.w, q0.x, q0.y, q0.z])
    t = rbm[:3, 3]
    q1 = np.array([
        -0.5*( t[0]*q0[1] + t[1]*q0[2] + t[2]*q0[3]),
         0.5*( t[0]*q0[0] + t[1]*q0[3] - t[2]*q0[2]),
         0.5*(-t[0]*q0[3] + t[1]*q0[0] + t[2]*q0[1]),
         0.5*( t[0]*q0[2] - t[1]*q0[1] + t[2]*q0[0]) ])
    return np.array(q0.tolist() + q1.tolist())


def dualquats_to_rbms(blendq):
    qn = blendq[:, :4]
    qd = blendq[:, 4:]
    len2 = np.sum(qn**2, axis=1)
    w, x, y, z = qn[:, 0], qn[:, 1], qn[:, 2], qn[:, 3]
    t0, t1, t2, t3 = qd[:, 0], qd[:, 1], qd[:, 2], qd[:, 3]
    M = np.empty((len(blendq), 4, 4))
    M[:, 0, 0] = w*w + x*x - y*y - z*z
    M[:, 0, 1] = 2*x*y - 2*w*z
    M[:, 0, 2] = 2*x*z + 2*w*y
    M[:, 1, 0] = 2*x*y + 2*w*z
    M[:, 1, 1] = w*w + y*y - x*x - z*z
    M[:, 1, 2] = 2*y*z - 2*w*x
    M[:, 2, 0] = 2*x*z - 2*w*y
    M[:, 2, 1] = 2*y*z + 2*w*x
    M[:, 2, 2] = w*w + z*z - x*x - y*y
    M[:, 0, 3] = -2*t0*x + 2*w*t1 - 2*t2*z + 2*y*t3
    M[:, 1, 3] = -2*t0*y + 2*t1*z - 2*x*t3 + 2*w*t2
    M[:, 2, 3] = -2*t0*z + 2*x*t2 + 2*w*t3 - 2*t1*y
    M[:, 3] = 0
    M[:, 3, 3] = len2
    M /= len2[:, np.newaxis, np.newaxis]
    return M


def dq_skinning(pts, BW, dqs):
    # note: scipy.weave is Python-2 only; kept as in the original
    from scipy import weave
    blendq = np.sum(BW[:, :, np.newaxis] * dqs[np.newaxis], axis=1)
    code = """
    using namespace blitz;
    float M00, M01, M02, M03;
    float M10, M11, M12, M13;
    float M20, M21, M22, M23;
    for (int i=0; i<num_pts; i++) {
        float w = blendq(i,0);
        float x = blendq(i,1);
        float y = blendq(i,2);
        float z = blendq(i,3);
        float t0 = blendq(i,4);
        float t1 = blendq(i,5);
        float t2 = blendq(i,6);
        float t3 = blendq(i,7);
        float len2 = 1. / (w*w + x*x + y*y + z*z);
        M00 = (w*w + x*x - y*y - z*z) * len2;
        M01 = (2*x*y - 2*w*z) * len2;
        M02 = (2*x*z + 2*w*y) * len2;
        M10 = (2*x*y + 2*w*z) * len2;
        M11 = (w*w + y*y - x*x - z*z) * len2;
        M12 = (2*y*z - 2*w*x) * len2;
        M20 = (2*x*z - 2*w*y) * len2;
        M21 = (2*y*z + 2*w*x) * len2;
        M22 = (w*w + z*z - x*x - y*y) * len2;
        M03 = (-2*t0*x + 2*w*t1 - 2*t2*z + 2*y*t3) * len2;
        M13 = (-2*t0*y + 2*t1*z - 2*x*t3 + 2*w*t2) * len2;
        M23 = (-2*t0*z + 2*x*t2 + 2*w*t3 - 2*t1*y) * len2;
        pts_transformed(i,0) = M00 * pts(i,0) + M01 * pts(i,1) + M02 * pts(i,2) + M03;
        pts_transformed(i,1) = M10 * pts(i,0) + M11 * pts(i,1) + M12 * pts(i,2) + M13;
        pts_transformed(i,2) = M20 * pts(i,0) + M21 * pts(i,1) + M22 * pts(i,2) + M23;
    }
    """
    pts_transformed = np.empty_like(pts)
    num_pts = len(blendq)
    num_bws = BW.shape[1]
    weave.inline(code,
                 ["num_pts", "num_bws", "blendq", "pts_transformed", "pts", "BW"],
                 type_converters=weave.converters.blitz)
    return pts_transformed


def dq_skinning_py(pts, BW, dqs, inverse=False):
    # blend in dual quaternion space
    blendq = np.sum(BW[:, :, np.newaxis] * dqs[np.newaxis], axis=1)
    # convert them back to rigid body motions (4x4)
    M = dualquats_to_rbms(blendq)
    if inverse:
        print(M)
        M = np.array(list(map(np.linalg.inv, M)))
    # transform points with the final matrix
    return V.dehom(np.sum(M * V.hom(pts)[:, np.newaxis, :], axis=2))


def blend_skinning(pts, BW, rbms, method='lbs'):
    """
    perform blend skinning of pts given blend weights BW and the 4x4 rigid body motions in rbms
    pts should be an array of points, so the shape should be (num_points, 3)
    BW should be an array of blendweights, so the shape should be (num_points, num_rbms)
        where num_rbms gives the number of rigid body motion parts (joints)
    rbms should be an array of shape (num_rbms, 4, 4) - one rigid body motion for each column in BW
    supported methods are "lbs" (linear blend skinning)
    and "dq" (dual quaternion skinning)
    """
    # TODO use masked arrays to accelerate?
    if method == 'lbs':
        transformed_pts = np.tensordot(V.hom(pts), rbms, axes=(1, 2))
        if transformed_pts.shape[-1] == 4:
            transformed_pts = V.dehom(transformed_pts)
        return np.sum(BW[:, :, np.newaxis] * transformed_pts, axis=1)
    elif method == 'dq':
        rbms = np.asanyarray(rbms)
        dqs = np.array(list(map(rbm_to_dualquat, rbms)))
        return dq_skinning(pts, BW, dqs)
    else:
        raise ValueError("Unknown skinning method")
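# Minimal sketch (my addition): linear blend skinning with two identity
# rigid body motions leaves the points unchanged. Assumes the cgtools
# vector module imported above as V.
pts = np.array([[0.0, 0.0, 0.0],
                [1.0, 2.0, 3.0]])
BW = np.array([[0.5, 0.5],
               [0.2, 0.8]])              # blend weights per point, rows sum to 1
rbms = np.array([np.eye(4), np.eye(4)])  # one 4x4 rigid body motion per joint
print(blend_skinning(pts, BW, rbms, method='lbs'))  # == pts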
| 37.868852
| 104
| 0.515368
| 855
| 4,620
| 2.74269
| 0.181287
| 0.016205
| 0.010235
| 0.008529
| 0.219616
| 0.165458
| 0.165458
| 0.104904
| 0.0742
| 0.0742
| 0
| 0.092855
| 0.282035
| 4,620
| 121
| 105
| 38.181818
| 0.614109
| 0.146537
| 0
| 0.020408
| 0
| 0.102041
| 0.345562
| 0
| 0
| 0
| 0
| 0.008264
| 0
| 1
| 0.05102
| false
| 0
| 0.040816
| 0
| 0.153061
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5cca29cfc460b593d8a2ef7fb0d7625f148237
| 2,214
|
py
|
Python
|
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | 1
|
2019-12-10T12:27:33.000Z
|
2019-12-10T12:27:33.000Z
|
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | null | null | null |
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Team : uyplayer team
# Author: uyplayer
# Date  : 2019/11/20 4:22 PM
# Tool  : PyCharm

'''
https://blog.csdn.net/c9Yv2cf9I06K2A9E/article/details/79739287
https://msd.misuland.com/pd/13340603045208861
'''

# Imports and constants added to make the snippet self-contained: the original
# file referenced nn, F, torch, MAX_LENGTH and device without defining them.
# MAX_LENGTH = 10 follows the "10 dims (max sentence length)" comment below.
import torch
import torch.nn as nn
import torch.nn.functional as F

MAX_LENGTH = 10
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size  # vocabulary size of the other language
        self.dropout_p = dropout_p
        self.max_length = max_length
        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):  # the arguments are the decoder's inputs
        # The decoder input is a token of the target language: either the
        # target token itself, or the highest-probability token from the
        # previous step's output.
        # The initial hidden state is the encoder's last hidden output.
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # Concatenate the 256-dim embedding with the 256-dim hidden state into
        # a 512-dim vector, then a linear layer maps it to 10 dims (the max
        # sentence length), followed by a softmax over positions.
        attn_weight = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1
        )
        # torch.cat concatenates tensors; dim=1 concatenates along dimension 1.
        # torch.bmm is a batched matrix multiply: here the attention weights
        # are multiplied with the encoder outputs,
        # bmm: (1,1,10) x (1,10,256) -> weights times vectors gives the
        # attention vector. unsqueeze inserts a dimension.
        attn_applied = torch.bmm(attn_weight.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weight

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
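# Usage sketch (my addition): shapes assumed from the comments above
# (hidden size 256, MAX_LENGTH 10); the encoder outputs are zero stand-ins
# and the vocabulary size of 3000 is an arbitrary illustration.
decoder = AttnDecoderRNN(hidden_size=256, output_size=3000).to(device)
inp = torch.tensor([[0]], device=device)          # one target-language token id
hidden = decoder.initHidden()
encoder_outputs = torch.zeros(MAX_LENGTH, 256, device=device)
out, hidden, attn = decoder(inp, hidden, encoder_outputs)
print(out.shape, attn.shape)                      # (1, 3000), (1, 10)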
| 41.773585
| 88
| 0.653117
| 264
| 2,214
| 5.314394
| 0.359848
| 0.078403
| 0.099786
| 0.064148
| 0.112616
| 0.038489
| 0.038489
| 0
| 0
| 0
| 0
| 0.051795
| 0.232611
| 2,214
| 53
| 89
| 41.773585
| 0.773985
| 0.250678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0
| 0.034483
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5e5867ee1d6b8b8d8f0bd5472d8f25ae61b5ab
| 497
|
py
|
Python
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
"""
Write a Python program that reads a date (from 2016/1/1 to 2016/12/31) and prints the day of the date.
Jan. 1, 2016, is Friday.
Note that 2016 is a leap year.
"""
from datetime import date
print("Input month and date(separated by a single space): ")
m, d = map(int, input().split())
weeks = {1: "Monday", 2: "Tuesday", 3: "Wednesday", 4:"Thursday", 5: "Friday", 6: "Saturday", 7: "sunday"}
w = date.isoweekday(date(2016, m, d))
print("Name of the date: ", weeks[w])
#Reference: w3resources
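# Worked check (my addition): Jan. 1, 2016 is a Friday, so for the input
# "1 1" the program prints "Friday".
assert date(2016, 1, 1).isoweekday() == 5  # 5 -> "Friday" in the weeks dict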
| 33.133333
| 106
| 0.668008
| 86
| 497
| 3.860465
| 0.651163
| 0.03012
| 0.054217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 0.16499
| 497
| 15
| 107
| 33.133333
| 0.715663
| 0.366197
| 0
| 0
| 0
| 0
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5e8dee6b61a5247d6e4659a6ab926d4b74a1e7
| 347
|
py
|
Python
|
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
"""
题目描述
给定n个字符串,请对n个字符串按照字典序排列。
输入描述:
输入第一行为一个正整数n(1≤n≤1000),下面n行为n个字符串(字符串长度≤100),字符串中只含有大小写字母。
输出描述:
数据输出n行,输出结果为按照字典序排列的字符串。
示例1
输入
9
cap
to
cat
card
two
too
up
boat
boot
输出
boat
boot
cap
card
cat
to
too
two
up
"""
list = []
n = int(input())
for i in range(0, n):
s = input()
list.append(s)
list.sort()
for i in list:
print(i)
| 8.069767
| 58
| 0.674352
| 63
| 347
| 3.761905
| 0.650794
| 0.067511
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039286
| 0.193084
| 347
| 42
| 59
| 8.261905
| 0.796429
| 0.605187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef5fbbee42c9df1a0ff003ab57c38b8bb1ccfe30
| 2,558
|
py
|
Python
|
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
import jsonhandler
from LuyckxFeatures import *
import timblClassification as timbl
import os
import numpy as np
from collections import Counter


def parseC10(c10_path):
    jsonhandler.loadJson(c10_path)
    jsonhandler.loadTraining()
    candidates = jsonhandler.candidates
    unknowns = jsonhandler.unknowns
    files = list()
    for cand in candidates:
        for fileName in jsonhandler.trainings[cand]:
            files.append('%s/%s/%s' % (c10_path, cand, fileName))
    for unknown in unknowns:
        files.append('%s/unknown/%s' % (c10_path, unknown))
    parseCorpus(files)


dictPath = "c10"
jsonhandler.loadJson(dictPath)
jsonhandler.loadTraining()
candidates = jsonhandler.candidates
unknowns = jsonhandler.unknowns

authors = list()
uAuthors = list()
for cand in candidates:
    a = author(cand)
    for fileName in jsonhandler.trainings[cand]:
        fName = '%s/%s/%s' % (dictPath, cand, fileName)
        pName = '%s/%s/%s' % (dictPath, cand, os.path.splitext(fileName)[0] + '.mbsp')
        a.addDoc(fName, pName)
    authors.append(a)
for unknown in unknowns:
    fName = '%s/unknown/%s' % (dictPath, unknown)
    pName = '%s/unknown/%s' % (dictPath, os.path.splitext(unknown)[0] + '.mbsp')
    a = author(os.path.splitext(unknown)[0])
    a.addDoc(fName, pName)
    uAuthors.append(a)

docs = getAllDocuments(authors + uAuthors)
globalFeatures = dict.fromkeys(docs[0].features.keys())
accuracy = dict.fromkeys(docs[0].features.keys())
predict = dict.fromkeys(docs[0].features.keys())
for idk, key in enumerate(globalFeatures.keys()):
    globalFeatures[key] = globalFeature(key, docs)
    train_fName = '%s/%s_training.c5' % (dictPath, key)
    test_fName = '%s/%s_test.c5' % (dictPath, key)
    exportC5(getAllDocuments(authors), authors, globalFeatures[key], 50, train_fName)
    exportC5(getAllDocuments(uAuthors), uAuthors, globalFeatures[key], 50, test_fName)
    noFeatures = len(Counter(globalFeatures[key].chi2).most_common(50))
    predict[key] = timbl.classify(train_fName, test_fName, noFeatures)
    os.remove(train_fName)
    os.remove(test_fName)
# jsonhandler.storeJson(unknowns, predict)
jsonhandler.loadGroundTruth()
with open('%s/results' % dictPath, 'w') as rHandle:
    for key in globalFeatures.keys():
        cMatrix = timbl.confusionMatrix(jsonhandler.trueAuthors, predict[key])
        accuracy[key] = np.sum(np.diag(cMatrix)) / np.sum(cMatrix)
        rHandle.write('%s \t %.4f \n' % (key, accuracy[key]))
| 38.179104
| 86
| 0.670837
| 303
| 2,558
| 5.613861
| 0.277228
| 0.009406
| 0.005291
| 0.029982
| 0.260435
| 0.189888
| 0.095238
| 0.095238
| 0
| 0
| 0
| 0.014507
| 0.191556
| 2,558
| 66
| 87
| 38.757576
| 0.808027
| 0.015637
| 0
| 0.241379
| 0
| 0
| 0.051731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.103448
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef6043c616af761fa9470ba29ff276fd15c95e0d
| 3,133
|
py
|
Python
|
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | 1
|
2020-08-01T13:42:26.000Z
|
2020-08-01T13:42:26.000Z
|
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | null | null | null |
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | 1
|
2021-08-24T14:02:32.000Z
|
2021-08-24T14:02:32.000Z
|
from bs4 import BeautifulSoup
from multiprocessing import Pool
import requests


def lineid(lineno):
    lineurl = "http://61.43.246.153/openapi-data/service/busanBIMS2/busInfo?lineno="+lineno+"&serviceKey=0XeO7nbthbiRoMUkYGGah20%2BfXizwc0A6BfjrkL6qhh2%2Fsl8j9PzfSLGKnqR%2F1v%2F%2B6AunxntpLfoB3Ryd3OInQ%3D%3D"
    lineid2 = requests.get(lineurl).text
    lineid1 = BeautifulSoup(lineid2, "html.parser")
    lineid0 = lineid1.find('item')
    lineid = lineid0.lineid.string
    return lineid


def nextstop(l):
    no = l[0]
    lineno = l[1]
    lineid1 = lineid(lineno)
    url = "http://61.43.246.153/openapi-data/service/busanBIMS2/busInfoRoute?lineid="+lineid1+"&serviceKey=0XeO7nbthbiRoMUkYGGah20%2BfXizwc0A6BfjrkL6qhh2%2Fsl8j9PzfSLGKnqR%2F1v%2F%2B6AunxntpLfoB3Ryd3OInQ%3D%3D"
    text = requests.get(url).text
    soup = BeautifulSoup(text, "html.parser")
    nextidx = 0
    nextstop = None  # initialize so the function returns None when no next stop
                     # is found (the original could raise UnboundLocalError)
    for item in soup.findAll('item'):
        bstop = ""
        if item.arsno is None:
            bstop = "정보가 없습니다."  # "no information available"
        else:
            bstop = item.arsno.string
        curidx = int(item.bstopidx.string)
        if bstop == no:
            nextidx = curidx
            nextidx = nextidx + 1
        elif curidx == nextidx:
            nextstop = item.bstopnm.string
    return nextstop


def getinfo(x):
    bus1 = "186190402"
    bus2 = "186210101"
    url1 = 'http://61.43.246.153/openapi-data/service/busanBIMS2/stopArr?serviceKey=ExhrDuBJZ28eMHPRIyFToDuqoT1Lx3ViPoI3uKVLS%2FyucnbaLbQISs4%2FSJWf0AzAV1gkbbtZK5GWvO9clF%2B1aQ%3D%3D&bstopid='+bus1
    url2 = 'http://61.43.246.153/openapi-data/service/busanBIMS2/stopArr?serviceKey=ExhrDuBJZ28eMHPRIyFToDuqoT1Lx3ViPoI3uKVLS%2FyucnbaLbQISs4%2FSJWf0AzAV1gkbbtZK5GWvO9clF%2B1aQ%3D%3D&bstopid='+bus2
    if x == '0':
        html = requests.get(url1).text
    else:
        html = requests.get(url2).text
    return html


def process(b):
    result = b.lineno.string + "번 버스" + "\n"  # "bus no. ..."
    lineno = b.lineno.string
    if b.arsno is None:
        no = "정보가 없습니다."  # "no information available"
    else:
        no = b.arsno.string
    if no == "정보가 없습니다.":  # fixed: the original compared without the
                                  # trailing period, so the check never matched
        nextstop1 = None
    else:
        l = [no, lineno]
        nextstop1 = nextstop(l)
    if nextstop1 is None:
        result = result + "다음역: 정보가 없습니다.\n"  # "next stop: no information"
    else:
        result = result + "다음역:" + nextstop1 + "\n"  # "next stop: ..."
    if b.min1 is None:
        result = result + "현재 최근버스시간이 존재하지않습니다.\n\n"  # "no upcoming bus time"
    else:
        result = result + b.min1.string + "분 뒤 도착" + "\n\n"  # "arrives in N minutes"
    return result


def bus():
    result = "양운고 앞 대림1차아파트 정보\n\n"  # header: stop info for Daelim Apt. 1
    pool = Pool(processes=2)
    # map over the one-character strings '0' and '1' to fetch both stops
    html = pool.map(getinfo, '0')[0]
    print("00000")
    html1 = pool.map(getinfo, '1')[0]
    print("22222")
    soup = BeautifulSoup(html, "html.parser")
    soup1 = BeautifulSoup(html1, "html.parser")
    item = soup.findAll('item')
    for b in item:
        r = process(b)
        result = result + r
        print("111111")
    result = result + "\n\n"
    item = soup1.findAll('item')
    for b in item:
        r = process(b)
        result = result + r
    return result


if __name__ == "__main__":
    print(bus())
| 27.243478
| 210
| 0.616981
| 363
| 3,133
| 5.30303
| 0.30303
| 0.043636
| 0.016623
| 0.022857
| 0.362597
| 0.362597
| 0.362597
| 0.362597
| 0.362597
| 0.20987
| 0
| 0.08144
| 0.255346
| 3,133
| 114
| 211
| 27.482456
| 0.743678
| 0
| 0
| 0.166667
| 0
| 0.047619
| 0.301309
| 0.072774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.035714
| 0
| 0.154762
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef60ce6fc063e157d7dfaad93f8114a633854b16
| 4,256
|
py
|
Python
|
model_training.py
|
PatriceC/MLProjectISDP2020
|
64e83824690ccde2714d915c70fb00b20aa66a42
|
[
"MIT"
] | 1
|
2021-01-23T01:04:00.000Z
|
2021-01-23T01:04:00.000Z
|
model_training.py
|
cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data
|
e8eefdf2e630a53e09f88550357b67732f2bccd0
|
[
"MIT"
] | null | null | null |
model_training.py
|
cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data
|
e8eefdf2e630a53e09f88550357b67732f2bccd0
|
[
"MIT"
] | 1
|
2021-01-19T16:57:27.000Z
|
2021-01-19T16:57:27.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 13:54:58 2020

@author: Patrice CHANOL & Corentin MORVAN--CHAUMEIL
"""

import numpy as np
import torch
import time

import visualisation
from datetime import datetime


def main(model, criterion, optimizer, scheduler, data_train_loader, data_test_loader, num_epochs, input_window, output_window, batch_size):
    """
    Train the model and compute the test loss.

    Parameters
    ----------
    model : TYPE
        DESCRIPTION. model to train
    criterion : TYPE
        DESCRIPTION. criterion to compute
    optimizer : TYPE
        DESCRIPTION.
    scheduler : TYPE
        DESCRIPTION.
    data_loader_train : TYPE
        DESCRIPTION. train set
    data_loader_test : TYPE
        DESCRIPTION. test set
    num_epochs : TYPE
        DESCRIPTION. number of epochs to compute
    input_window : TYPE
        DESCRIPTION. input window length
    output_window : TYPE
        DESCRIPTION. output window length
    batch_size : TYPE
        DESCRIPTION. batch_size

    Returns
    -------
    model : TYPE
        DESCRIPTION. trained model
    test_loss_list : TYPE
        DESCRIPTION. test loss
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dateTimeObj = datetime.now()
    print('Training start: ', dateTimeObj.hour, 'H', dateTimeObj.minute)
    test_loss_list = []
    n_batches = len(data_train_loader)
    # train the model for num_epochs epochs
    for epoch in range(1, num_epochs + 1):
        # epoch timer
        epoch_start_time = time.time()
        dateTimeObj = datetime.now()
        print('Epoch', epoch, 'start:', dateTimeObj.hour, 'H', dateTimeObj.minute)
        # set the model to training mode
        model.train()
        # fraction of the dataset processed so far
        pourcentage = 0.
        # losses of the current evaluation pass
        test_loss_batch = []
        # timer for each 10% slice of the dataset
        start_time = time.time()
        for batch, ((day_of_week, serie_input), serie_output) in enumerate(data_train_loader):
            # Initializing the gradient as 0 so there is no mixing of gradient among the batches
            optimizer.zero_grad()
            # Forward pass
            output = model.forward(day_of_week.to(device), serie_input.float().to(device))
            loss = criterion(output, serie_output.float().to(device))
            # Propagating the error backward
            loss.backward()
            # gradient clipping for the Transformer
            if model.name_model == 'Transformer':
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
            # Optimizing the parameters
            optimizer.step()
            # fraction actually processed
            count_pourcentage = batch / n_batches
            # after each additional 10% of the dataset, evaluate on the test set
            if count_pourcentage >= pourcentage:
                # time spent on those 10%
                T = time.time() - start_time
                # evaluate the model
                model.eval()
                with torch.no_grad():
                    for ((day_of_week_t, serie_input_t), serie_output_t) in data_test_loader:
                        output_t = model.forward(day_of_week_t.to(device), serie_input_t.float().to(device))
                        loss_t = criterion(output_t, serie_output_t.float().to(device))
                        test_loss_batch.append(loss_t.item())
                test_loss = np.mean(test_loss_batch)
                test_loss_list.append(test_loss)
                print('-' * 10)
                print("Progress: {}%, Test Loss: {}, Epoch: {}, Time: {}s".format(round(100 * pourcentage), test_loss, epoch, round(T)))
                print('-' * 10)
                # Visualisation
                visualisation.pred_vs_reality(model, input_window, output_window, epoch=epoch, pourcentage=round(100 * pourcentage))
                pourcentage += 0.1
                start_time = time.time()
                model.train()
        print('Epoch {} done, epoch time: {}s'.format(epoch, round(time.time() - epoch_start_time)))
        visualisation.forecast(model, input_window, output_window, epoch=epoch)
        scheduler.step()
    model.save()
    return model, test_loss_list
| 34.322581
| 140
| 0.608083
| 493
| 4,256
| 5.06288
| 0.320487
| 0.072115
| 0.019231
| 0.027644
| 0.099359
| 0.030449
| 0.030449
| 0
| 0
| 0
| 0
| 0.012387
| 0.298167
| 4,256
| 123
| 141
| 34.601626
| 0.823234
| 0.294878
| 0
| 0.16
| 0
| 0
| 0.048438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.1
| 0
| 0.14
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ef61b3b08001b19237e5f7463a25cc96b621c9fe
| 3,679
|
py
|
Python
|
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | 1
|
2021-01-30T11:50:54.000Z
|
2021-01-30T11:50:54.000Z
|
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | null | null | null |
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | 2
|
2021-01-30T11:50:57.000Z
|
2021-02-04T15:43:54.000Z
|
import numpy as np
import pandas as pd
import datetime


def gen_cols(Pad, cur, lag):
    currency = list(np.sort(Pad['currency pair'].unique()))
    tmp = Pad[Pad['currency pair'] == cur].sort_values(by=['timestamp'])
    for i in range(1, lag + 1):
        colname1 = 'bid_lag_' + str(i)
        colname2 = 'ask_lag_' + str(i)
        tmp[colname1] = np.log(tmp['bid price']) - np.log(tmp['bid price'].shift(i))
        tmp[colname2] = np.log(tmp['ask price']) - np.log(tmp['ask price'].shift(i))
    for ccy in currency:
        if ccy == cur:
            pass
        else:
            _tmp = Pad[Pad['currency pair'] == ccy].sort_values(by=['timestamp'])
            mid = pd.DataFrame(np.mean(np.asarray([_tmp['bid price'].values, _tmp['ask price'].values]), axis=0))
            for i in range(1, lag + 1):
                colname3 = ccy + '_lag_' + str(i)
                tmp[colname3] = np.log(mid) - np.log(mid.shift(i))
    tmp['date'] = tmp['timestamp'].astype(str).str[0:10]
    tmp['dow'] = pd.to_datetime(tmp['date']).dt.dayofweek
    tmp['hh'] = tmp['timestamp'].astype(str).str[11:13]
    tmp['mm'] = tmp['timestamp'].astype(str).str[14:16]
    tmp['ss'] = tmp['timestamp'].astype(str).str[17:19]
    tmp['time_1'] = np.sin(np.pi * tmp['dow'].values / 7)
    tmp['time_2'] = np.sin(np.pi * tmp['hh'].astype('int64').values / 24)
    tmp['time_3'] = np.sin(np.pi * tmp['mm'].astype('int64').values / 60)
    tmp['time_4'] = np.sin(np.pi * tmp['ss'].astype('int64').values / 60)
    tmp = tmp.drop(['date', 'dow', 'hh', 'mm', 'ss'], axis=1)
    tmp = tmp.reset_index(drop=True)
    tmp = tmp[lag:]
    return tmp


def CreateFeature(cur, lag, week_num):
    date_list = ['0201', '0203', '0204', '0205',
                 '0206', '0207', '0208', '0210',
                 '0211', '0212', '0213', '0214',
                 '0215', '0217', '0218', '0219',
                 '0220', '0221', '0222', '0224',
                 '0225', '0226', '0227', '0228', '0301']
    train_week_1 = date_list[0:4]
    train_week_2 = date_list[4:8]
    train_week_3 = date_list[8:12]
    train_week_4 = date_list[12:16]
    train_week_5 = date_list[16:20]
    eval_week_1 = date_list[4:6]
    eval_week_2 = date_list[8:10]
    eval_week_3 = date_list[12:14]
    eval_week_4 = date_list[16:18]
    eval_week_5 = date_list[20:22]
    if week_num == 1:
        train_week = train_week_1
        eval_week = eval_week_1
    elif week_num == 2:
        train_week = train_week_2
        eval_week = eval_week_2
    elif week_num == 3:
        train_week = train_week_3
        eval_week = eval_week_3
    elif week_num == 4:
        train_week = train_week_4
        eval_week = eval_week_4
    elif week_num == 5:
        train_week = train_week_5
        eval_week = eval_week_5
    Pad_train = None
    Pad_eval = None
    for train_date in train_week:
        filename = '../pad/pad-' + train_date + '.csv'
        tmp = pd.read_csv(filename)
        if Pad_train is not None:
            Pad_train = Pad_train.append(tmp)
        else:
            Pad_train = tmp
    final_train = gen_cols(Pad_train, cur, lag)
    trainname = './data/train_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
    final_train.to_csv(trainname, index=False)
    for eval_date in eval_week:
        filename = '../pad/pad-' + eval_date + '.csv'
        tmp = pd.read_csv(filename)
        if Pad_eval is not None:
            Pad_eval = Pad_eval.append(tmp)
        else:
            Pad_eval = tmp
    final_eval = gen_cols(Pad_eval, cur, lag)
    evalname = './data/eval_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
    final_eval.to_csv(evalname, index=False)


if __name__ == '__main__':
    CreateFeature('EURUSD', 16, 1)
| 37.927835 | 113 | 0.580864 | 559 | 3,679 | 3.588551 | 0.232558 | 0.071785 | 0.034895 | 0.044865 | 0.228315 | 0.081755 | 0.081755 | 0.065803 | 0.065803 | 0.033898 | 0 | 0.073726 | 0.247893 | 3,679 | 96 | 114 | 38.322917 | 0.651247 | 0 | 0 | 0.078652 | 0 | 0 | 0.120413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0.011236 | 0.033708 | 0 | 0.067416 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef625fbf84f8e46aa31c085f3762960c2186790e | 3,863 | py | Python | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | ["MIT"] | null | null | null | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | ["MIT"] | null | null | null | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | ["MIT"] | null | null | null |
import math
import os
from argparse import ArgumentParser

import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.callbacks import XLAStatsMonitor
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule

from mingpt.lr_decay import LearningRateDecayCallback
from mingpt.model import GPT


class CharDataset(Dataset):
    def __init__(self, data, block_size):
        chars = list(set(data))
        data_size, vocab_size = len(data), len(chars)
        rank_zero_info('data has %d characters, %d unique.' % (data_size, vocab_size))
        self.stoi = {ch: i for i, ch in enumerate(chars)}
        self.itos = {i: ch for i, ch in enumerate(chars)}
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.data = data

    def __len__(self):
        return math.ceil(len(self.data) / (self.block_size + 1))

    def __getitem__(self, idx):
        # we're actually going to "cheat" and pick a spot in the dataset at random
        i = np.random.randint(0, len(self.data) - (self.block_size + 1))
        chunk = self.data[i:i + self.block_size + 1]
        dix = [self.stoi[s] for s in chunk]
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)
        return x, y


class CharDataModule(LightningDataModule):
    def __init__(self, batch_size, num_workers, block_size):
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.block_size = block_size

    def setup(self, stage=None):
        if not os.path.exists("input.txt"):
            os.system("wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt")
        # you can download this file at https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt
        text = open('input.txt', 'r').read()  # don't worry we won't run out of file handles
        self.train_dataset = CharDataset(text, self.block_size)  # one line of poem is roughly 50 characters

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers)


if __name__ == '__main__':
    seed_everything(42)
    parser = ArgumentParser()
    parser = Trainer.add_argparse_args(parser)
    parser.add_argument('--n_layer', default=22, type=int)
    parser.add_argument('--n_head', default=16, type=int)
    parser.add_argument('--n_embd', default=720, type=int)
    parser.add_argument('--learning_rate', default=6e-4, type=float)
    parser.add_argument('--block_size', default=128, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--num_workers', default=16, type=int)
    args = parser.parse_args()

    if not os.path.exists("input.txt"):
        os.system("wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt")

    dm = CharDataModule(args.batch_size, args.num_workers, args.block_size)
    dm.setup()

    model = GPT(
        vocab_size=dm.train_dataset.vocab_size,
        block_size=dm.train_dataset.block_size,
        n_layer=args.n_layer,
        n_head=args.n_head,
        n_embd=args.n_embd,
        learning_rate=args.learning_rate
    )
    lr_decay = LearningRateDecayCallback(
        learning_rate=6e-4,
        warmup_tokens=512 * 20,
        final_tokens=2 * len(dm.train_dataset) * args.block_size
    )
    trainer = Trainer.from_argparse_args(
        args,
        max_epochs=5,
        tpu_cores=8,
        gradient_clip_val=1.0,
        callbacks=[lr_decay, XLAStatsMonitor()],
    )
    trainer.fit(model, datamodule=dm)
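A minimal standalone sketch of the shifted input/target pairs CharDataset yields; the tiny corpus and block_size below are hypothetical, not values from the script:

import numpy as np
import torch

text = "hello world"                                   # hypothetical tiny corpus
stoi = {ch: i for i, ch in enumerate(sorted(set(text)))}
block_size = 4
i = np.random.randint(0, len(text) - (block_size + 1))
chunk = text[i:i + block_size + 1]                     # block_size + 1 characters
x = torch.tensor([stoi[s] for s in chunk[:-1]], dtype=torch.long)  # inputs
y = torch.tensor([stoi[s] for s in chunk[1:]], dtype=torch.long)   # targets, shifted by one
print(repr(chunk), x.tolist(), y.tolist())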
| 36.443396 | 119 | 0.681077 | 535 | 3,863 | 4.700935 | 0.330841 | 0.053678 | 0.047316 | 0.031809 | 0.249304 | 0.17336 | 0.153479 | 0.089861 | 0.089861 | 0.089861 | 0 | 0.012426 | 0.208387 | 3,863 | 105 | 120 | 36.790476 | 0.810007 | 0.069635 | 0 | 0.073171 | 0 | 0.02439 | 0.093341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.158537 | 0.02439 | 0.292683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef62a93780f5d22fd2c5c963cb04b78649fda229 | 2,059 | py | Python | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | ["MIT"] | null | null | null | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | ["MIT"] | null | null | null | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import requests
import json


def get_weather(city: str) -> dict:
    req = requests.get("https://free-api.heweather.net/s6/weather?location="
                       "{}&key=89d6bbc3861844d59a6313c16448d293".format(city))
    # json.loads() no longer accepts an `encoding` argument (removed in
    # Python 3.9); requests already decodes the response body.
    json_data = json.loads(req.text)
    return json_data


def get_info(city: str):
    try:
        resp = get_weather(city)
        resp_basic = resp['HeWeather6'][0]['basic']
        resp_update = resp['HeWeather6'][0]['update']
        resp_now = resp['HeWeather6'][0]['now']
        # resp_hourly = resp['HeWeather6'][0]['hourly']
        resp_daily_forecast = resp['HeWeather6'][0]['daily_forecast']
        resp_today = resp_daily_forecast[0]
        resp_tomorrow = resp_daily_forecast[1]
        status = resp['HeWeather6'][0]['status']
        str_weather = ""
        str_weather += "Current city: {area}-{city}-{loc}\n".format(
            area=resp_basic['admin_area'], city=resp_basic['parent_city'], loc=resp_basic['location'])
        str_weather += "Current time: {}\n".format(resp_update['loc'])
        str_weather += "Current weather: {}, temperature: {}°C, feels like: {}°C\n".format(
            resp_now['cond_txt'], resp_now['tmp'], resp_now['fl'])
        str_weather += \
            "Today: {d}, temperature: {min}~{max}°C, wind force: level {sc}, relative humidity: {hum}%, " \
            "chance of precipitation: {pop}%, UV index: {uv}\n". \
            format(d=resp_today['cond_txt_d'], min=resp_today['tmp_min'], max=resp_today['tmp_max'],
                   sc=resp_today['wind_sc'], hum=resp_today['hum'],
                   pop=resp_today['pop'], uv=resp_today['uv_index'])
        str_weather += \
            "Tomorrow: {d}, temperature: {min}~{max}°C, wind force: level {sc}, relative humidity: {hum}%, " \
            "chance of precipitation: {pop}%, UV index: {uv}\n". \
            format(d=resp_tomorrow['cond_txt_d'], min=resp_tomorrow['tmp_min'], max=resp_tomorrow['tmp_max'],
                   sc=resp_tomorrow['wind_sc'], hum=resp_tomorrow['hum'],
                   pop=resp_tomorrow['pop'], uv=resp_tomorrow['uv_index'])
        str_weather += "NM$L weather report complete"
    except Exception as e:
        print(f"Exception: {e}")
        status = -1
        str_weather = None
    return status, str_weather
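A hedged usage sketch; 'beijing' is a hypothetical input, and the call hits the live HeWeather endpoint with the key embedded above:

if __name__ == '__main__':
    status, report = get_info('beijing')
    print(report if status != -1 else 'weather lookup failed')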
| 44.76087 | 112 | 0.594463 | 280 | 2,059 | 4.175 | 0.3 | 0.076989 | 0.076989 | 0.015398 | 0.109495 | 0.083832 | 0.083832 | 0.083832 | 0.083832 | 0.083832 | 0 | 0.026119 | 0.219038 | 2,059 | 45 | 113 | 45.755556 | 0.695896 | 0.032054 | 0 | 0 | 0 | 0.054054 | 0.270854 | 0.069347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.162162 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef63d9fcd4c7ced9c5506a721a486919e70bacc7 | 2,536 | py | Python | paz/datasets/ferplus.py | niqbal996/paz | f27205907367415d5b21f90e1a1d1d1ce598e889 | ["MIT"] | 300 | 2020-10-29T08:02:05.000Z | 2022-03-30T21:47:32.000Z | paz/datasets/ferplus.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | ["MIT"] | 30 | 2020-10-29T12:40:32.000Z | 2022-03-31T14:06:35.000Z | paz/datasets/ferplus.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | ["MIT"] | 62 | 2020-10-29T12:34:13.000Z | 2022-03-29T05:21:45.000Z |
import os
import numpy as np

from .utils import get_class_names
from ..abstract import Loader
from ..backend.image import resize_image

# IMAGES_PATH = '../datasets/fer2013/fer2013.csv'
# LABELS_PATH = '../datasets/fer2013/fer2013new.csv'


class FERPlus(Loader):
    """Class for loading the FER2013 emotion classification dataset
    with FERPlus labels.

    # Arguments
        path: String. Path to directory that contains the files
            `fer2013.csv` and `fer2013new.csv`.
        split: String. Valid options are 'train', 'val' or 'test'.
        class_names: String or list. If 'all' it loads all default
            class names.
        image_size: List of length two. Indicates the shape to which
            the images will be resized.

    # References
        - [FerPlus](https://www.kaggle.com/c/challenges-in-representation-\
learning-facial-expression-recognition-challenge/data)
        - [FER2013](https://arxiv.org/abs/1608.01041)
    """
    def __init__(self, path, split='train', class_names='all',
                 image_size=(48, 48)):
        if class_names == 'all':
            class_names = get_class_names('FERPlus')
        super(FERPlus, self).__init__(path, split, class_names, 'FERPlus')
        self.image_size = image_size
        self.images_path = os.path.join(self.path, 'fer2013.csv')
        self.labels_path = os.path.join(self.path, 'fer2013new.csv')
        self.split_to_filter = {
            'train': 'Training', 'val': 'PublicTest', 'test': 'PrivateTest'}

    def load_data(self):
        data = np.genfromtxt(self.images_path, str, '#', ',', 1)
        data = data[data[:, -1] == self.split_to_filter[self.split]]
        faces = np.zeros((len(data), *self.image_size))
        for sample_arg, sample in enumerate(data):
            face = np.array(sample[1].split(' '), dtype=int).reshape(48, 48)
            face = resize_image(face, self.image_size)
            faces[sample_arg, :, :] = face
        emotions = np.genfromtxt(self.labels_path, str, '#', ',', 1)
        emotions = emotions[emotions[:, 0] == self.split_to_filter[self.split]]
        emotions = emotions[:, 2:10].astype(float)
        # Normalize the per-image vote counts into an 8-way probability
        # vector, dropping images with no votes.
        N = np.sum(emotions, axis=1)
        mask = N != 0
        N, faces, emotions = N[mask], faces[mask], emotions[mask]
        emotions = emotions / np.expand_dims(N, 1)
        data = []
        for face, emotion in zip(faces, emotions):
            sample = {'image': face, 'label': emotion}
            data.append(sample)
        return data
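A hedged usage sketch, kept as comments since this module uses package-relative imports; the dataset directory below is hypothetical and must contain fer2013.csv and fer2013new.csv:

# from paz.datasets.ferplus import FERPlus
# dataset = FERPlus('../datasets/fer2013', split='val')
# samples = dataset.load_data()
# print(len(samples), samples[0]['image'].shape, samples[0]['label'].sum())  # each label sums to 1.0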
| 39.015385 | 79 | 0.613565 | 318 | 2,536 | 4.764151 | 0.393082 | 0.052805 | 0.025743 | 0.033663 | 0.063366 | 0.063366 | 0 | 0 | 0 | 0 | 0 | 0.035827 | 0.251577 | 2,536 | 64 | 80 | 39.625 | 0.762381 | 0.300079 | 0 | 0 | 0 | 0 | 0.06188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.138889 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ef651d134e566a45ca23483fc6b3987d980d24af | 863 | py | Python | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | ["MIT"] | 1 | 2021-09-29T11:05:07.000Z | 2021-09-29T11:05:07.000Z | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | ["MIT"] | null | null | null | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | ["MIT"] | 1 | 2021-09-29T11:06:32.000Z | 2021-09-29T11:06:32.000Z |
from typing import List


class Solution:
    def maxArea(self, height: List[int]) -> int:
        # We can create "left" and "right" pointers;
        # the initial width between "l" and "r" is already the maximum
        l, r = 0, len(height) - 1
        width = r - l
        # We can use a greedy method to move the lower line to the next line.
        # For example, if height[l] < height[r], then we move "l" to "l+1";
        # if height[l] > height[r], then we move "r" to "r-1";
        # if they are the same, then it's ok to move either one
        res = 0
        while l < r:
            res = max(res, width * min(height[l], height[r]))
            if height[l] <= height[r]:
                l += 1
            else:
                r -= 1
            width -= 1  # exactly one pointer moves per iteration
        return res
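A quick check against the standard LeetCode example (expected output 49, from the lines of height 8 and 7 at indices 1 and 8):

if __name__ == '__main__':
    print(Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49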
| 30.821429 | 74 | 0.468134 | 121 | 863 | 3.338843 | 0.46281 | 0.069307 | 0.128713 | 0.138614 | 0.168317 | 0.128713 | 0.128713 | 0.128713 | 0 | 0 | 0 | 0.016327 | 0.432213 | 863 | 28 | 75 | 30.821429 | 0.808163 | 0.391657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |