Dataset schema (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
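As a working aid, here is a minimal sketch of filtering records on these quality signals, assuming the preview has been exported to a parquet file (the file name and thresholds are illustrative, not part of the dataset):

```python
import pandas as pd

# Hypothetical export of the table below; the file name is an assumption.
df = pd.read_parquet("code_sample.parquet")

# Keep Python rows with little line-level and 5-gram duplication.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.2)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.2)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]])
```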
hexsha: de9373d0df66278e0b02dc262104db37303b9a61 | size: 3,806 | ext: py | lang: Python
max_stars: server-program/clientApplication.py @ ezequias2d/projeto-so (head 993f3dd12135946fe5b4351e8488b7aa8a18f37e, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
import socket
import tokens
import connection
import io
import os
from PIL import Image
from message.literalMessage import LiteralMessage
from baseApplication import BaseApplication
class ClientApplication(BaseApplication):
def __init__(self, host, port):
super().__init__(host, port, tokens.CLIENT_TOKEN)
def show_image_file_from_storage(self):
filename = input("Filename:")
file = self.get_file(filename)
img = Image.open(io.BytesIO(file))
img.show()
def see_files_in_storage(self):
files = self.get_files_from_storage()
for filename in files:
print(filename)
def send_file_to_storage(self):
filename = input("Filename:")
self.send_file(filename)
def send_job(self, token):
filename = input("Filename:")
dstfilename = input("Destination filename:")
self.send_literal(token)
self.send_literal(filename)
self.send_literal(dstfilename)
messageToken = self.receive_message().value
message = self.receive_message().value
if messageToken == tokens.INFO_MESSAGE or messageToken == tokens.ERROR_MESSAGE:
print(message)
def remove_file(self):
filename = input("Filename:")
self.send_literal(tokens.REMOVE_FILE)
self.send_literal(filename)
result = self.receive_message(True, 1.0)
if result is not None:
if result.value == tokens.ERROR_MESSAGE or result.value == tokens.INFO_MESSAGE:
message = self.receive_message().value
print(message)
def see_a_logfile(self):
files = [logfile for logfile in self.get_files_from_storage() if os.path.splitext(logfile)[1].lower() == '.log']
count = 0
for logfile in files:
print('{} - {}'.format(count, logfile))
count += 1
index = int(input('Index:'))
filename = files[index]
file = self.get_file(filename)
file = io.BytesIO(file).read()
print('Log:')
print(file.decode('UTF-8'))
def print_commands(self):
print('Commands:')
print('0 - Exit')
print('1 - Flip Image Horizontal')
print('2 - Flip Image Vertical')
print('3 - Rotate Image 90.')
print('4 - Rotate Image 180.')
print('5 - Rotate Image 270.')
print('6 - See Files in Storage.')
print('7 - Send File to Storage.')
print('8 - Show Image File from Storage.')
print('9 - Remove File from Storage.')
print('10 - See a logfile.')
def menu(self):
while not self.is_closed():
self.print_commands()
cmd = int(input("Cmd>"))
if cmd == 0:
self.close()
elif cmd == 1:
self.send_job(tokens.JOB_FLIP_HORIZONTAL)
elif cmd == 2:
self.send_job(tokens.JOB_FLIP_VERTICAL)
elif cmd == 3:
self.send_job(tokens.JOB_ROTATE_90)
elif cmd == 4:
self.send_job(tokens.JOB_ROTATE_180)
elif cmd == 5:
self.send_job(tokens.JOB_ROTATE_270)
elif cmd == 6:
self.see_files_in_storage()
elif cmd == 7:
self.send_file_to_storage()
elif cmd == 8:
self.show_image_file_from_storage()
elif cmd == 9:
self.remove_file()
elif cmd == 10:
self.see_a_logfile()
host = input('Host: ')
ClientApplication(host, 50007).menu()  # enter the command loop; the original constructed the client without calling menu()
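The long if/elif chain in `menu()` can also be expressed as a dispatch table; a hedged sketch (token and method names are taken from the class above, otherwise illustrative):

```python
# Hedged sketch: table-driven equivalent of the if/elif chain in menu().
def menu(self):
    actions = {
        1: lambda: self.send_job(tokens.JOB_FLIP_HORIZONTAL),
        2: lambda: self.send_job(tokens.JOB_FLIP_VERTICAL),
        3: lambda: self.send_job(tokens.JOB_ROTATE_90),
        4: lambda: self.send_job(tokens.JOB_ROTATE_180),
        5: lambda: self.send_job(tokens.JOB_ROTATE_270),
        6: self.see_files_in_storage,
        7: self.send_file_to_storage,
        8: self.show_image_file_from_storage,
        9: self.remove_file,
        10: self.see_a_logfile,
    }
    while not self.is_closed():
        self.print_commands()
        cmd = int(input("Cmd>"))
        if cmd == 0:
            self.close()
        elif cmd in actions:
            actions[cmd]()
```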
avg_line_length: 34.288288 | max_line_length: 121 | alphanum_fraction: 0.547031
qsc_code_*_quality_signal: num_words 423; num_chars 3,806; mean_word_length 4.739953; frac_words_unique 0.224586; frac_chars_top_2grams 0.04788; frac_chars_top_3grams 0.037406; frac_chars_top_4grams 0.042394; frac_chars_dupe_5grams 0.223441; frac_chars_dupe_6grams 0.095761; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.020639; frac_chars_whitespace 0.350762; size_file_byte 3,806; num_lines 111; num_chars_line_max 122; num_chars_line_mean 34.288288; frac_chars_alphabet 0.790773; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.126316; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.094942; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.094737; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.084211; frac_lines_simplefunc 0; score_lines_no_logic 0.189474; frac_lines_print 0.210526
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: de95cb380efb4a5351375e80063db451dd2899b5 | size: 3,803 | ext: py | lang: Python
max_stars: TkPy/module.py @ tbor8080/pyprog (head 3642b9af2a92f7369d9b6fa138e47ba22df3271c, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
import sys
import os
import tkinter.filedialog as fd
from time import sleep
import datetime
import tkinter
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
import threading
# New File & Duplicate File Save
def saveasFilePath( filetype=[ ("",".txt"), ("CSV",".csv") ] ):
return fd.asksaveasfilename(filetypes=filetype, initialdir=os.path.abspath(os.path.dirname(__file__)))
# FileSave
def saveFile(file_name, data, encoding='utf-8'):
with open(file_name, "wt", encoding=encoding) as fp:
fp.write(data)
class PyTkTextEditor:
def __init__(self, geometory='800x600'):
        # Window Geometry
self.__geometory=geometory
# Application Path
self.__appdir=os.path.abspath(os.path.dirname(__file__))
self.__fileTypes=[("*", ".txt"),("CSV", ".csv")]
# Child Objects
def getWindowSize(self):
return self.__geometory.split('x')
def __OnClick(self, e):
print(e,self)
def __onKeyPress__(self, e):# KeyPressEventHandle
# print(e.state, e.keycode, self.__root.focus_get(), e, self)
if e.state==8 and e.keycode==65651:# command + s current save
# Debug Print
# self.asSave("sample.txt", textWidget.get("1.0","end"))
if self.__root.filename=="":
self.__root.title("Untitled")
self.__root.filename=self.asSavePath(self.__fileTypes)
self.asSave(self.__root.filename, self.widget.get("1.0","end"))
        elif e.state==8 and e.keycode==2949230:# command + n ( new open )
            self.widget.insert("1.0", "Not implemented (command + n)")
        elif e.state==8 and e.keycode==2031727:# command + o ( open file )
            self.asOpen()
        elif e.state==9 and e.keycode==65651:# command + shift + s ( save multi )
            self.__root.filename=self.asSavePath(self.__fileTypes)
            self.__root.title(self.__root.filename)
            self.asSave(self.__root.filename, self.widget.get("1.0","end"))
        elif e.state==9 and e.keycode==2031727:# command + shift + o ( open file multi )
            self.widget.insert("1.0", "Not implemented (Open + Shift + O)")
        elif e.state==64 and e.keycode==7927557:# fn + F2
            self.widget.insert("1.0", "Not implemented (fn + F2)")
def windows(self):
self.__root=tkinter.Tk()
self.__root.geometry(self.__geometory)
self.__root.filename=''
self.__root.font=''
self.__root.title('Untitled')
self.__root.focus_set()
self.__root.title(self.__root.focus_get())
fonts=('Hiragino,Meiryo',32,'')
width,height=self.getWindowSize()
self.widget=tk.scrolledtext.ScrolledText(self.__root, bg="#fff", width=width, height=height)
self.widget.configure(font=fonts)
self.widget.pack()
self.__root.bind('<Key>', self.__onKeyPress__)
self.__root.mainloop()
return self.__root
def asSave(self, filename, data, encoding='utf-8'):
try:
with open(filename, "wt", encoding=encoding) as f:
f.write(data)
except FileNotFoundError:
print('FileNotFoundError')
def asSavePath(self,filetype=[("",".txt"),("CSV",".csv")]):
return fd.asksaveasfilename(filetypes=filetype, initialdir=self.__appdir)
def asOpenPath(self, filetype=[("*",".txt"),("csv",".csv")]):
return fd.askopenfilename(filetypes=filetype,initialdir=self.__appdir)
def asOpen(self):
self.__root.filename=self.asOpenPath(self.__fileTypes)
self.__root.title(self.__root.filename)
self.__root.focus_set()
text=''
with open(self.__root.filename, 'rt') as fp:
text=fp.read()
self.widget.insert("1.0", text)
avg_line_length: 34.261261 | max_line_length: 106 | alphanum_fraction: 0.616618
qsc_code_*_quality_signal: num_words 464; num_chars 3,803; mean_word_length 4.838362; frac_words_unique 0.273707; frac_chars_top_2grams 0.09265; frac_chars_top_3grams 0.071269; frac_chars_top_4grams 0.080178; frac_chars_dupe_5grams 0.385301; frac_chars_dupe_6grams 0.334967; frac_chars_dupe_7grams 0.253007; frac_chars_dupe_8grams 0.181292; frac_chars_dupe_9grams 0.148775; frac_chars_dupe_10grams 0.109577; frac_chars_replacement_symbols 0; frac_chars_digital 0.024466; frac_chars_whitespace 0.236918; size_file_byte 3,803; num_lines 111; num_chars_line_max 107; num_chars_line_mean 34.261261; frac_chars_alphabet 0.749139; frac_chars_comments 0.104391; cate_xml_start 0; frac_lines_dupe_lines 0.105263; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.05839; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.144737; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.131579; frac_lines_simplefunc 0.052632; score_lines_no_logic 0.355263; frac_lines_print 0.026316
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: de974a6af213636bff804abc1abfb40a31e4354d | size: 8,810 | ext: py | lang: Python
max_stars: judge/base/__init__.py @ fanzeyi/Vulpix (head 9448e968973073c98231b22663bbebb2a452dcd7, licenses ["BSD-3-Clause"]) | count: 13 | events: 2015-03-08T11:59:28.000Z to 2021-07-11T11:58:01.000Z
max_issues: src/tornado/demos/Vulpix-master/judge/base/__init__.py @ ptphp/PyLib (head 07ac99cf2deb725475f5771b123b9ea1375f5e65, licenses ["Apache-2.0"]) | count: null | events: null to null
max_forks: src/tornado/demos/Vulpix-master/judge/base/__init__.py @ ptphp/PyLib (head 07ac99cf2deb725475f5771b123b9ea1375f5e65, licenses ["Apache-2.0"]) | count: 3 | events: 2015-05-29T16:14:08.000Z to 2016-04-29T07:25:26.000Z
content:
# -*- coding: utf-8 -*-
# AUTHOR: Zeray Rice <fanzeyi1994@gmail.com>
# FILE: judge/base/__init__.py
# CREATED: 01:49:33 08/03/2012
# MODIFIED: 15:42:49 19/04/2012
# DESCRIPTION: Base handler
import re
import time
import urllib
import hashlib
import httplib
import datetime
import functools
import traceback
# binascii and uuid were missing but are used in get_current_user() below
import binascii
import uuid
import simplejson as json
from operator import itemgetter
from pygments import highlight
from pygments.lexers import CLexer
from pygments.lexers import CppLexer
from pygments.lexers import DelphiLexer
from pygments.formatters import HtmlFormatter
from sqlalchemy.exc import StatementError
from sqlalchemy.orm.exc import NoResultFound
import tornado.web
import tornado.escape
from tornado.httpclient import AsyncHTTPClient
from judge.db import Auth
from judge.db import Member
from judge.utils import _len
CODE_LEXER = {
1 : DelphiLexer,
2 : CLexer,
3 : CppLexer,
}
CODE_LANG = {
1 : "delphi",
2 : "c",
3 : "cpp",
}
def unauthenticated(method):
"""Decorate methods with this to require that user be NOT logged in"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.current_user:
if self.request.method in ("GET", "HEAD"):
self.redirect("/")
return
            raise tornado.web.HTTPError(403)  # HTTPError was not imported; qualify it via tornado.web
return method(self, *args, **kwargs)
return wrapper
class BaseHandler(tornado.web.RequestHandler):
_ = lambda self, text: self.locale.translate(text) # i18n func
xhtml_escape = lambda self, text: tornado.escape.xhtml_escape(text) if text else text # xhtml escape
def get_page_count(self, count, pre = 10):
'''Return page num by input item num'''
return count / pre + (1 if count % pre else 0)
def get_current_user(self):
'''Check user is logined'''
auth = self.get_secure_cookie("auth")
member_id = self.get_secure_cookie("uid")
member = None
if auth and member_id:
try:
auth = self.db.query(Auth).filter_by(secret = auth).filter_by(member_id = member_id).one()
except StatementError:
# for mysql session broken
self.db.rollback()
auth = self.db.query(Auth).filter_by(secret = auth).filter_by(member_id = member_id).one()
if auth:
member = self.db.query(Member).get(auth.member_id)
if member:
delta = auth.create - datetime.datetime.now()
if delta.days > 20:
""" Refresh Token """
auth.delete()
self.db.commit()
auth = Auth()
auth.member_id = member_id
auth.secret = binascii.b2a_hex(uuid.uuid4().bytes)
auth.create = datetime.datetime.now()
self.db.add(auth)
self.db.commit()
self.set_cookie('auth', auth.secret)
self.set_cookie('uid', auth.member_id)
else:
self.clear_cookie("auth")
self.clear_cookie("uid")
return member
def get_user_locale(self):
'''Get user locale, first check cookie, then browser'''
result = self.get_cookie('LANG', default = None)
if result == None:
result = self.get_browser_locale()
else:
result = tornado.locale.get(result)
return result
def sendmail(self):
'''Send mail func, send mail to someone'''
pass
def render(self, tplname, args = {}):
'''Rewrite render func for use jinja2'''
if "self" in args.keys():
args.pop("self")
tpl = self.jinja2.get_template(tplname)
ren = tpl.render(page = self, _ = self._, user = self.current_user, **args)
self.write(ren)
self.db.close()
self.finish()
def write_error(self, status_code, **kwargs):
'''Rewrite write_error for custom error page'''
if status_code == 404:
self.render("404.html")
return
elif status_code == 500:
error = []
for line in traceback.format_exception(*kwargs['exc_info']):
error.append(line)
error = "\n".join(error)
self.render("500.html", locals())
return
msg = httplib.responses[status_code]
self.render("error.html", locals())
def check_text_value(self, value, valName, required = False, max = 65535, min = 0, regex = None, regex_msg = None, is_num = False, vaild = []):
''' Common Check Text Value Function '''
error = []
if not value:
if required:
error.append(self._("%s is required") % valName)
return error
if is_num:
try:
tmp = int(value)
except ValueError:
return [self._("%s must be a number.") % valName]
else:
if vaild and tmp not in vaild:
return [self._("%s is invalid.") % valName]
return []
if _len(value) > max:
error.append(self._("%s is too long.") % valName)
elif _len(value) < min:
error.append(self._("%s is too short.") % valName)
if regex:
if not regex.match(value):
if regex_msg:
error.append(regex_msg)
else:
error.append(self._("%s is invalid.") % valName)
elif vaild and value not in vaild:
            error.append(self._("%s is invalid.") % valName)  # fixed typo: was errora.append
return error
def check_username(self, usr, queryDB = False):
error = []
error.extend(self.check_text_value(usr, self._("Username"), required = True, max = 20, min = 3, \
regex = re.compile(r'^([\w\d]*)$'), \
regex_msg = self._("A username can only contain letters and digits.")))
if not error and queryDB:
try:
query = self.select_member_by_username_lower(usr.lower())
except NoResultFound:
pass
else:
error.append(self._("That username is taken. Please choose another."))
return error
def check_password(self, pwd):
return self.check_text_value(pwd, self._("Password"), required = True, max = 32, min = 6)
def check_email(self, email, queryDB = False):
error = []
error.extend(self.check_text_value(email, self._("E-mail"), required = True, max = 100, min = 3, \
regex = re.compile(r"(?:^|\s)[-a-z0-9_.+]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE), \
regex_msg = self._("Your Email address is invalid.")))
if not error and queryDB:
try:
query = self.select_member_by_email(email)
except NoResultFound:
pass
else:
error.append(self._("That Email is taken. Please choose another."))
return error
def get_gravatar_url(self, email):
gravatar_id = hashlib.md5(email.lower()).hexdigest()
return "http://www.gravatar.com/avatar/%s?d=mm" % (gravatar_id)
def post_to_judger(self, query, judger, callback = None):
query["time"] = time.time()
query["code"] = query["code"].decode("utf-8")
query = dict(sorted(query.iteritems(), key=itemgetter(1)))
jsondump = json.dumps(query)
sign = hashlib.sha1(jsondump + judger.pubkey.strip()).hexdigest()
query["sign"] = sign
http_client = AsyncHTTPClient()
http_client.fetch(judger.path, method = "POST", body = urllib.urlencode({"query" : json.dumps(query)}), callback = callback)
def highlight_code(self, code, lang):
        # NOTE: this early return short-circuits the method, so the hand-built
        # line-number table below is unreachable; it is kept as the disabled rendering path.
        return highlight(code, CODE_LEXER[lang](), HtmlFormatter(linenos = True))
        codestr = highlight(code, CODE_LEXER[lang](), HtmlFormatter(nowrap = True))
table = '<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">'
code = ''
lines = codestr.split("\n")
for index, line in zip(range(len(lines)), lines):
table += "<span class='line-number'>%d</span>\n" % (index + 1)
code += "<span class='line'>%s</span>\n" % line
table += "</pre></td><td class='code'><pre><code class='%s'>%s</code></pre></td></tr></table></div>" % (CODE_LANG[lang], code)
return table
@property
def db(self):
return self.application.db
@property
def jinja2(self):
return self.application.jinja2
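A hedged sketch of how the validation helpers above compose in a handler subclass (the handler name, template, and form fields are illustrative, not from the source):

```python
# Illustrative subclass; get_argument comes from tornado.web.RequestHandler,
# and render(tplname, args) matches the signature defined on BaseHandler above.
class SignupHandler(BaseHandler):
    def post(self):
        errors = []
        errors += self.check_username(self.get_argument("username"), queryDB = True)
        errors += self.check_password(self.get_argument("password"))
        errors += self.check_email(self.get_argument("email"), queryDB = True)
        if errors:
            self.render("signup.html", dict(errors = errors))
```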
avg_line_length: 40.787037 | max_line_length: 147 | alphanum_fraction: 0.559932
qsc_code_*_quality_signal: num_words 1,035; num_chars 8,810; mean_word_length 4.66087; frac_words_unique 0.29372; frac_chars_top_2grams 0.016584; frac_chars_top_3grams 0.018657; frac_chars_top_4grams 0.013474; frac_chars_dupe_5grams 0.16791; frac_chars_dupe_6grams 0.151119; frac_chars_dupe_7grams 0.100332; frac_chars_dupe_8grams 0.100332; frac_chars_dupe_9grams 0.064677; frac_chars_dupe_10grams 0.045605; frac_chars_replacement_symbols 0; frac_chars_digital 0.015778; frac_chars_whitespace 0.316572; size_file_byte 8,810; num_lines 215; num_chars_line_max 148; num_chars_line_mean 40.976744; frac_chars_alphabet 0.785418; frac_chars_comments 0.062089; cate_xml_start 0; frac_lines_dupe_lines 0.177083; cate_autogen 0; frac_lines_long_string 0.015625; frac_chars_string_length 0.092163; frac_chars_long_word_length 0.029053; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.088542; cate_var_zero false; frac_lines_pass 0.026042; frac_lines_import 0.119792; frac_lines_simplefunc 0.015625; score_lines_no_logic 0.333333; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: de9773cffe9839ef07dd2219fd1b0246be382284 | size: 1,839 | ext: py | lang: Python
max_stars: src/blog/migrations/0001_initial.py @ triump0870/rohan (head 3bd56ccdc35cb67823117e78dc02becbfbd0b329, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markdownx.models
import myblog.filename
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True, max_length=255)),
('content', markdownx.models.MarkdownxField()),
('image', models.ImageField(upload_to=myblog.filename.generatefilename(b'posts/'), null=True, verbose_name=b'Cover Image', blank=True)),
('status', models.CharField(default=b'p', max_length=1, choices=[(b'd', b'Draft'), (b'p', b'Published'), (b'w', b'Withdrawn')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(related_name='posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at', 'title'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(unique=True, max_length=200)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
]
avg_line_length: 39.12766 | max_line_length: 152 | alphanum_fraction: 0.582926
qsc_code_*_quality_signal: num_words 187; num_chars 1,839; mean_word_length 5.572193; frac_words_unique 0.465241; frac_chars_top_2grams 0.034549; frac_chars_top_3grams 0.03071; frac_chars_top_4grams 0.040307; frac_chars_dupe_5grams 0.278311; frac_chars_dupe_6grams 0.224568; frac_chars_dupe_7grams 0.224568; frac_chars_dupe_8grams 0.151631; frac_chars_dupe_9grams 0.151631; frac_chars_dupe_10grams 0.151631; frac_chars_replacement_symbols 0; frac_chars_digital 0.008191; frac_chars_whitespace 0.269712; size_file_byte 1,839; num_lines 46; num_chars_line_max 153; num_chars_line_mean 39.978261; frac_chars_alphabet 0.767684; frac_chars_comments 0.011419; cate_xml_start 0; frac_lines_dupe_lines 0.275; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.088656; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.125; frac_lines_simplefunc 0; score_lines_no_logic 0.2; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: de9bc65cbfa30de1a8294fb16fd3712d1ce427db | size: 3,566 | ext: py | lang: Python
max_stars: #17.py @ Domino2357/daily-coding-problem (head 95ddef9db53c8b895f2c085ba6399a3144a4f8e6, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
"""
This problem was asked by Google.
Suppose we represent our file system by a string in the following manner:
The string "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext" represents:
dir
subdir1
subdir2
file.ext
The directory dir contains an empty sub-directory subdir1 and a sub-directory subdir2 containing a file file.ext.
The string "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext" represents:
dir
subdir1
file1.ext
subsubdir1
subdir2
subsubdir2
file2.ext
The directory dir contains two sub-directories subdir1 and subdir2. subdir1 contains a file file1.ext and an empty
second-level sub-directory subsubdir1. subdir2 contains a second-level sub-directory subsubdir2 containing a file file2.ext.
We are interested in finding the longest (number of characters) absolute path to a file within our file system. For example,
in the second example above, the longest absolute path is "dir/subdir2/subsubdir2/file2.ext", and its length is 32
(not including the double quotes).
Given a string representing the file system in the above format, return the length of the longest absolute path to a
file in the abstracted file system. If there is no file in the system, return 0.
Note:
The name of a file contains at least a period and an extension.
The name of a directory or sub-directory will not contain a period.
"""
# I am assuming that the number of t's in /n/t/t/t.../t/ stands for the level in the tree
# Furthermore, I am assuming the format of the string to be consistent
# last but not least I'll make the assumption that this is actually a tree, i.e., it has no cycles
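# Editor's sketch (not part of the original attempt): the standard approach splits
# on '\n', takes the number of leading '\t's as the depth, and keeps a running
# path length per depth. Names below are illustrative.
def longest_abs_path_length(fs):
    max_len = 0
    path_len = {0: 0}  # depth -> length of the path prefix, including the trailing '/'
    for line in fs.split('\n'):
        name = line.lstrip('\t')
        depth = len(line) - len(name)
        if '.' in name:  # per the problem note, only file names contain a period
            max_len = max(max_len, path_len[depth] + len(name))
        else:
            path_len[depth + 1] = path_len[depth] + len(name) + 1
    return max_len
# longest_abs_path_length("dir\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext") == 32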
def trace_back(string_tree):
    # convert to a list of characters: the parser below deletes slices in place,
    # which raises TypeError on an (immutable) str
    return longest_path_to_file(deserialize(list(string_tree)))
class FileTree:
def __init__(self, val, children):
self.val = val
self.children = children
def longest_path_to_file(file_tree, max_path_length = 0):
deepest_layer = True
for child in file_tree.children:
if child.children:
deepest_layer = False
if deepest_layer:
for child in file_tree.children:
print("Couldn't finish this in time")
# top level idea: deserialize the tree and then perform the operation on it
def deserialize(string_file_tree):
# split off the root
root = ''
children = []
i = 0
while i < len(string_file_tree):
if string_file_tree[i] == '\\':
break
else:
root = root + string_file_tree[i]
del string_file_tree[i]
i += 1
if not string_file_tree:
return FileTree(root, [])
else:
# cut off first \n\t\tsomefile
del string_file_tree[0:4]
        for subtree in find_subtree(string_file_tree):
            children.append(deserialize(subtree))
        return FileTree(root, children)  # was missing: the else-branch returned None
def find_subtree(string_file_tree):
subtree = ''
del string_file_tree[0:4]
j = 0
while j < len(string_file_tree):
# cut of the next subtree beginning with \n\tsomefilename, do recursion afterwards
if string_file_tree[j:j + 4] == "\\n\\t":
if not string_file_tree[j + 5] == "\\":
break
else:
# delete the \t\
del string_file_tree[j+3:j+4]
j += 1
else:
subtree += string_file_tree[j]
del string_file_tree[j]
j += 1
if not string_file_tree:
return [subtree]
else:
return [subtree] + find_subtree(string_file_tree)
if __name__ == '__main__':
print()
avg_line_length: 31.280702 | max_line_length: 124 | alphanum_fraction: 0.666854
qsc_code_*_quality_signal: num_words 538; num_chars 3,566; mean_word_length 4.29368; frac_words_unique 0.299257; frac_chars_top_2grams 0.072727; frac_chars_top_3grams 0.109091; frac_chars_top_4grams 0.036797; frac_chars_dupe_5grams 0.183117; frac_chars_dupe_6grams 0.080519; frac_chars_dupe_7grams 0.022511; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.017372; frac_chars_whitespace 0.257431; size_file_byte 3,566; num_lines 113; num_chars_line_max 125; num_chars_line_mean 31.557522; frac_chars_alphabet 0.854985; frac_chars_comments 0.551598; cate_xml_start 0; frac_lines_dupe_lines 0.288462; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.027628; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.096154; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0; frac_lines_simplefunc 0.019231; score_lines_no_logic 0.192308; frac_lines_print 0.038462
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: de9bd50729808fda9f77f7ae5831c5d7b432a027 | size: 1,315 | ext: py | lang: Python
max_stars: turbot/db.py @ emre/turbot (head 7bc49a8b79bce7f2490036d9255e5b3df8fff4b1, licenses ["MIT"]) | count: 3 | events: 2017-10-17T22:02:06.000Z to 2018-05-07T10:29:31.000Z
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: 3 | events: 2018-10-16T13:28:57.000Z to 2021-02-24T13:23:29.000Z
content:
from os.path import expanduser, exists
from os import makedirs
TURBOT_PATH = expanduser('~/.turbot')
UPVOTE_LOGS = expanduser("%s/upvote_logs" % TURBOT_PATH)
CHECKPOINT = expanduser("%s/checkpoint" % TURBOT_PATH)
REFUND_LOG = expanduser("%s/refunds" % TURBOT_PATH)
def load_checkpoint(fallback_block_num=None):
try:
return int(open(CHECKPOINT).read())
except FileNotFoundError as e:
if not exists(TURBOT_PATH):
makedirs(TURBOT_PATH)
dump_checkpoint(fallback_block_num)
return load_checkpoint()
def dump_checkpoint(block_num):
f = open(CHECKPOINT, 'w+')
f.write(str(block_num))
f.close()
def load_refunds():
try:
refunds = open(REFUND_LOG).readlines()
refunds = [r.replace("\n", "") for r in refunds]
except FileNotFoundError as e:
if not exists(TURBOT_PATH):
makedirs(TURBOT_PATH)
f = open(REFUND_LOG, 'w+')
f.close()
refunds = []
return refunds
def refund_key(to, memo, amount):
return "%s-%s-%s" % (to, memo, amount)
def add_refund(to, memo, amount):
f = open(REFUND_LOG, 'a+')
    f.write(refund_key(to, memo, amount) + "\n")  # newline added: without it, successive keys run together on one line and lookups fail
f.close()
def already_refunded(to, memo, amount):
refunds = load_refunds()
return refund_key(to, memo, amount) in refunds
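The refund log stores one dash-joined key per entry; for example (illustrative values):

```python
refund_key("alice", "memo-123", "1.000 STEEM")  # -> "alice-memo-123-1.000 STEEM"
```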
avg_line_length: 24.351852 | max_line_length: 56 | alphanum_fraction: 0.650951
qsc_code_*_quality_signal: num_words 173; num_chars 1,315; mean_word_length 4.768786; frac_words_unique 0.300578; frac_chars_top_2grams 0.09697; frac_chars_top_3grams 0.087273; frac_chars_top_4grams 0.054545; frac_chars_dupe_5grams 0.233939; frac_chars_dupe_6grams 0.157576; frac_chars_dupe_7grams 0.157576; frac_chars_dupe_8grams 0.157576; frac_chars_dupe_9grams 0.157576; frac_chars_dupe_10grams 0.157576; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.222814; size_file_byte 1,315; num_lines 53; num_chars_line_max 57; num_chars_line_mean 24.811321; frac_chars_alphabet 0.807241; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.289474; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.047148; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.157895; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.052632; frac_lines_simplefunc 0.026316; score_lines_no_logic 0.342105; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: dea196647fceafaeec0ee9058ac3907d2c76082c | size: 3,752 | ext: py | lang: Python
max_stars: pys3crypto.py @ elitest/pys3crypto (head 9dfef5935ff1c663b8641eaa052e778cdf34a565, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
#!/usr/bin/env python3
# Original Author @elitest
# This script uses boto3 to perform client side decryption
# of data encryption keys and associated files
# and encryption in ways compatible with the AWS SDKs
# This support is not available in boto3 at this time
# Wishlist:
# Currently only tested with KMS managed symmetric keys.
# Error checking
import boto3, argparse, base64, json
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
# Build the parser
argparser = argparse.ArgumentParser(description='Prints info about deleted items in s3 buckets and helps you download them.')
argparser.add_argument('bucket', help='The bucket that contains the file.')
argparser.add_argument('region', help='The region the CMK is in.')
argparser.add_argument('key', help='The name of the file that you would like to download and decrypt.')
argparser.add_argument('--profile', default='default', help='The profile name in ~/.aws/credentials')
args = argparser.parse_args()
# Set variables from arguments
bucket = args.bucket
region = args.region
profile = args.profile
key = args.key
# Setup AWS clients
boto3.setup_default_session(profile_name=profile, region_name=region)
s3_client = boto3.client('s3')
response = s3_client.get_object(Bucket=bucket,Key=key)
kms_client = boto3.client('kms')
# This function decrypts the encrypted key associated with the file
# and decrypts it
def decrypt_dek(metadata):
# Encrypted key
keyV2 = base64.b64decode(metadata['Metadata']['x-amz-key-v2'])
# Key ARN
context = json.loads(metadata['Metadata']['x-amz-matdesc'])
# This decrypts the DEK using KMS
dek = kms_client.decrypt(CiphertextBlob=keyV2, EncryptionContext=context)
return dek['Plaintext']
def decrypt(key, algo, iv, ciphertext, tag):
if algo == 'AES/GCM/NoPadding':
# Construct a Cipher object, with the key, iv, and additionally the
# GCM tag used for authenticating the message.
decryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv, tag),
backend=default_backend()
).decryptor()
# Decryption gets us the authenticated plaintext.
# If the tag does not match an InvalidTag exception will be raised.
return decryptor.update(ciphertext) + decryptor.finalize()
elif algo == 'AES/CBC/PKCS5Padding':
# Construct a Cipher object, with the key, iv
decryptor = Cipher(
algorithms.AES(key),
modes.CBC(iv),
backend=default_backend()
).decryptor()
# Decryption gets us the plaintext.
data = decryptor.update(ciphertext) + decryptor.finalize()
# Apparently PKCS5 and 7 are basically the same for our purposes
unpadder = padding.PKCS7(128).unpadder()
return unpadder.update(data) + unpadder.finalize()
else:
print('Unknown algorithm or padding.')
exit()
# Decrypt the DEK
plaintextDek = decrypt_dek(response)
# Get the encrypted body
# Haven't tested with large files
body=response['Body'].read()
# We need the content length for GCM to build the tag
contentLen = response['Metadata']['x-amz-unencrypted-content-length']
# IV
iv = base64.b64decode(response['Metadata']['x-amz-iv'])
# Algorithm
alg = response['Metadata']['x-amz-cek-alg']
# This splits the tag and data from the body if GCM
if alg == 'AES/GCM/NoPadding':
data = body[0:int(contentLen)]
    tagLen = response['Metadata']['x-amz-tag-len']  # tag length in bits (128 for GCM)
    # the GCM tag is the trailing tagLen/8 bytes appended after the ciphertext;
    # the original slice body[contentLen:tagLen] returned the wrong bytes
    tag = body[int(contentLen):int(contentLen) + int(tagLen) // 8]
else:
data = body[:]
tag = ''
# Decrypt the file
plaintext = decrypt(plaintextDek,alg,iv,data,tag)
print(plaintext)
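A self-contained check of the AES/GCM path used by `decrypt()` above (the key, IV, and plaintext are illustrative):

```python
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key, iv = os.urandom(32), os.urandom(12)
encryptor = Cipher(algorithms.AES(key), modes.GCM(iv), backend=default_backend()).encryptor()
ciphertext = encryptor.update(b"hello world") + encryptor.finalize()
tag = encryptor.tag  # 16 bytes, appended after the ciphertext in the S3 object body

decryptor = Cipher(algorithms.AES(key), modes.GCM(iv, tag), backend=default_backend()).decryptor()
assert decryptor.update(ciphertext) + decryptor.finalize() == b"hello world"
```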
avg_line_length: 36.427184 | max_line_length: 125 | alphanum_fraction: 0.709488
qsc_code_*_quality_signal: num_words 499; num_chars 3,752; mean_word_length 5.296593; frac_words_unique 0.386774; frac_chars_top_2grams 0.020431; frac_chars_top_3grams 0.027242; frac_chars_top_4grams 0.030269; frac_chars_dupe_5grams 0.121831; frac_chars_dupe_6grams 0.090049; frac_chars_dupe_7grams 0.062807; frac_chars_dupe_8grams 0.062807; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.010475; frac_chars_whitespace 0.185768; size_file_byte 3,752; num_lines 102; num_chars_line_max 126; num_chars_line_mean 36.784314; frac_chars_alphabet 0.854664; frac_chars_comments 0.29371; cate_xml_start 0; frac_lines_dupe_lines 0.169492; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.193659; frac_chars_long_word_length 0.012223; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.033898; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.067797; frac_lines_simplefunc 0; score_lines_no_logic 0.152542; frac_lines_print 0.033898
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: dea3d4b6a9500edd440cd83df9ceb44f4b4e36eb | size: 1,777 | ext: py | lang: Python
max_stars: openTEL_11_19/presentation_figures/tm112_utils.py @ psychemedia/presentations (head a4d7058b1f716c59a89d0bcd1390ead75d769d43, licenses ["Apache-2.0"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: 1 | events: 2019-11-05T10:35:40.000Z to 2019-11-05T10:35:40.000Z
content:
from IPython.display import HTML, display  # display was missing but is called at the end of obj_display
#TO DO - the nested table does not display?
#Also, the nested execution seems to take a long time to run?
#Profile it to see where I'm going wrong!
def obj_display(v, nest=False, style=True):
def nested(v):
if nest:
return obj_display(v, style=False)
return v
"""Generate a simple visualisation of an object's structure. """
html = '''<style type='text/css'>
.vartable {{
border-style: solid !important;
border-width: 2px !important;
}}
.vartable td {{
border-style: solid !important;
border-width: 2px !important;
text-align: left;
}}
</style>''' if style else ''
if isinstance(v, int) or isinstance(v, str):
html = html+'''<table class='vartable'><tr><td>ID:<br/>{v_id}</td>
<td>TYPE:<br/>{v_typ}</td></tr>
<tr><td colspan=2>VALUE:<br/>{v_val}</td></tr></table>'''
html = html.format(v_id = id(v), v_typ = type(v).__name__, v_val=v)
elif isinstance(v, list) or isinstance(v, dict):
html = html+'''<table class='vartable'><tr><td>ID:<br/>{v_id}</td>
<td>TYPE:<br/>{v_typ}</td></tr>
<tr><td colspan=2>VALUE:<br/>{v_val}</td></tr></table>'''
if isinstance(v, dict):
v_items = ''.join(['<td>[{i}]: <strong>{v}</strong></td>'.format(i=i, v=nested(v_item) ) for v_item, i in enumerate(v)])
else:
v_items = ''.join(['<td>[{i}]: <strong>{v}</strong></td>'.format(i=i, v= nested(v_item) ) for i, v_item in enumerate(v)])
v_val='<table><tr>{v_items}</tr></table>'.format(v_items = v_items)
html = html.format(v_id = id(v), v_typ = type(v).__name__, v_val=v_val)
display(HTML(html))
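A hedged usage sketch (assumes an IPython/Jupyter session, since `obj_display` calls `display()` to render the HTML):

```python
obj_display(42)                    # scalar: a single ID / TYPE / VALUE table
obj_display([1, 2, 3])             # list: the VALUE cell holds a row of indexed items
obj_display({"a": 1}, nest=True)   # nested mode, the slow path noted in the TO DOs above
```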
avg_line_length: 38.630435 | max_line_length: 133 | alphanum_fraction: 0.563309
qsc_code_*_quality_signal: num_words 269; num_chars 1,777; mean_word_length 3.598513; frac_words_unique 0.30855; frac_chars_top_2grams 0.018595; frac_chars_top_3grams 0.022727; frac_chars_top_4grams 0.051653; frac_chars_dupe_5grams 0.46281; frac_chars_dupe_6grams 0.46281; frac_chars_dupe_7grams 0.46281; frac_chars_dupe_8grams 0.46281; frac_chars_dupe_9grams 0.363636; frac_chars_dupe_10grams 0.363636; frac_chars_replacement_symbols 0; frac_chars_digital 0.00297; frac_chars_whitespace 0.241981; size_file_byte 1,777; num_lines 45; num_chars_line_max 134; num_chars_line_mean 39.488889; frac_chars_alphabet 0.715664; frac_chars_comments 0.07991; cate_xml_start 0; frac_lines_dupe_lines 0.363636; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.444799; frac_chars_long_word_length 0.207403; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.060606; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.151515; frac_lines_simplefunc 0; score_lines_no_logic 0.272727; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: dea6f4a43ec33dab31441d90f5221fa29eeb9456 | size: 8,191 | ext: py | lang: Python
max_stars: analysis_guis/code_test.py @ Sepidak/spikeGUI (head 25ae60160308c0a34e7180f3e39a1c4dc6aad708, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: 3 | events: 2021-08-09T21:51:41.000Z to 2021-08-09T21:51:45.000Z
max_forks: same file/repo/head/licenses | count: 3 | events: 2021-10-16T14:07:59.000Z to 2021-10-16T17:09:03.000Z
content:
# -*- coding: utf-8 -*-
"""
Simple example using BarGraphItem
"""
# import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pickle as p
import pandas as pd
from analysis_guis.dialogs.rotation_filter import RotationFilter
from analysis_guis.dialogs import config_dialog
from analysis_guis.dialogs.info_dialog import InfoDialog
from rotation_analysis.analysis.probe.probe_io.probe_io import TriggerTraceIo, BonsaiIo, IgorIo
from PyQt5.QtWidgets import QApplication
from datetime import datetime
from dateutil import parser
import analysis_guis.calc_functions as cfcn
import analysis_guis.rotational_analysis as rot
import matplotlib.pyplot as plt
from pyphys.pyphys.pyphys import PxpParser
from collections import OrderedDict
import analysis_guis.common_func as cf
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
date2sec = lambda t: np.sum([3600 * t.hour, 60 * t.minute, t.second])
trig_count = lambda data, cond: len(np.where(np.diff(data[cond]['cpg_ttlStim']) > 1)[0]) + 1
get_bin_index = lambda x, y: next((i for i in range(len(y)) if x < y[i]), len(y)) - 1
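# Editor's note (illustrative): get_bin_index returns the index of the bin that
# contains x, given ascending bin edges y; e.g. get_bin_index(2.5, [0, 1, 2, 3]) == 2,
# and values beyond the last edge fall into the final bin (len(y) - 1).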
def setup_polar_spike_freq(wvPara, tSpike, sFreq, b_sz, is_pos):
    '''
    :param wvPara: waveform parameters for a single filter
    :param tSpike: spike times for a single filter
    :param sFreq: sampling frequency
    :param b_sz: kinematic bin size
    :param is_pos: True to bin by position, False to bin by velocity
    :return: bin mid-points and the binned spike-frequency array
    '''
    # memory allocation (signature fixed: the original accepted r_obj and relied on
    # a global i_filt, which did not match the 5-argument call in __main__ below)
ind_inv, xi_bin_tot = np.empty(2, dtype=object), np.empty(2, dtype=object)
# calculates the bin times
xi_bin_tot[0], t_bin, t_phase = rot.calc_wave_kinematic_times(wvPara[0][0], b_sz, sFreq, is_pos, yDir=-1)
xi_bin_tot[1], dt_bin = -xi_bin_tot[0], np.diff(t_bin)
# determines the bin indices
for i in range(2):
xi_mid, ind_inv[i] = np.unique(0.5 * (xi_bin_tot[i][:-1] + xi_bin_tot[i][1:]), return_inverse=True)
# memory allocation
yDir = wvPara[0]['yDir']
n_trial, n_bin = len(yDir), len(xi_mid)
tSp_bin = np.zeros((n_bin, n_trial))
#
for i_trial in range(n_trial):
# combines the time spikes in the order that the CW/CCW phases occur
ii = int(yDir[i_trial] == 1)
tSp = np.hstack((tSpike[1 + ii][i_trial], tSpike[2 - ii][i_trial] + t_phase))
# appends the times
t_hist = np.histogram(tSp, bins=t_bin)
for j in range(len(t_hist[0])):
i_bin = ind_inv[ii][j]
tSp_bin[i_bin, i_trial] += t_hist[0][j] / (2.0 * dt_bin[j])
# returns the final bin
return xi_mid, tSp_bin
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
# loads the data for testing
with open('C:\\Work\\EPhys\\Code\\Sepi\\wvPara.p', 'rb') as fp:
wvPara = p.load(fp)
tSpike = p.load(fp)
#
sFreq = 30000
kb_sz = 10
title_str = ['Displacement', 'Velocity']
lg_str = ['Type 1', 'Type 2', 'Type 3']
# memory allocation
n_filt = len(wvPara)
c = cf.get_plot_col(n_filt)
#
fig = plt.figure()
ax = np.empty(2, dtype=object)
#
for i_type in range(2):
# sets up the spiking frequency arrays
tSp_bin = np.empty(n_filt, dtype=object)
for i_filt in range(n_filt):
xi_mid, tSp_bin[i_filt] = setup_polar_spike_freq(wvPara[i_filt], tSpike[i_filt], sFreq, kb_sz, i_type==0)
#
xi_min = xi_mid[0] - np.diff(xi_mid[0:2])[0]/2
theta = np.pi * (1 - (xi_mid - xi_min) / np.abs(2 * xi_min))
x_tick = np.linspace(xi_min, -xi_min, 7 + 2 * i_type)
# creates the subplot
ax[i_type] = plt.subplot(1, 2, i_type + 1, projection='polar')
ax[i_type].set_thetamin(0)
ax[i_type].set_thetamax(180)
# creates the radial plots for each of the filter types
h_plt = []
for i_filt in range(n_filt):
# creates the plot and resets the labels
tSp_mn = np.mean(tSp_bin[i_filt], axis=1)
h_plt.append(ax[i_type].plot(theta, tSp_mn, 'o-', c=c[i_filt]))
# sets the axis properties (first filter only)
if i_filt == 0:
ax[i_type].set_title(title_str[i_type])
ax[i_type].set_xticks(np.pi * (x_tick - xi_min) / np.abs(2 * xi_min))
ax[i_type].set_xticklabels([str(int(np.round(-x))) for x in x_tick])
# sets the legend (first subplot only)
if i_type == 0:
ax[i_type].legend(lg_str, loc=1)
# determines the overall radial maximum (over all subplots) and resets the radial ticks
y_max = [max(x.get_ylim()) for x in ax]
i_max = np.argmax(y_max)
dy = np.diff(ax[i_max].get_yticks())[0]
y_max_tot = dy * (np.floor(y_max[i_max] / dy) + 1)
# resets the axis radial limits
for x in ax:
x.set_ylim(0, y_max_tot)
# shows the plot
plt.show()
a = 1
# app = QApplication([])
# h_obj = RotationFilter(data)
# h_obj = InfoDialog(data)
# a = 1
# #
# igor_waveforms_path = 'G:\\Seagate\\Work\\EPhys\\Data\\CA326_C_day3\\Igor\\CA326_C_day3'
# bonsai_metadata_path = 'G:\\Seagate\\Work\\EPhys\\Data\\CA326_C_day3\\Bonsai\\CA326_C_day3_all.csv'
#
# #
# file_time_key = 'FileTime'
# bonsai_io = BonsaiIo(bonsai_metadata_path)
#
#
# # determines the indices of the experiment condition triel group
# t_bonsai = [parser.parse(x) for x in bonsai_io.data['Timestamp']]
# t_bonsai_sec = np.array([date2sec(x) for x in t_bonsai])
# d2t_bonsai = np.diff(t_bonsai_sec, 2)
# grp_lim = grp_lim = [-1] + list(np.where(d2t_bonsai > 60)[0] + 1) + [len(d2t_bonsai) + 1]
# ind_grp = [np.arange(grp_lim[x] + 1, grp_lim[x + 1] + 1) for x in range(len(grp_lim) - 1)]
#
# # sets the time, name and trigger count from each of these groups
# t_bonsai_grp = [t_bonsai_sec[x[0]] for x in ind_grp]
# c_bonsai_grp = [bonsai_io.data['Condition'][x[0]] for x in ind_grp]
# n_trig_bonsai = [len(x) for x in ind_grp]
#
# # determines the feasible variables from the igor data file
# igor_data = PxpParser(igor_waveforms_path)
# var_keys = list(igor_data.data.keys())
# is_ok = ['command' in igor_data.data[x].keys() if isinstance(igor_data.data[x], OrderedDict) else False for x in var_keys]
#
# # sets the name, time and trigger count from each of the igor trial groups
# c_igor_grp = [y for x, y in zip(is_ok, var_keys) if x]
# t_igor_grp, t_igor_str, n_trig_igor = [], [], [trig_count(igor_data.data, x) for x in c_igor_grp]
# for ck in c_igor_grp:
# t_igor_str_nw = igor_data.data[ck]['vars'][file_time_key][0]
# t_igor_str.append(t_igor_str_nw)
# t_igor_grp.append(date2sec(datetime.strptime(t_igor_str_nw, '%H:%M:%S').time()))
#
# # calculates the point-wise differences between the trial timer and trigger count
# dt_grp = cfcn.calc_pointwise_diff(t_igor_grp, t_bonsai_grp)
# dn_grp = cfcn.calc_pointwise_diff(n_trig_igor, n_trig_bonsai)
#
# # ensures that only groups that have equal trigger counts are matched
# dt_max = np.max(dt_grp) + 1
# dt_grp[dn_grp > 0] = dt_max
#
# #
# iter = 0
# while 1:
# i2b = np.argmin(dt_grp, axis=1)
# i2b_uniq, ni2b = np.unique(i2b, return_counts=True)
#
# ind_multi = np.where(ni2b > 1)[0]
# if len(ind_multi):
# if iter == 0:
# for ii in ind_multi:
# jj = np.where(i2b == i2b_uniq[ii])[0]
#
# imn = np.argmin(dt_grp[jj, i2b[ii]])
# for kk in jj[jj != jj[imn]]:
# dt_grp[kk, i2b[ii]] = dt_max
# else:
# pass
# else:
# break
#
# # sets the igor-to-bonsai name groupings
# i2b_key, x = {}, np.array(c_igor_grp)[i2b]
# for cc in c_bonsai_grp:
# if cc not in i2b_key:
# jj = np.where([x == cc for x in c_bonsai_grp])[0]
# i2b_key[cc] = x[jj]
avg_line_length: 37.401826 | max_line_length: 129 | alphanum_fraction: 0.605421
qsc_code_*_quality_signal: num_words 1,301; num_chars 8,191; mean_word_length 3.586472; frac_words_unique 0.256726; frac_chars_top_2grams 0.015002; frac_chars_top_3grams 0.015431; frac_chars_top_4grams 0.010716; frac_chars_dupe_5grams 0.087655; frac_chars_dupe_6grams 0.04715; frac_chars_dupe_7grams 0.036434; frac_chars_dupe_8grams 0.015002; frac_chars_dupe_9grams 0.015002; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.021917; frac_chars_whitespace 0.270297; size_file_byte 8,191; num_lines 218; num_chars_line_max 130; num_chars_line_mean 37.573395; frac_chars_alphabet 0.758742; frac_chars_comments 0.43792; cate_xml_start 0; frac_lines_dupe_lines 0.025316; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.02526; frac_chars_long_word_length 0.008735; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.012658; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.227848; frac_lines_simplefunc 0; score_lines_no_logic 0.253165; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: dea9df41450058a28e28c535ce8960f8b770dc38 | size: 1,147 | ext: py | lang: Python
max_stars: pex/pip/download_observer.py @ sthagen/pantsbuild-pex (head bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309, licenses ["Apache-2.0"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.pip.log_analyzer import LogAnalyzer
from pex.typing import TYPE_CHECKING, Generic
if TYPE_CHECKING:
from typing import Iterable, Mapping, Optional, Text
import attr # vendor:skip
else:
from pex.third_party import attr
@attr.s(frozen=True)
class Patch(object):
code = attr.ib(default=None) # type: Optional[Text]
args = attr.ib(default=()) # type: Iterable[str]
env = attr.ib(factory=dict) # type: Mapping[str, str]
if TYPE_CHECKING:
from typing import TypeVar
_L = TypeVar("_L", bound=LogAnalyzer)
class DownloadObserver(Generic["_L"]):
def __init__(
self,
analyzer, # type: _L
patch=Patch(), # type: Patch
):
# type: (...) -> None
self._analyzer = analyzer
self._patch = patch
@property
def analyzer(self):
# type: () -> _L
return self._analyzer
@property
def patch(self):
# type: () -> Patch
return self._patch
avg_line_length: 23.408163 | max_line_length: 66 | alphanum_fraction: 0.646905
qsc_code_*_quality_signal: num_words 141; num_chars 1,147; mean_word_length 5.099291; frac_words_unique 0.446809; frac_chars_top_2grams 0.029207; frac_chars_top_3grams 0.038943; frac_chars_top_4grams 0.05007; frac_chars_dupe_5grams 0.083449; frac_chars_dupe_6grams 0.083449; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.006928; frac_chars_whitespace 0.244987; size_file_byte 1,147; num_lines 48; num_chars_line_max 67; num_chars_line_mean 23.895833; frac_chars_alphabet 0.823326; frac_chars_comments 0.2415; cate_xml_start 0; frac_lines_dupe_lines 0.133333; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.004662; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.1; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.233333; frac_lines_simplefunc 0.066667; score_lines_no_logic 0.566667; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: deabe0363fc1143c6a3fe5cc62b534d0a3e480ca | size: 2,096 | ext: py | lang: Python
max_stars: pbpstats/data_loader/nba_possession_loader.py @ pauldevos/pbpstats (head 71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152, licenses ["MIT"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
from pbpstats.resources.enhanced_pbp import StartOfPeriod
class NbaPossessionLoader(object):
"""
Class for shared methods between :obj:`~pbpstats.data_loader.data_nba.possessions_loader.DataNbaPossessionLoader`
and :obj:`~pbpstats.data_loader.stats_nba.possessions_loader.StatsNbaPossessionLoader`
Both :obj:`~pbpstats.data_loader.data_nba.possessions_loader.DataNbaPossessionLoader`
and :obj:`~pbpstats.data_loader.stats_nba.possessions_loader.StatsNbaPossessionLoader` should inherit from this class
This class should not be instantiated directly
"""
def _split_events_by_possession(self):
"""
splits events by possession
:returns: list of lists with events for each possession
"""
events = []
possession_events = []
for event in self.events:
possession_events.append(event)
if event.is_possession_ending_event:
events.append(possession_events)
possession_events = []
return events
def _add_extra_attrs_to_all_possessions(self):
"""
adds possession number and next and previous possession
"""
number = 1
for i, possession in enumerate(self.items):
if i == 0 and i == len(self.items) - 1:
possession.previous_possession = None
possession.next_possession = None
elif isinstance(possession.events[0], StartOfPeriod) or i == 0:
possession.previous_possession = None
possession.next_possession = self.items[i + 1]
number = 1
elif (
i == len(self.items) - 1
or possession.period != self.items[i + 1].period
):
possession.previous_possession = self.items[i - 1]
possession.next_possession = None
else:
possession.previous_possession = self.items[i - 1]
possession.next_possession = self.items[i + 1]
possession.number = number
number += 1
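A hedged sketch of the splitting behavior using stub events (the class names are illustrative; only the `is_possession_ending_event` flag from the code above is exercised):

```python
class StubEvent:
    def __init__(self, ends):
        self.is_possession_ending_event = ends

class DemoLoader(NbaPossessionLoader):
    def __init__(self, events):
        self.events = events

loader = DemoLoader([StubEvent(False), StubEvent(True), StubEvent(False), StubEvent(True)])
possessions = loader._split_events_by_possession()
print(len(possessions))  # 2 -- each inner list ends with a possession-ending event
```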
avg_line_length: 39.54717 | max_line_length: 121 | alphanum_fraction: 0.624046
qsc_code_*_quality_signal: num_words 220; num_chars 2,096; mean_word_length 5.772727; frac_words_unique 0.327273; frac_chars_top_2grams 0.056693; frac_chars_top_3grams 0.03937; frac_chars_top_4grams 0.043307; frac_chars_dupe_5grams 0.456693; frac_chars_dupe_6grams 0.434646; frac_chars_dupe_7grams 0.426772; frac_chars_dupe_8grams 0.32126; frac_chars_dupe_9grams 0.32126; frac_chars_dupe_10grams 0.32126; frac_chars_replacement_symbols 0; frac_chars_digital 0.008826; frac_chars_whitespace 0.297233; size_file_byte 2,096; num_lines 52; num_chars_line_max 122; num_chars_line_mean 40.307692; frac_chars_alphabet 0.85336; frac_chars_comments 0.28292; cate_xml_start 0; frac_lines_dupe_lines 0.375; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.0625; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.03125; frac_lines_simplefunc 0; score_lines_no_logic 0.15625; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: dead01ec590550c2d98b328ed72222f137d3778b | size: 7,033 | ext: py | lang: Python
max_stars: vmware_nsx_tempest/tests/nsxv/api/base_provider.py @ gravity-tak/vmware-nsx-tempest (head 3a1007d401c471d989345bb5a3f9769f84bd4ac6, licenses ["Apache-2.0"]) | count: null | events: null to null
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: null | events: null to null
content:
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class BaseAdminNetworkTest(base.BaseAdminNetworkTest):
# NOTE(akang): This class inherits from BaseAdminNetworkTest.
# By default client is cls.client, but for provider network,
# the client is admin_client. The test class should pass
# client=self.admin_client, if it wants to create provider
# network/subnet.
@classmethod
def skip_checks(cls):
super(BaseAdminNetworkTest, cls).skip_checks()
if not test.is_extension_enabled('provider', 'network'):
msg = "Network Provider Extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(BaseAdminNetworkTest, cls).resource_setup()
cls.admin_netwk_info = []
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
for netwk_info in cls.admin_netwk_info:
net_client, network = netwk_info
try:
cls._try_delete_resource(net_client.delete_network,
network['id'])
except Exception:
pass
super(BaseAdminNetworkTest, cls).resource_cleanup()
@classmethod
def create_network(cls, network_name=None, client=None,
**kwargs):
net_client = client if client else cls.admin_networks_client
network_name = network_name or data_utils.rand_name('ADM-network-')
post_body = {'name': network_name}
post_body.update(kwargs)
body = net_client.create_network(**post_body)
network = body['network']
cls.admin_netwk_info.append([net_client, network])
return body
@classmethod
def update_network(cls, network_id, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.update_network(network_id, **kwargs)
@classmethod
def delete_network(cls, network_id, client=None):
net_client = client if client else cls.admin_networks_client
return net_client.delete_network(network_id)
@classmethod
def show_network(cls, network_id, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.show_network(network_id, **kwargs)
@classmethod
def list_networks(cls, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.list_networks(**kwargs)
@classmethod
def create_subnet(cls, network, client=None,
gateway='', cidr=None, mask_bits=None,
ip_version=None, cidr_offset=0, **kwargs):
ip_version = (ip_version if ip_version is not None
else cls._ip_version)
net_client = client if client else cls.admin_subnets_client
post_body = get_subnet_create_options(
network['id'], ip_version,
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
mask_bits=mask_bits, **kwargs)
return net_client.create_subnet(**post_body)
@classmethod
def update_subnet(cls, subnet_id, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.update_subnet(subnet_id, **kwargs)
@classmethod
def delete_subnet(cls, subnet_id, client=None):
net_client = client if client else cls.admin_subnets_client
return net_client.delete_subnet(subnet_id)
@classmethod
def show_subnet(cls, subnet_id, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.show_subnet(subnet_id, **kwargs)
@classmethod
def list_subnets(cls, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.list_subnets(**kwargs)
# add other create methods, i.e. security-group, port, floatingip
# if needed.
def get_subnet_create_options(network_id, ip_version=4,
gateway='', cidr=None, mask_bits=None,
num_subnet=1, gateway_offset=1, cidr_offset=0,
**kwargs):
"""When cidr_offset>0 it request only one subnet-options:
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
"""
gateway_not_set = (gateway == '')
if ip_version == 4:
cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = (
cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
subnet_list = []
if cidr_offset > 0:
num_subnet = cidr_offset + 1
for subnet_cidr in cidr.subnet(mask_bits):
if gateway_not_set:
gateway_ip = gateway or (
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
else:
gateway_ip = gateway
try:
subnet_body = dict(
network_id=network_id,
cidr=str(subnet_cidr),
ip_version=ip_version,
gateway_ip=gateway_ip,
**kwargs)
if num_subnet <= 1:
return subnet_body
subnet_list.append(subnet_body)
if len(subnet_list) >= num_subnet:
if cidr_offset > 0:
# user request the 'cidr_offset'th of cidr
return subnet_list[cidr_offset]
# user request list of cidr
return subnet_list
except exceptions.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise exceptions.BuildErrorException(message)
return {}
avg_line_length: 39.072222 | max_line_length: 78 | alphanum_fraction: 0.65278
qsc_code_*_quality_signal: num_words 879; num_chars 7,033; mean_word_length 4.994312; frac_words_unique 0.216155; frac_chars_top_2grams 0.047153; frac_chars_top_3grams 0.034169; frac_chars_top_4grams 0.038724; frac_chars_dupe_5grams 0.351708; frac_chars_dupe_6grams 0.328474; frac_chars_dupe_7grams 0.26492; frac_chars_dupe_8grams 0.248519; frac_chars_dupe_9grams 0.230296; frac_chars_dupe_10grams 0.207517; frac_chars_replacement_symbols 0; frac_chars_digital 0.005272; frac_chars_whitespace 0.271861; size_file_byte 7,033; num_lines 179; num_chars_line_max 79; num_chars_line_mean 39.290503; frac_chars_alphabet 0.851982; frac_chars_comments 0.177023; cate_xml_start 0; frac_lines_dupe_lines 0.259843; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.028233; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.110236; cate_var_zero false; frac_lines_pass 0.007874; frac_lines_import 0.047244; frac_lines_simplefunc 0; score_lines_no_logic 0.275591; frac_lines_print 0
raw qsc_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: deafcfc518bad5ab9572431f7de653f846580238 | size: 1,050 | ext: py | lang: Python
max_stars: python/5.concurrent/ZCoroutine/z_new_ipc/8.condition.py @ lotapp/BaseCode (head 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320, licenses ["Apache-2.0"]) | count: 25 | events: 2018-06-13T08:13:44.000Z to 2020-11-19T14:02:11.000Z
max_issues: same file/repo/head/licenses | count: null | events: null to null
max_forks: same file/repo/head/licenses | count: 13 | events: 2018-06-13T08:13:38.000Z to 2022-01-06T06:45:07.000Z
content:
import asyncio
cond = None
p_list = []
# producer
async def producer(n):
    for i in range(5):
        async with cond:
            p_list.append(f"{n}-{i}")
            print(f"[producer {n}] produced item {n}-{i}")
            # notify any one waiting consumer
            cond.notify()  # to notify every consumer: cond.notify_all()
            # simulate a time-consuming operation
            await asyncio.sleep(0.01)
# 消费者
async def consumer(i):
while True:
async with cond:
if p_list:
print(f"列表商品:{p_list}")
name = p_list.pop() # 消费商品
print(f"[消费者{i}]消费商品{name}")
print(f"列表剩余:{p_list}")
# 摸拟一个耗时操作
await asyncio.sleep(0.01)
else:
await cond.wait()
async def main():
global cond
cond = asyncio.Condition() # 初始化condition
p_tasks = [asyncio.create_task(producer(i)) for i in range(2)] # 两个生产者
c_tasks = [asyncio.create_task(consumer(i)) for i in range(5)] # 五个消费者
await asyncio.gather(*p_tasks, *c_tasks)
if __name__ == "__main__":
asyncio.run(main())
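# A minimal variant sketch (not part of the original demo): the same producer
# written with notify_all(), which wakes every waiting consumer at once
# instead of exactly one. Note that the demo above never terminates, because
# the consumers loop forever, so gather() waits indefinitely.
async def producer_notify_all(n):
    for i in range(5):
        async with cond:
            p_list.append(f"{n}-{i}")
            cond.notify_all()  # wake all waiting consumers, not just one
        await asyncio.sleep(0.01)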
| 23.333333
| 75
| 0.526667
| 137
| 1,050
| 3.883212
| 0.40146
| 0.056391
| 0.033835
| 0.06203
| 0.174812
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.34
| 1,050
| 44
| 76
| 23.863636
| 0.75469
| 0.086667
| 0
| 0.137931
| 0
| 0
| 0.082278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deb039b791ed71607787c0d4ffc9f5bb4edef521
| 930
|
py
|
Python
|
Q846_Hand-of-Straights.py
|
xiaosean/leetcode_python
|
844ece02d699bfc620519bd94828ed0e18597f3e
|
[
"MIT"
] | null | null | null |
Q846_Hand-of-Straights.py
|
xiaosean/leetcode_python
|
844ece02d699bfc620519bd94828ed0e18597f3e
|
[
"MIT"
] | null | null | null |
Q846_Hand-of-Straights.py
|
xiaosean/leetcode_python
|
844ece02d699bfc620519bd94828ed0e18597f3e
|
[
"MIT"
] | null | null | null |
from collections import Counter
from typing import List  # needed for the List[int] annotation below

class Solution:
    def isNStraightHand(self, hand: List[int], W: int) -> bool:
        n = len(hand)
if n == 0 or n % W != 0:
return False
groups_num = n // W
c = Counter(hand)
keys = list(c.keys())
keys.sort()
step = 0
for _ in range(groups_num):
groups = []
step_lock = None
for idx, k in enumerate(keys[step:step+W]):
if c[k] > 0:
c[k] -= 1
if groups and k != groups[-1]+1:
return False
groups += [k]
if step_lock is None and c[k] > 0:
step += idx
step_lock = True
if step_lock is None:
step += W
if len(groups) < W:
return False
return True
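# Alternative sketch (not in the original file): the canonical greedy
# solution, which repeatedly consumes W consecutive values starting from the
# smallest remaining card; O(n log n) from the sort.
def is_n_straight_hand_greedy(hand, W):
    if len(hand) % W != 0:
        return False
    counts = Counter(hand)
    for v in sorted(counts):
        need = counts[v]
        if need > 0:
            # a group must start at v, so v..v+W-1 each need `need` copies
            for w in range(v, v + W):
                if counts[w] < need:
                    return False
                counts[w] -= need
    return True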
| 31
| 63
| 0.410753
| 109
| 930
| 3.440367
| 0.357798
| 0.085333
| 0.090667
| 0.064
| 0.085333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.496774
| 930
| 30
| 64
| 31
| 0.782051
| 0
| 0
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deba0ac91a90f7d9408ab094dc6d137f7476170c
| 4,495
|
py
|
Python
|
smart_contract/__init__.py
|
publicqi/CTFd-Fox
|
b1d0169db884cdf3cb665faa8987443e7630d108
|
[
"MIT"
] | 1
|
2021-01-09T15:20:14.000Z
|
2021-01-09T15:20:14.000Z
|
smart_contract/__init__.py
|
publicqi/CTFd-Fox
|
b1d0169db884cdf3cb665faa8987443e7630d108
|
[
"MIT"
] | null | null | null |
smart_contract/__init__.py
|
publicqi/CTFd-Fox
|
b1d0169db884cdf3cb665faa8987443e7630d108
|
[
"MIT"
] | null | null | null |
from __future__ import division # Use floating point for math calculations
from flask import Blueprint
from CTFd.models import (
ChallengeFiles,
Challenges,
Fails,
Flags,
Hints,
Solves,
Tags,
db,
)
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
from CTFd.plugins.flags import get_flag_class
from CTFd.utils.uploads import delete_file
from CTFd.utils.user import get_ip
class SmartContractChallenge(BaseChallenge):
id = "smart_contract"
name = "smart_contract"
templates = {
"create": "/plugins/smart_contract/assets/create.html",
"update": "/plugins/smart_contract/assets/update.html",
"view": "/plugins/smart_contract/assets/view.html",
}
scripts = {
"create": "/plugins/smart_contract/assets/create.js",
"update": "/plugins/smart_contract/assets/update.js",
"view": "/plugins/smart_contract/assets/view.js",
}
route = "/plugins/smart_contract/assets/"
blueprint = Blueprint(
"smart_contract", __name__, template_folder="templates", static_folder="assets"
)
@staticmethod
def create(request):
data = request.form or request.get_json()
challenge = Challenges(**data)
db.session.add(challenge)
db.session.commit()
return challenge
@staticmethod
def read(challenge):
data = {
"id": challenge.id,
"name": challenge.name,
"value": challenge.value,
"description": challenge.description,
"category": challenge.category,
"state": challenge.state,
"max_attempts": challenge.max_attempts,
"type": challenge.type,
"type_data": {
"id": SmartContractChallenge.id,
"name": SmartContractChallenge.name,
"templates": SmartContractChallenge.templates,
"scripts": SmartContractChallenge.scripts,
},
}
return data
@staticmethod
def update(challenge, request):
data = request.form or request.get_json()
for attr, value in data.items():
setattr(challenge, attr, value)
db.session.commit()
return challenge
@staticmethod
def delete(challenge):
Fails.query.filter_by(challenge_id=challenge.id).delete()
Solves.query.filter_by(challenge_id=challenge.id).delete()
Flags.query.filter_by(challenge_id=challenge.id).delete()
files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()
for f in files:
delete_file(f.id)
ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()
Tags.query.filter_by(challenge_id=challenge.id).delete()
Hints.query.filter_by(challenge_id=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
db.session.commit()
@staticmethod
def attempt(challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
flags = Flags.query.filter_by(challenge_id=challenge.id).all()
for flag in flags:
if get_flag_class(flag.type).compare(flag, submission):
return True, "Correct"
return False, "Incorrect"
@staticmethod
def solve(user, team, challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
solve = Solves(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
ip=get_ip(req=request),
provided=submission,
)
db.session.add(solve)
db.session.commit()
db.session.close()
@staticmethod
def fail(user, team, challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
wrong = Fails(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
            ip=get_ip(req=request),
provided=submission,
)
db.session.add(wrong)
db.session.commit()
db.session.close()
def load(app):
CHALLENGE_CLASSES["smart_contract"] = SmartContractChallenge
register_plugin_assets_directory(
app, base_path="/plugins/smart_contract/assets/"
)
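# Hedged usage note: CTFd loads plugins by importing each package under
# CTFd/plugins/ and calling its load(app) hook, so placing this package in
# CTFd/plugins/smart_contract/ (together with the assets referenced above)
# should be enough to register the challenge type. The exact directory layout
# and asset routes may vary between CTFd versions.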
| 32.338129
| 87
| 0.629588
| 486
| 4,495
| 5.676955
| 0.218107
| 0.087713
| 0.056542
| 0.079739
| 0.460674
| 0.460674
| 0.333092
| 0.300471
| 0.183762
| 0.137006
| 0
| 0
| 0.261624
| 4,495
| 138
| 88
| 32.572464
| 0.831274
| 0.008899
| 0
| 0.262295
| 0
| 0
| 0.120144
| 0.068269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.065574
| 0
| 0.229508
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
debcd3fde3c56a4f5ccca0c23d8a57a7d2afd960
| 588
|
py
|
Python
|
Numbers/PrimeFac.py
|
Arjuna197/the100
|
2963b4fe1b1b8e673a23b2cf97f4bcb263af9781
|
[
"MIT"
] | 1
|
2022-02-20T18:49:49.000Z
|
2022-02-20T18:49:49.000Z
|
Numbers/PrimeFac.py
|
dan-garvey/the100
|
2963b4fe1b1b8e673a23b2cf97f4bcb263af9781
|
[
"MIT"
] | 13
|
2017-12-13T02:31:54.000Z
|
2017-12-13T02:37:45.000Z
|
Numbers/PrimeFac.py
|
dan-garvey/the100
|
2963b4fe1b1b8e673a23b2cf97f4bcb263af9781
|
[
"MIT"
] | null | null | null |
from math import isqrt

def isPrime(num):
    if num < 2:
        return False
    if num < 4:  # 2 and 3 are prime
        return True
    if num % 2 == 0 or num % 3 == 0:
        return False
    # check candidate divisors up to and including the integer square root
    # (the original range stopped short of it, wrongly reporting e.g. 25 prime)
    for n in range(5, isqrt(num) + 1):
        if num % n == 0:
            return False
    return True

print('enter a positive integer')
FacMe = int(input())
primefacts = [1]
if not isPrime(FacMe):
    if FacMe % 2 == 0:
        primefacts.append(2)
    if FacMe % 3 == 0:
        primefacts.append(3)
    for i in range(5, FacMe):
        if FacMe % i == 0:
            if isPrime(i):
                primefacts.append(i)
else:
    primefacts.append(FacMe)
print(primefacts)
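# Hedged companion sketch (not in the original): trial division returning the
# prime factors with multiplicity, e.g. prime_factors(360) -> [2, 2, 2, 3, 3, 5].
def prime_factors(n):
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:  # whatever remains is itself prime
        factors.append(n)
    return factors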
| 21.777778
| 40
| 0.547619
| 85
| 588
| 3.788235
| 0.388235
| 0.198758
| 0.074534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042821
| 0.32483
| 588
| 26
| 41
| 22.615385
| 0.768262
| 0
| 0
| 0.083333
| 0
| 0
| 0.042705
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
debe6ce18f853e6b1e54abf97ade00987edf8450
| 1,270
|
py
|
Python
|
runner/run_descriptions/runs/curious_vs_vanilla.py
|
alex-petrenko/curious-rl
|
6cd0eb78ab409c68f8dad1a8542d625f0dd39114
|
[
"MIT"
] | 18
|
2018-12-29T01:52:25.000Z
|
2021-11-08T06:48:20.000Z
|
runner/run_descriptions/runs/curious_vs_vanilla.py
|
alex-petrenko/curious-rl
|
6cd0eb78ab409c68f8dad1a8542d625f0dd39114
|
[
"MIT"
] | 2
|
2019-06-13T12:52:55.000Z
|
2019-10-30T03:27:11.000Z
|
runner/run_descriptions/runs/curious_vs_vanilla.py
|
alex-petrenko/curious-rl
|
6cd0eb78ab409c68f8dad1a8542d625f0dd39114
|
[
"MIT"
] | 3
|
2019-05-11T07:50:53.000Z
|
2021-11-18T08:15:56.000Z
|
from runner.run_descriptions.run_description import RunDescription, Experiment, ParamGrid
_params = ParamGrid([
('prediction_bonus_coeff', [0.00, 0.05]),
])
_experiments = [
Experiment(
'doom_maze_very_sparse',
'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze_very_sparse --gpu_mem_fraction=0.1 --train_for_env_steps=2000000000',
_params.generate_params(randomize=False),
),
# Experiment(
# 'doom_maze_sparse',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze_sparse --gpu_mem_fraction=0.1 --train_for_env_steps=100000000',
# _params.generate_params(randomize=False),
# ),
# Experiment(
# 'doom_maze',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze --gpu_mem_fraction=0.1 --train_for_env_steps=50000000',
# _params.generate_params(randomize=False),
# ),
# Experiment(
# 'doom_basic',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_basic --gpu_mem_fraction=0.1 --train_for_env_steps=10000000',
# _params.generate_params(randomize=False),
# ),
]
DOOM_CURIOUS_VS_VANILLA = RunDescription('doom_curious_vs_vanilla', experiments=_experiments, max_parallel=5)
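# Hedged sketch of what the grid conceptually expands to (the real ParamGrid
# API may differ): the cross product of each parameter's value list.
#   import itertools
#   names, values = ['prediction_bonus_coeff'], [[0.00, 0.05]]
#   combos = [dict(zip(names, vs)) for vs in itertools.product(*values)]
#   # -> [{'prediction_bonus_coeff': 0.0}, {'prediction_bonus_coeff': 0.05}]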
| 40.967742
| 145
| 0.711024
| 156
| 1,270
| 5.352564
| 0.301282
| 0.095808
| 0.081437
| 0.11497
| 0.653892
| 0.613174
| 0.613174
| 0.555689
| 0.431138
| 0.354491
| 0
| 0.05482
| 0.166929
| 1,270
| 30
| 146
| 42.333333
| 0.734405
| 0.499213
| 0
| 0
| 0
| 0.083333
| 0.322581
| 0.301613
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dec0b14005ec6feafc62d8f18253556640fa35db
| 145,150
|
py
|
Python
|
py/countdowntourney.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
py/countdowntourney.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
py/countdowntourney.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/python3
import sys
import sqlite3;
import re;
import os;
import random
import qualification
from cttable import CandidateTable, TableVotingGroup, PhantomTableVotingGroup
import cttable
SW_VERSION_SPLIT = (1, 1, 4)
SW_VERSION = ".".join([str(x) for x in SW_VERSION_SPLIT])
EARLIEST_COMPATIBLE_DB_VERSION = (0, 7, 0)
RANK_WINS_POINTS = 0;
RANK_POINTS = 1;
RANK_WINS_SPREAD = 2;
RATINGS_MANUAL = 0
RATINGS_GRADUATED = 1
RATINGS_UNIFORM = 2
CONTROL_NUMBER = 1
CONTROL_CHECKBOX = 2
UPLOAD_FAIL_TYPE_HTTP = 1
UPLOAD_FAIL_TYPE_REJECTED = 2
LOG_TYPE_NEW_RESULT = 1
LOG_TYPE_CORRECTION = 2
LOG_TYPE_COMMENT = 96
LOG_TYPE_COMMENT_VIDEPRINTER_FLAG = 1
LOG_TYPE_COMMENT_WEB_FLAG = 4
teleost_modes = [
{
"id" : "TELEOST_MODE_AUTO",
"name" : "Auto",
"desc" : "Automatic control. This will show Fixtures at the start of a round, Standings/Videprinter during the round, and Standings/Table Results when all games in the round have been played.",
"menuorder" : 0,
"image" : "/images/screenthumbs/auto.png",
"fetch" : [ "all" ]
},
{
"id" : "TELEOST_MODE_STANDINGS",
"name" : "Standings",
"desc" : "The current standings table and nothing else.",
"image" : "/images/screenthumbs/standings_only.png",
"menuorder" : 5,
"fetch" : [ "standings" ]
},
{
"id" : "TELEOST_MODE_STANDINGS_VIDEPRINTER",
"name" : "Standings / Videprinter",
"desc" : "Standings table with latest results appearing in the lower third of the screen.",
"image" : "/images/screenthumbs/standings_videprinter.png",
"menuorder" : 1,
"fetch" : [ "standings", "logs" ]
},
{
"id" : "TELEOST_MODE_STANDINGS_RESULTS",
"name" : "Standings / Table Results",
"desc" : "Standings table with the current round's fixtures and results cycling on the lower third of the screen.",
"image" : "/images/screenthumbs/standings_results.png",
"menuorder" : 2,
"fetch" : [ "standings", "games" ]
},
{
"id" : "TELEOST_MODE_TECHNICAL_DIFFICULTIES",
"name" : "Technical Difficulties",
"desc" : "Ceci n'est pas un probleme technique.",
"image" : "/images/screenthumbs/technical_difficulties.png",
"menuorder" : 10,
"fetch" : []
},
{
"id" : "TELEOST_MODE_FIXTURES",
"name" : "Fixtures",
"desc" : "Table of all fixtures in the next or current round.",
"image" : "/images/screenthumbs/fixtures.png",
"menuorder" : 3,
"fetch" : [ "games" ]
},
{
"id" : "TELEOST_MODE_TABLE_NUMBER_INDEX",
"name" : "Table Number Index",
"desc" : "A list of all the player names and their table numbers, in alphabetical order of player name.",
"image" : "/images/screenthumbs/table_index.png",
"menuorder" : 4,
"fetch" : [ "games" ]
},
{
"id" : "TELEOST_MODE_OVERACHIEVERS",
"name" : "Overachievers",
"desc" : "Table of players ranked by how highly they finish above their seeding position. This is only relevant if the players have different ratings.",
"image" : "/images/screenthumbs/overachievers.png",
"menuorder" : 6,
"fetch" : [ "overachievers" ]
},
{
"id" : "TELEOST_MODE_TUFF_LUCK",
"name" : "Tuff Luck",
"desc" : "Players who have lost three or more games, ordered by the sum of their three lowest losing margins.",
"image" : "/images/screenthumbs/tuff_luck.png",
"menuorder" : 7,
"fetch" : [ "tuffluck" ]
},
{
"id" : "TELEOST_MODE_HIGH_SCORES",
"name" : "High scores",
"desc" : "Highest winning scores, losing scores and combined scores in all heat games.",
"image" : "/images/screenthumbs/high_scores.jpg",
"menuorder" : 8,
"fetch" : [ "highscores" ]
}
#{
# "id" : "TELEOST_MODE_FASTEST_FINISHERS",
# "name" : "Fastest Finishers",
# "desc" : "A cheeky way to highlight which tables are taking too long to finish their games.",
# "image" : "/images/screenthumbs/placeholder.png",
# "menuorder" : 9,
# "fetch" : []
#}
#,{
# "id" : "TELEOST_MODE_CLOCK",
# "name" : "Clock",
# "desc" : "For some reason.",
# "image" : "/images/screenthumbs/placeholder.png",
# "menuorder" : 10,
# "fetch" : []
#}
]
teleost_mode_id_to_num = dict()
for idx in range(len(teleost_modes)):
teleost_modes[idx]["num"] = idx
teleost_mode_id_to_num[teleost_modes[idx]["id"]] = idx
teleost_per_view_option_list = [
(teleost_mode_id_to_num["TELEOST_MODE_AUTO"], "autousetableindex", CONTROL_CHECKBOX, "$CONTROL Show name-to-table index at start of round", 0),
(teleost_mode_id_to_num["TELEOST_MODE_AUTO"], "autocurrentroundmusthavegamesinalldivisions", CONTROL_CHECKBOX, "$CONTROL Only switch to Fixtures display after fixtures are generated for all divisions", 1),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS"], "standings_only_lines", CONTROL_NUMBER, "Players per page", 12),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS"], "standings_only_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 12),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_standings_lines", CONTROL_NUMBER, "Players per page", 8),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_standings_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_spell_big_scores", CONTROL_CHECKBOX, "$CONTROL Videprinter: repeat unbelievably high scores in words", 0),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_big_score_min", CONTROL_NUMBER, "$INDENT An unbelievably high score is $CONTROL or more", 90),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_standings_lines", CONTROL_NUMBER, "Players per standings page", 8),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_standings_scroll", CONTROL_NUMBER, "Standings scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_results_lines", CONTROL_NUMBER, "Number of results per page", 3),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_results_scroll", CONTROL_NUMBER, "Results scroll interval $CONTROL seconds", 5),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_show_unstarted_round_if_single_game", CONTROL_CHECKBOX, "$CONTROL Show unstarted next round if it only has one game", 1),
(teleost_mode_id_to_num["TELEOST_MODE_FIXTURES"], "fixtures_lines", CONTROL_NUMBER, "Lines per page", 12),
(teleost_mode_id_to_num["TELEOST_MODE_FIXTURES"], "fixtures_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_rows", CONTROL_NUMBER, "Rows per page $CONTROL", 12),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_columns", CONTROL_NUMBER, "Columns per page", 2),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 12)
]
create_tables_sql = """
begin transaction;
-- PLAYER table
create table if not exists player (
id integer primary key autoincrement,
name text,
rating float,
team_id int,
short_name text,
withdrawn int not null default 0,
division int not null default 0,
division_fixed int not null default 0,
avoid_prune int not null default 0,
require_accessible_table int not null default 0,
preferred_table int not null default -1,
unique(name), unique(short_name)
);
-- TEAM table
create table if not exists team (
id integer primary key autoincrement,
name text,
colour int,
unique(name)
);
insert into team(name, colour) values('White', 255 * 256 * 256 + 255 * 256 + 255);
insert into team(name, colour) values('Blue', 128 * 256 + 255);
-- GAME table, containing scheduled games and played games
create table if not exists game (
round_no int,
seq int,
table_no int,
division int,
game_type text,
p1 integer,
p1_score integer,
p2 integer,
p2_score integer,
tiebreak int,
unique(round_no, seq)
);
-- game log, never deleted from
create table if not exists game_log (
seq integer primary key autoincrement,
ts text,
round_no int,
round_seq int,
table_no int,
division int,
game_type text,
p1 integer,
p1_score int,
p2 integer,
p2_score int,
tiebreak int,
log_type int,
comment text default null
);
-- Games where we don't yet know who the players are going to be, but we
-- do know it's going to be "winner of this match versus winner of that match".
create table if not exists game_pending (
round_no int,
seq int,
seat int,
winner int,
from_round_no int,
from_seq int,
unique(round_no, seq, seat)
);
-- options, such as what to sort players by, how to decide fixtures, etc
create table if not exists options (
name text primary key,
value text
);
-- metadata for per-view options in teleost (values stored in "options" above)
create table if not exists teleost_options (
mode int,
seq int,
name text primary key,
control_type int,
desc text,
default_value text,
unique(mode, seq)
);
-- Table in which we persist the HTML form settings given to a fixture
-- generator
create table if not exists fixgen_settings (
fixgen text,
name text,
value text
);
-- Round names. When a fixture generator generates some fixtures, it will
-- probably create a new round. This is always given a number, but it can
-- also be given a name, e.g. "Quarter-finals". The "round type" column is
-- no longer used.
create table if not exists rounds (
id integer primary key,
type text,
name text
);
create view if not exists rounds_derived as
select r.id,
case when r.name is not null and r.name != '' then r.name
when gc.qf = gc.total then 'Quarter-finals'
when gc.sf = gc.total then 'Semi-finals'
when gc.f = gc.total then 'Final'
when gc.tp = gc.total then 'Third Place'
when gc.f + gc.tp = gc.total then 'Final & Third Place'
else 'Round ' || cast(r.id as text) end as name
from rounds r,
(select g.round_no,
sum(case when g.game_type = 'QF' then 1 else 0 end) qf,
sum(case when g.game_type = 'SF' then 1 else 0 end) sf,
sum(case when g.game_type = '3P' then 1 else 0 end) tp,
sum(case when g.game_type = 'F' then 1 else 0 end) f,
sum(case when g.game_type = 'N' then 1 else 0 end) n,
sum(case when g.game_type = 'P' then 1 else 0 end) p,
count(*) total
from game g
group by g.round_no) gc
where gc.round_no = r.id;
create view if not exists completed_game as
select * from game
where p1_score is not null and p2_score is not null;
create view if not exists completed_heat_game as
select * from game
where p1_score is not null and p2_score is not null and game_type = 'P';
create view if not exists game_divided as
select round_no, seq, table_no, game_type, p1 p_id, p1_score p_score,
p2 opp_id, p2_score opp_score, tiebreak
from game
union all
select round_no, seq, table_no, game_type, p2 p_id, p2_score p_score,
p1 opp_id, p1_score opp_score, tiebreak
from game;
create view if not exists heat_game_divided as
select * from game_divided where game_type = 'P';
create view if not exists player_wins as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score > g.opp_score then 1
else 0 end) wins
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_draws as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score == g.opp_score then 1
else 0 end) draws
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points as
select p.id, sum(case when g.p_score is null then 0
when g.tiebreak and g.p_score > g.opp_score
then g.opp_score
else g.p_score end) points
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points_against as
select p.id, sum(case when g.opp_score is null then 0
when g.tiebreak and g.opp_score > g.p_score
then g.p_score
else g.opp_score end) points_against
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played as
select p.id, sum(case when g.p_score is not null and g.opp_score is not null then 1 else 0 end) played
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played_first as
select p.id, count(g.p1) played_first
from player p left outer join completed_heat_game g on p.id = g.p1
group by p.id;
create table final_game_types(game_type text, power int);
insert into final_game_types values ('QF', 2), ('SF', 1), ('F', 0);
create view if not exists player_finals_results as
select p.id, coalesce(gd.game_type, gt.game_type) game_type,
case when gd.p_score is null then '-'
when gd.p_score > gd.opp_score then 'W'
when gd.p_score = gd.opp_score then 'D'
else 'L'
end result
from player p, final_game_types gt
left outer join game_divided gd on p.id = gd.p_id
and (gd.game_type = gt.game_type or (gt.game_type = 'F' and gd.game_type = '3P'));
create view if not exists player_finals_form as
select p.id, coalesce(pfr_qf.result, '-') qf,
coalesce(pfr_sf.result, '-') sf,
case when pfr_f.result is null then '-'
when pfr_f.game_type = '3P' then lower(pfr_f.result)
else pfr_f.result end f
from player p
left outer join player_finals_results pfr_qf on p.id = pfr_qf.id and pfr_qf.game_type = 'QF'
left outer join player_finals_results pfr_sf on p.id = pfr_sf.id and pfr_sf.game_type = 'SF'
left outer join player_finals_results pfr_f on p.id = pfr_f.id and pfr_f.game_type in ('3P', 'F')
group by p.id;
create view if not exists player_standings as
select p.id, p.name, p.division, played.played, wins.wins, draws.draws,
points.points, points_against.points_against, ppf.played_first,
pff.qf || pff.sf || upper(pff.f) finals_form,
case when pff.f = '-' then 0
else
case when pff.qf = 'W' then 48
when pff.qf = 'D' then 32
when pff.qf = 'L' then 16
else case when pff.sf != '-' or pff.f != '-' then 48 else 0 end
end +
case when pff.sf = 'W' then 12
when pff.sf = 'D' then 8
when pff.sf = 'L' then 4
-- If you're playing in a third place match then you're considered
-- to have lost the nonexistent semi-final. If you're playing in a
-- final then you're considered to have won the semi-final.
else case when pff.f in ('w', 'd', 'l') then 4
when pff.f in ('W', 'D', 'L') then 12
else 0 end
end +
case when pff.f = 'W' then 3
when pff.f = 'D' then 2
when pff.f = 'L' then 1
else 0
end
end finals_points
from player p, player_wins wins, player_draws draws, player_played played,
player_points points, player_points_against points_against,
player_played_first ppf, player_finals_form pff
where p.id = wins.id
and p.id = played.id
and p.id = points.id
and p.id = draws.id
and p.id = points_against.id
and p.id = ppf.id
and p.id = pff.id;
-- Tables for controlling the display system Teleost
create table if not exists teleost(current_mode int);
delete from teleost;
insert into teleost values(0);
create table if not exists teleost_modes(num int, name text, desc text);
create table if not exists tr_opts (
bonus float,
rating_diff_cap float
);
delete from tr_opts;
insert into tr_opts (bonus, rating_diff_cap) values (50, 40);
-- View for working out tournament ratings
-- For each game, you get 50 + your opponent's rating if you win,
-- your opponent's rating if you draw, and your opponent's rating - 50 if
-- you lost. For the purpose of this calculation, your opponent's rating
-- is your opponent's rating at the start of the tourney, except where that
-- is more than 40 away from your own, in which case it's your rating +40 or
-- -40 as appropriate.
-- The 50 and 40 are configurable, in the tr_opts table.
create view tournament_rating as
select p.id, p.name,
avg(case when hgd.p_score > hgd.opp_score then rel_ratings.opp_rating + tr_opts.bonus
when hgd.p_score = hgd.opp_score then rel_ratings.opp_rating
else rel_ratings.opp_rating - tr_opts.bonus end) tournament_rating
from player p, heat_game_divided hgd on p.id = hgd.p_id,
(select me.id p_id, you.id opp_id,
case when you.rating < me.rating - tr_opts.rating_diff_cap
then me.rating - tr_opts.rating_diff_cap
when you.rating > me.rating + tr_opts.rating_diff_cap
then me.rating + tr_opts.rating_diff_cap
else you.rating end opp_rating
from player me, player you, tr_opts) rel_ratings
on rel_ratings.p_id = p.id and hgd.opp_id = rel_ratings.opp_id,
tr_opts
where hgd.p_score is not null and hgd.opp_score is not null
group by p.id, p.name;
-- Table for information about tables (boards). The special table_no -1 means
-- the default settings for tables. So if table -1 is marked as accessible
-- that means every table not listed is considered to be accessible.
create table board (
table_no integer primary key,
accessible integer not null
);
-- By default, if a board isn't listed in this table then it isn't accessible.
insert into board (table_no, accessible) values (-1, 0);
-- Log any failures to upload updates
create table if not exists upload_error_log (
ts text,
failure_type int,
message text
);
-- Time of last successful upload
create table if not exists upload_success (
ts text
);
insert into upload_success values (null);
commit;
""";
class TourneyException(Exception):
def __init__(self, description=None):
if description:
self.description = description;
class TourneyInProgressException(TourneyException):
description = "Tournament is in progress."
pass;
class PlayerDoesNotExistException(TourneyException):
description = "Player does not exist."
pass;
class PlayerExistsException(TourneyException):
description = "Player already exists."
pass;
class DuplicatePlayerException(TourneyException):
description = "No two players are allowed to have the same name."
pass
class UnknownRankMethodException(TourneyException):
description = "Unknown ranking method."
pass;
class DBNameExistsException(TourneyException):
description = "Tourney name already exists."
pass;
class DBNameDoesNotExistException(TourneyException):
description = "No tourney by that name exists."
pass;
class InvalidDBNameException(TourneyException):
description = "Invalid tourney name."
pass;
class InvalidRatingException(TourneyException):
description = "Invalid rating. Rating must be an integer."
pass;
class TooManyPlayersException(TourneyException):
description = "You've got too many players. Turf some out onto the street."
pass
class IncompleteRatingsException(TourneyException):
description = "Incomplete ratings - specify ratings for nobody or everybody."
pass;
class InvalidDivisionNumberException(TourneyException):
description = "Invalid division number"
pass
class InvalidPlayerNameException(TourneyException):
description = "A player's name is not allowed to be blank or consist entirely of whitespace."
class InvalidTableSizeException(TourneyException):
description = "Invalid table size - number of players per table must be 2 or 3."
pass;
class FixtureGeneratorException(TourneyException):
description = "Failed to generate fixtures."
pass;
class PlayerNotInGameException(TourneyException):
description = "That player is not in that game."
pass;
class NotMostRecentRoundException(TourneyException):
description = "That is not the most recent round."
pass
class NoGamesException(TourneyException):
description = "No games have been played."
pass
class IllegalDivisionException(TourneyException):
description = "Cannot distribute players into the specified number of divisions in the way you have asked, either because there aren't enough players, or the number of players in a division cannot be set to the requested multiple."
pass
class DBVersionMismatchException(TourneyException):
description = "This tourney database file was created with a version of atropine which is not compatible with the one you're using."
pass
class InvalidEntryException(TourneyException):
description = "Result entry is not valid."
pass
class QualificationTimeoutException(TourneyException):
description = "In calculating the standings table, we took too long to work out which players, if any, have qualified for the final. This may be due to an unusually large number of players, or an unusual tournament setup. In this case it is strongly recommended go to General Setup and disable qualification analysis by setting the number of places in the qualification zone to zero."
pass
class InvalidDateException(TourneyException):
    def __init__(self, reason):
        self.description = reason

class InvalidTeamException(TourneyException):
    # Raised by set_players() below but not defined anywhere earlier in this
    # section, which would have made raising it a NameError; defined here.
    description = "Player assigned to an invalid or nonexistent team."
    pass
def get_teleost_mode_services_to_fetch(mode):
if mode < 0 or mode >= len(teleost_modes):
return [ "all" ]
else:
return teleost_modes[mode]["fetch"]
class Player(object):
def __init__(self, name, rating=0, team=None, short_name=None, withdrawn=False, division=0, division_fixed=False, player_id=None, avoid_prune=False, require_accessible_table=False, preferred_table=None):
self.name = name;
self.rating = rating;
self.team = team;
self.withdrawn = bool(withdrawn)
if short_name:
self.short_name = short_name
else:
self.short_name = name
self.division = division
# If true, player has been manually put in this division rather than
# happened to fall into it because of their rating
self.division_fixed = division_fixed
self.player_id = player_id
self.avoid_prune = avoid_prune
self.require_accessible_table = require_accessible_table
self.preferred_table = preferred_table
def __eq__(self, other):
if other is None:
return False;
elif self.name == other.name:
return True;
else:
return False;
def __ne__(self, other):
return not(self.__eq__(other));
# Emulate a 3-tuple
def __len__(self):
return 3;
def __getitem__(self, key):
return [self.name, self.rating, self.division][key];
def __str__(self):
return self.name;
def is_player_known(self):
return True;
def is_pending(self):
return False;
def is_withdrawn(self):
return self.withdrawn
def make_dict(self):
return {
"name" : self.name,
"rating" : self.rating
};
def get_name(self):
return self.name;
def get_rating(self):
return self.rating
def get_id(self):
return self.player_id
def get_team_colour_tuple(self):
if self.team:
return self.team.get_colour_tuple()
else:
return None
def get_team(self):
return self.team
def get_team_id(self):
if self.team:
return self.team.get_id()
else:
return None
def get_short_name(self):
return self.short_name
def get_division(self):
return self.division
def is_division_fixed(self):
return self.division_fixed
def is_avoiding_prune(self):
return self.avoid_prune
def is_requiring_accessible_table(self):
return self.require_accessible_table
def get_preferred_table(self):
if self.preferred_table is None or self.preferred_table < 0:
return None
else:
return self.preferred_table
def get_first_name(name):
return name.split(" ", 1)[0]
def get_first_name_and_last_initial(name):
names = name.split(" ", 1)
if len(names) < 2 or len(names[1]) < 1:
return get_first_name(name)
else:
return names[0] + " " + names[1][0]
def get_short_name(name, player_names):
short_name = get_first_name(name)
for op in player_names:
if name != op and short_name == get_first_name(op):
break
else:
return short_name
short_name = get_first_name_and_last_initial(name)
for op in player_names:
if name != op and short_name == get_first_name_and_last_initial(op):
break
else:
return short_name
return name
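# Illustrative example (hypothetical names): with players "Alice Smith",
# "Alice Jones" and "Bob Hope", the stages above resolve collisions like so:
#   get_short_name("Bob Hope", names)    -> "Bob"      (first name is unique)
#   get_short_name("Alice Smith", names) -> "Alice S"  (first names clash)
#   get_short_name("Alice Jones", names) -> "Alice J"
# If even first name plus last initial clashes, the full name is returned.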
# When we submit a player list to a new tournament, set_players() takes a list
# of these objects.
class EnteredPlayer(object):
def __init__(self, name, rating, division=0, team_id=None,
avoid_prune=False, withdrawn=False,
requires_accessible_table=False, preferred_table=None):
self.name = name.strip()
self.short_name = self.name
self.rating = rating
self.division = division
self.team_id = team_id
self.avoid_prune = avoid_prune
self.withdrawn = withdrawn
self.requires_accessible_table = requires_accessible_table
self.preferred_table = preferred_table
def get_name(self):
return self.name
def get_rating(self):
return self.rating
def set_rating(self, rating):
self.rating = rating
def set_short_name(self, short_name):
self.short_name = short_name
def get_short_name(self):
return self.short_name
def get_division(self):
return self.division
def get_team_id(self):
return self.team_id
def get_avoid_prune(self):
return self.avoid_prune
def get_withdrawn(self):
return self.withdrawn
def get_requires_accessible_table(self):
return self.requires_accessible_table
def get_preferred_table(self):
return self.preferred_table
# This object can be on one side and/or other of a Game, just like a Player.
# However, it does not represent a player. It represents the winner or loser
# of another specific game yet to be played.
class PlayerPending(object):
def __init__(self, round_no, round_seq, winner=True, round_short_name=None):
self.round_no = round_no;
self.round_seq = round_seq;
self.winner = winner;
self.round_short_name = round_short_name if round_short_name else ("R%d" % self.round_no)
def __eq__(self, other):
if other is None:
return False;
elif self.round_no == other.round_no and self.round_seq == other.round_seq and self.winner == other.winner:
return True;
else:
return False;
def __len__(self):
return 3;
def __getitem__(self, key):
return [None, 0, 0][key];
def is_player_known(self):
return False;
def is_pending(self):
return True;
def make_dict(self):
return {
"round" : self.round_no,
"round_seq" : self.round_seq,
"winner" : self.winner,
"round_short_name" : self.round_short_name
};
@staticmethod
def from_dict(d):
return PlayerPending(d["round"], d["round_seq"], d["winner"], d["round_short_name"]);
def get_name(self):
return None;
def __str__(self):
if self.round_short_name is None:
return "%s of R%d.%d" % ("Winner" if self.winner else "Loser", self.round_no, self.round_seq);
else:
return "%s of %s.%d" % ("Winner" if self.winner else "Loser", self.round_short_name, self.round_seq);
def get_pending_game_details(self):
return (self.round_no, self.round_seq, self.winner);
# COLIN Hangover 2015: each player is assigned a team
class Team(object):
def __init__(self, team_id, team_name, colour=0xffffff):
self.team_id = team_id;
self.name = team_name;
self.colour = colour;
def get_name(self):
return self.name
def get_id(self):
return self.team_id
def get_hex_colour(self):
return "%06x" % (self.colour)
def get_colour_tuple(self):
return ((self.colour >> 16) & 0xff, (self.colour >> 8) & 0xff, self.colour & 0xff)
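# Example: a Team with colour 0x8040ff reports get_hex_colour() == "8040ff"
# and get_colour_tuple() == (0x80, 0x40, 0xff), i.e. (128, 64, 255).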
class StandingsRow(object):
def __init__(self, position, name, played, wins, points, draws, spread, played_first, rating, tournament_rating, withdrawn, finals_form, finals_points):
self.position = position
self.name = name
self.played = played
self.wins = wins
self.points = points
self.draws = draws
self.spread = spread
self.played_first = played_first
self.rating = rating
self.tournament_rating = tournament_rating
self.withdrawn = withdrawn
self.qualified = False
self.finals_form = finals_form
self.finals_points = finals_points
def __str__(self):
return "%3d. %-25s %3dw %3dd %4dp%s" % (self.position, self.name, self.wins, self.draws, self.points, " (W)" if self.withdrawn else "")
# Emulate a list for bits of the code that require it
def __len__(self):
return 8
def __getitem__(self, index):
return [self.position, self.name, self.played, self.wins, self.points, self.draws, self.spread, self.played_first][index]
def is_qualified(self):
return self.qualified
class Game(object):
def __init__(self, round_no, seq, table_no, division, game_type, p1, p2, s1=None, s2=None, tb=False):
self.round_no = round_no;
self.seq = seq;
self.table_no = table_no;
self.division = division
self.game_type = game_type;
self.p1 = p1;
self.p2 = p2;
self.s1 = s1;
self.s2 = s2;
self.tb = tb;
def is_complete(self):
if self.s1 is not None and self.s2 is not None:
return True;
else:
return False;
def are_players_known(self):
if self.p1.is_player_known() and self.p2.is_player_known():
return True;
else:
return False;
def get_team_colours(self):
return [self.p1.get_team_colour_tuple(), self.p2.get_team_colour_tuple()]
def contains_player(self, player):
if self.p1 == player or self.p2 == player:
return True;
else:
return False;
def is_tiebreak(self):
return self.tb
def get_score(self):
return (self.s1, self.s2)
def __str__(self):
if self.is_complete():
return "Round %d, %s, Table %d, %s %s %s" % (self.round_no, get_general_division_name(self.division), self.table_no, str(self.p1), self.format_score(), str(self.p2));
else:
return "Round %d, %s, Table %d, %s v %s" % (self.round_no, get_general_division_name(self.division), self.table_no, str(self.p1), str(self.p2));
def get_short_string(self):
if self.is_complete():
return "%s %s %s" % (str(self.p1), self.format_score(), str(self.p2))
else:
return "%s v %s" % (str(self.p1), str(self.p2))
def make_dict(self):
names = self.get_player_names();
if self.p1.is_pending():
p1pending = self.p1.make_dict();
else:
p1pending = None;
if self.p2.is_pending():
p2pending = self.p2.make_dict();
else:
p2pending = None;
return {
"round_no" : self.round_no,
"round_seq" : self.seq,
"table_no" : self.table_no,
"division" : self.division,
"game_type" : self.game_type,
"p1" : names[0],
"p2" : names[1],
"p1pending" : p1pending,
"p2pending" : p2pending,
"s1" : self.s1,
"s2" : self.s2,
"tb" : self.tb
};
def is_between_names(self, name1, name2):
if not self.p1.is_player_known() or not self.p2.is_player_known():
return False;
(pname1, pname2) = self.get_player_names();
if (pname1 == name1 and pname2 == name2) or (pname1 == name2 and pname2 == name1):
return True;
else:
return False;
def get_players(self):
return [ self.p1, self.p2 ]
def get_player_names(self):
return [self.p1.get_name(), self.p2.get_name()];
def get_short_player_names(self):
return [self.p1.get_short_name(), self.p2.get_short_name()]
def get_player_score(self, player):
if self.p1.is_player_known() and self.p1 == player:
score = self.s1;
elif self.p2.is_player_known() and self.p2 == player:
score = self.s2;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
return score;
def get_player_name_score(self, player_name):
if self.p1.is_player_known() and (self.p1.get_name().lower() == player_name.lower() or self.p1.get_name() == player_name):
return self.s1
elif self.p2.is_player_known() and (self.p2.get_name().lower() == player_name.lower() or self.p2.get_name() == player_name):
return self.s2
else:
raise PlayerNotInGameException("Player %s not in the game between %s and %s." % (str(player_name), str(self.p1), str(self.p2)))
def get_opponent_score(self, player):
if self.p1 == player:
score = self.s2;
elif self.p2 == player:
score = self.s1;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
return score;
def set_player_score(self, player, score):
if self.p1 == player:
self.s1 = score;
elif self.p2 == player:
self.s2 = score;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
def set_tiebreak(self, tb):
self.tb = tb;
def set_score(self, s1, s2, tb):
self.s1 = s1;
self.s2 = s2;
self.tb = tb;
def get_round_no(self):
return self.round_no
def get_division(self):
return self.division
def get_table_no(self):
return self.table_no
def get_round_seq(self):
return self.seq
def get_game_type(self):
return self.game_type
def format_score(self):
if self.s1 is None and self.s2 is None:
return "";
if self.s1 is None:
left = "";
else:
left = str(self.s1);
if self.s2 is None:
right = "";
else:
right = str(self.s2);
if self.tb:
if self.s1 == 0 and self.s2 == 0:
left = "X"
right = "X"
elif self.s1 > self.s2:
left += "*";
else:
right += "*";
return left + " - " + right;
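    # Examples: (s1=70, s2=60, tb=False) -> "70 - 60";
    # (s1=75, s2=70, tb=True) -> "75* - 70", the asterisk marking the
    # tie-break winner; (s1=0, s2=0, tb=True) -> "X - X", the double-loss
    # case recognised by is_double_loss() below.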
def is_double_loss(self):
if self.s1 is not None and self.s2 is not None and self.s1 == 0 and self.s2 == 0 and self.tb:
return True
else:
return False
# Emulate a list of values
def __len__(self):
return 10;
def __getitem__(self, key):
return [self.round_no, self.seq, self.table_no, self.division, self.game_type, str(self.p1), self.s1, str(self.p2), self.s2, self.tb ][key];
def get_general_division_name(num):
if num < 0:
return "Invalid division number %d" % (num)
elif num > 25:
return "Division %d" % (num + 1)
else:
return "Division %s" % (chr(ord('A') + num))
def get_general_short_division_name(num):
if num < 0:
return ""
elif num > 25:
        return str(num + 1)  # was int(), which broke the string return convention
else:
return chr(ord('A') + num)
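# Example: division 0 -> "Division A" / "A", division 2 -> "Division C" / "C";
# past "Z" the numeric form is used, e.g. division 26 -> "Division 27" / "27".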
class TeleostOption(object):
def __init__(self, mode, seq, name, control_type, desc, value):
self.mode = mode
self.seq = seq
self.name = name
self.control_type = control_type
self.desc = desc
self.value = value
class Tourney(object):
def __init__(self, filename, tourney_name, versioncheck=True):
self.filename = filename;
self.name = tourney_name;
self.db = sqlite3.connect(filename);
if versioncheck:
cur = self.db.cursor()
cur.execute("select value from options where name = 'atropineversion'")
row = cur.fetchone()
if row is None:
raise DBVersionMismatchException("This tourney database file was created by an atropine version prior to 0.7.0. It's not compatible with this version of atropine.")
else:
version = row[0]
version_split = version.split(".")
if len(version_split) != 3:
raise DBVersionMismatchException("This tourney database has an invalid version number %s." % (version))
else:
try:
version_split = list(map(int, version_split))
except ValueError:
raise DBVersionMismatchException("This tourney database has an invalid version number %s." % (version))
if tuple(version_split) < EARLIEST_COMPATIBLE_DB_VERSION:
raise DBVersionMismatchException("This tourney database was created with atropine version %s, which is not compatible with this version of atropine (%s)" % (version, SW_VERSION))
self.db_version = tuple(version_split)
else:
self.db_version = (0, 0, 0)
if self.db_version > (0,8,0):
self.round_view_name = "rounds_derived"
else:
self.round_view_name = "rounds"
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def get_name(self):
return self.name
def get_full_name(self):
return self.get_attribute("fullname", self.name)
def set_full_name(self, name):
self.set_attribute("fullname", name)
def get_venue(self):
return self.get_attribute("venue", "")
def set_venue(self, venue):
self.set_attribute("venue", venue)
def get_event_date(self):
date_str = self.get_attribute("eventdate", None)
if not date_str:
return (None, None, None)
else:
fields = date_str.split("-")
if len(fields) != 3:
return (None, None, None)
try:
return tuple([int(x) for x in fields])
except ValueError:
return (None, None, None)
def get_event_date_string(self):
(year, month, day) = self.get_event_date()
if not day or not month or not year:
return None
else:
return "%04d-%02d-%02d" % (year, month, day)
def check_date(self, year, month, day):
if month < 1 or month > 12:
raise InvalidDateException("Invalid date: %d is not a valid month." % (month))
if year < 1 or year > 9999:
raise InvalidDateException("Invalid date: year %04d is out of range." % (year))
if day < 1:
raise InvalidDateException("Invalid date: day of month %d is out of range." % (day))
leap = (year % 4 == 0 and not (year % 100 == 0 and year % 400 != 0))
if month == 2:
day_max = 29 if leap else 28
elif month in (4, 6, 9, 11):
day_max = 30
else:
day_max = 31
if day > day_max:
raise InvalidDateException("Invalid date: day of month %d is out of range for month %d." % (day, month))
def set_event_date(self, year, month, day):
if not year or not month or not day:
self.set_attribute("eventdate", "")
else:
self.check_date(year, month, day)
self.set_attribute("eventdate", "%04d-%02d-%02d" % (year, month, day))
def get_db_version(self):
return ".".join([str(x) for x in self.db_version])
def get_software_version(self):
return get_software_version()
# Number of games in the GAME table - that is, number of games played
# or in progress.
def get_num_games(self):
cur = self.db.cursor();
cur.execute("select count(*) from game");
row = cur.fetchone();
count = row[0];
cur.close();
return count;
def get_next_free_table_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(table_no) from game g where g.round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
next_table_no = 1
else:
next_table_no = row[0] + 1
cur.close()
return next_table_no
def get_next_free_seq_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game g where g.round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
next_seq_no = 1
else:
next_seq_no = row[0] + 1
cur.close()
return next_seq_no
def get_next_free_round_number_for_division(self, div):
cur = self.db.cursor()
cur.execute("select max(round_no) from game g where g.division = ?", (div,))
row = cur.fetchone()
if row is None or row[0] is None:
round_no = 1
else:
round_no = row[0] + 1
cur.close()
return round_no
def get_round_name(self, round_no):
cur = self.db.cursor();
cur.execute("select name from " + self.round_view_name + " where id = ?", (round_no,));
row = cur.fetchone();
if not row:
cur.close();
return None;
else:
cur.close();
return row[0];
def get_short_round_name(self, round_no):
cur = self.db.cursor();
cur.execute("select cast(id as text) short_name from rounds where id = ?", (round_no,));
row = cur.fetchone();
if not row:
cur.close();
return None;
else:
cur.close();
return row[0];
def get_rounds(self):
cur = self.db.cursor();
cur.execute("select g.round_no, r.name from game g left outer join " +
self.round_view_name + " r on g.round_no = r.id group by g.round_no");
rounds = [];
for row in cur:
rdict = dict();
if not row[1]:
rdict["name"] = "Round " + str(row[0]);
else:
rdict["name"] = row[1];
rdict["num"] = row[0];
rounds.append(rdict);
cur.close();
return rounds;
def get_round(self, round_no):
cur = self.db.cursor();
cur.execute("select r.id, r.name from " + self.round_view_name + " r where id = ?", (round_no,));
row = cur.fetchone()
d = None
if row is not None:
d = dict()
d["num"] = row[0]
d["name"] = row[1]
cur.close()
return d
def name_round(self, round_no, round_name):
# Does round_no already exist?
cur = self.db.cursor();
cur.execute("select id from rounds where id = ?", (round_no,));
rows = cur.fetchall();
if len(rows) > 0:
cur.close();
cur = self.db.cursor();
cur.execute("update rounds set name = ?, type = null where id = ?", (round_name, round_no));
else:
cur.close();
cur = self.db.cursor();
cur.execute("insert into rounds(id, name, type) values (?, ?, null)", (round_no, round_name));
self.db.commit();
cur.close()
def get_largest_table_game_count(self, round_no):
cur = self.db.cursor()
cur.execute("select max(num_games) from (select table_no, count(*) num_games from game where round_no = ? group by table_no) x", (round_no,))
result = cur.fetchone()
if result[0] is None:
count = 0
else:
count = int(result[0])
self.db.commit()
cur.close()
return count;
def player_name_exists(self, name):
cur = self.db.cursor()
cur.execute("select count(*) from player where lower(name) = ? or name = ?", (name.lower(), name))
row = cur.fetchone()
if row[0]:
cur.close()
return True
else:
cur.close()
return False
def set_player_avoid_prune(self, name, value):
if self.db_version < (0, 7, 7):
return
cur = self.db.cursor()
cur.execute("update player set avoid_prune = ? where lower(name) = ? or name = ?", (1 if value else 0, name.lower(), name))
cur.close()
self.db.commit()
def get_player_avoid_prune(self, name):
if self.db_version < (0, 7, 7):
return False
cur = self.db.cursor()
cur.execute("select avoid_prune from player where lower(name) = ? or name = ?", (name.lower(), name))
row = cur.fetchone()
if row:
retval = bool(row[0])
else:
raise PlayerDoesNotExistException("Can't get whether player \"%s\" is allowed to play prunes because there is no player with that name." % (name))
cur.close()
self.db.commit()
return retval
def add_player(self, name, rating, division=0):
if self.player_name_exists(name):
raise PlayerExistsException("Can't add player \"%s\" because there is already a player with that name." % (name))
cur = self.db.cursor()
cur.execute("insert into player(name, rating, team_id, short_name, withdrawn, division, division_fixed) values(?, ?, ?, ?, ?, ?, ?)",
(name, rating, None, "", 0, division, 0))
cur.close()
self.db.commit()
# Recalculate everyone's short names
cur = self.db.cursor()
players = self.get_players()
for p in players:
short_name = get_short_name(p.get_name(), [ x.get_name() for x in players ])
cur.execute("update player set short_name = ? where (lower(name) = ? or name = ?)", (short_name, p.get_name().lower(), p.get_name()))
self.db.commit()
# players must be a list of EnteredPlayer objects.
# This function removes any players currently registered.
def set_players(self, players, auto_rating_behaviour=RATINGS_UNIFORM):
# If there are any games, in this tournament, it's too late to
# replace the player list. You can, however, withdraw players or
# add individual players.
if self.get_num_games() > 0:
raise TourneyInProgressException("Replacing the player list is not permitted once the tournament has started.");
# Make sure no player names are blank
for p in players:
if not p.get_name():
raise InvalidPlayerNameException()
# Make sure all the player names are case-insensitively unique
for pi in range(len(players)):
for opi in range(pi + 1, len(players)):
if players[pi].get_name().lower() == players[opi].get_name().lower():
raise DuplicatePlayerException("No two players are allowed to have the same name, and you've got more than one %s." % (players[pi].get_name()))
teams = self.get_teams()
team_ids = [t.get_id() for t in teams]
# Make sure for each player that if they're on a team, that team
# exists
for p in players:
team = p.get_team_id()
if team is not None and team not in team_ids:
raise InvalidTeamException("Player \"%s\" is being assigned to a team with an invalid or nonexistent number.\n" % (p.get_name()))
# For each player, work out a "short name", which will be the first
# of their first name, first name and last initial, and full name,
# which is unique for that player.
for p in players:
p.set_short_name(get_short_name(p.get_name(), [ x.get_name() for x in players]))
# Check the ratings, if given, are sane
new_players = [];
for p in players:
if p.get_division() < 0:
                raise InvalidDivisionNumberException("Player \"%s\" has been given a division number of %d. It's not allowed to be negative." % (p.get_name(), p.get_division()))
if p.get_rating() is not None:
rating = p.get_rating()
if rating != 0 and auto_rating_behaviour != RATINGS_MANUAL:
# Can't specify any non-zero ratings if automatic
# rating is enabled.
raise InvalidRatingException("Player \"%s\" has been given a rating (%g) but you have not selected manual rating. If manual rating is not used, players may not be given manual ratings in the initial player list except a rating of 0 to indicate a prune or bye." % (p.get_name(), rating))
else:
if auto_rating_behaviour == RATINGS_MANUAL:
# Can't have unrated players if automatic rating
# has been disabled.
raise InvalidRatingException("Player \"%s\" does not have a rating. If manual rating is selected, all players must be given a rating." % (p.get_name()))
if auto_rating_behaviour != RATINGS_MANUAL:
if auto_rating_behaviour == RATINGS_GRADUATED:
max_rating = 2000
min_rating = 1000
else:
max_rating = 1000
min_rating = 1000
new_players = [];
rating = max_rating;
num_unrated_players = len([x for x in players if x.get_rating() is None])
num_players_given_auto_rating = 0
if max_rating != min_rating and num_unrated_players > max_rating - min_rating:
raise TooManyPlayersException("I don't know what kind of crazy-ass tournament you're running here, but it appears to have more than %d players in it. Automatic rating isn't going to work, and to be honest I'd be surprised if anything else did." % num_unrated_players)
for p in players:
if num_unrated_players == 1:
rating = max_rating
else:
rating = float(max_rating - num_players_given_auto_rating * (max_rating - min_rating) / (num_unrated_players - 1))
rating = round(rating, 2)
if p.get_rating() is None:
p.set_rating(rating)
num_players_given_auto_rating += 1
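        # Example of the graduated scheme: three unrated players receive
        # ratings 2000, 1500 and 1000 (evenly spaced from max_rating down to
        # min_rating); with RATINGS_UNIFORM everyone gets 1000.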
self.set_attribute("autoratingbehaviour", auto_rating_behaviour);
self.db.execute("delete from player");
self.db.executemany("insert into player(name, rating, team_id, short_name, withdrawn, division, division_fixed, avoid_prune, require_accessible_table, preferred_table) values (?, ?, ?, ?, ?, ?, 0, ?, ?, ?)",
[ (p.get_name(), p.get_rating(), p.get_team_id(),
p.get_short_name(), int(p.get_withdrawn()),
p.get_division(), int(p.get_avoid_prune()),
int(p.get_requires_accessible_table()),
int(p.get_preferred_table()) if p.get_preferred_table() is not None else -1) for p in players ]);
self.db.commit();
def get_auto_rating_behaviour(self):
return self.get_int_attribute("autoratingbehaviour", RATINGS_UNIFORM)
def get_active_players(self):
# Return the list of players in the tournament who are not marked
# as withdrawn.
return self.get_players(exclude_withdrawn=True)
def get_withdrawn_players(self):
return [x for x in self.get_players() if x.withdrawn]
def get_players(self, exclude_withdrawn=False):
cur = self.db.cursor();
if self.db_version < (0, 7, 7):
avoid_prune_value = "0"
else:
avoid_prune_value = "p.avoid_prune"
if self.db_version < (1, 0, 4):
accessible_value = "0"
else:
accessible_value = "p.require_accessible_table"
if self.db_version < (1, 0, 5):
preferred_table_value = "-1"
else:
preferred_table_value = "p.preferred_table"
if exclude_withdrawn:
condition = "where p.withdrawn = 0"
else:
condition = ""
cur.execute("select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, p.id, %s, %s, %s from player p left outer join team t on p.team_id = t.id %s order by p.rating desc, p.name" % (avoid_prune_value, accessible_value, preferred_table_value, condition))
players = [];
for row in cur:
if row[2] is not None:
team = Team(row[2], row[3], row[4])
else:
team = None
players.append(Player(row[0], row[1], team, row[5], bool(row[6]), row[7], row[8], row[9], row[10], row[11], row[12]));
cur.close();
return players;
def rerate_player(self, name, rating):
try:
rating = float(rating)
except ValueError:
raise InvalidRatingException("Cannot set %s's rating - invalid rating." % name);
cur = self.db.cursor();
cur.execute("update player set rating = ? where (lower(name) = ? or name = ?)", (rating, name.lower(), name));
if cur.rowcount < 1:
self.db.rollback();
raise PlayerDoesNotExistException("Cannot change the rating of player \"" + name + "\" because no player by that name exists.");
cur.close();
self.db.commit();
def rename_player(self, oldname, newname):
newname = newname.strip();
if newname == "":
raise InvalidPlayerNameException()
if self.player_name_exists(newname):
raise PlayerExistsException("Cannot rename player \"%s\" to \"%s\" because there's already another player with that name." % (oldname, newname));
cur = self.db.cursor();
cur.execute("update player set name = ? where (lower(name) = ? or name = ?)", (newname, oldname.lower(), oldname));
if cur.rowcount < 1:
self.db.rollback();
raise PlayerDoesNotExistException("Cannot rename player \"" + oldname + "\" because no player by that name exists.");
cur.close();
# Recalculate everyone's short names, because this name change might
# mean that short names are no longer unique
cur = self.db.cursor()
players = self.get_players()
for p in players:
short_name = get_short_name(p.get_name(), [ x.get_name() for x in players ])
cur.execute("update player set short_name = ? where (lower(name) = ? or name = ?)", (short_name, p.get_name().lower(), p.get_name()))
cur.close()
self.db.commit();
def set_player_division(self, player_name, new_division):
cur = self.db.cursor()
cur.execute("update player set division = ? where (lower(name) = ? or name = ?)", (new_division, player_name.lower(), player_name))
cur.close()
self.db.commit()
# Put each player in a division. The active players are split into
# num_divisions divisions, each of which must have a multiple of
# division_size_multiple players. Names listed as strings in
# automatic_top_div_players are put in the top division. Beyond that,
# players are distributed among the divisions so as to make their sizes
# as equal as possible, while still preserving that the size of every
# division must be a multiple of division_size_multiple.
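    # Example (hypothetical numbers): with 15 active players,
    # num_divisions=2, division_size_multiple=3 and no automatic
    # top-division players, the top division gets 9 players (15/2 rounded
    # up to a multiple of 3) and the second division the remaining 6.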
def set_player_divisions(self, num_divisions, division_size_multiple, by_rating=True, automatic_top_div_players=[]):
players = self.get_players(exclude_withdrawn=True)
# Make a player_ranks map. Players with lower numbers go in higher
# divisions. This may be derived from the player's rating (in which
# case we need to negate it so highly-rated players go in higher
# divisions) or from the player's position in the standings.
player_ranks = dict()
if by_rating:
for p in self.get_players(exclude_withdrawn=False):
player_ranks[p.get_name()] = -p.get_rating()
else:
for s in self.get_standings():
player_ranks[s.name] = s.position
if len(players) % division_size_multiple != 0:
raise IllegalDivisionException()
div_players = [ [] for i in range(num_divisions) ]
remaining_players = []
for p in players:
if p.get_name() in automatic_top_div_players:
div_players[0].append(p)
else:
remaining_players.append(p)
remaining_players = sorted(remaining_players, key=lambda x : player_ranks[x.get_name()]);
# Number of players in the top division is at least
# num_players / num_divisions rounded up to the nearest multiple of
# division_size_multiple.
players_in_div = len(players) // num_divisions
if players_in_div % division_size_multiple > 0:
players_in_div += division_size_multiple - (players_in_div % division_size_multiple)
max_tables_in_div = (len(players) // division_size_multiple) // num_divisions
if (len(players) // division_size_multiple) % num_divisions > 0:
max_tables_in_div += 1
while len(div_players[0]) < players_in_div:
div_players[0].append(remaining_players[0])
remaining_players = remaining_players[1:]
# If division 1 now has an illegal number of players, which is possible
# if, for example, there are 64 players in total but 21 players have
# opted in to division 1, add enough players to satisfy the multiple.
if len(div_players[0]) % division_size_multiple > 0:
num_to_add = division_size_multiple - (len(div_players[0]) % division_size_multiple)
div_players[0] += remaining_players[0:num_to_add]
remaining_players = remaining_players[num_to_add:]
# Sanity check that we've got the right number of players left
if len(remaining_players) % division_size_multiple != 0:
raise IllegalDivisionException()
# Number of tables in total
num_tables = len(players) // division_size_multiple
# If we need an unequal number of players in each division, make
# sure the top divisions get more players
if num_tables % num_divisions > 0 and len(div_players[0]) < max_tables_in_div * division_size_multiple:
# Add another table to division 1
div_players[0] += remaining_players[0:division_size_multiple]
remaining_players = remaining_players[division_size_multiple:]
if num_divisions > 1:
# Distribute the remaining players among the remaining divisions as
# evenly as possible while keeping the size of each division a
# multiple of division_size_multiple.
if len(remaining_players) < division_size_multiple * (num_divisions - 1):
raise ImpossibleDivisionException()
# Number of tables in the divisions after division 1
num_tables = len(remaining_players) // division_size_multiple
# Distribute players amongst divisions, and if we have to have some
# divisions larger than others, make it the higher divisions.
for division in range(1, num_divisions):
div_players[division] += remaining_players[0:((num_tables // (num_divisions - 1)) * division_size_multiple)]
remaining_players = remaining_players[((num_tables // (num_divisions - 1)) * division_size_multiple):]
if num_tables % (num_divisions - 1) >= division:
# This division needs an extra tablesworth
div_players[division] += remaining_players[0:division_size_multiple]
remaining_players = remaining_players[division_size_multiple:]
# Finally, take the withdrawn players, which we haven't put into any
# division, and put them into the division appropriate for their rank.
div_rank_ranges = []
for div_index in range(num_divisions):
div_rank_ranges.append(
(min(player_ranks[x.get_name()] for x in div_players[div_index]),
max(player_ranks[x.get_name()] for x in div_players[div_index])
))
withdrawn_players = [x for x in self.get_players(exclude_withdrawn=False) if x.is_withdrawn()]
for p in withdrawn_players:
for div in range(num_divisions):
if div == num_divisions - 1 or player_ranks[p.get_name()] <= div_rank_ranges[div][1]:
div_players[div].append(p)
break
sql_params = []
division = 0
for l in div_players:
for p in l:
sql_params.append((division, int(p.get_name() in automatic_top_div_players), p.get_name().lower(), p.get_name()))
division += 1
cur = self.db.cursor()
cur.executemany("update player set division = ?, division_fixed = ? where (lower(name) = ? or name = ?)", sql_params)
cur.close()
self.db.commit()
def set_player_withdrawn(self, name, withdrawn):
withdrawn = bool(withdrawn)
cur = self.db.cursor()
cur.execute("update player set withdrawn = ? where name = ?", (1 if withdrawn else 0, name))
if cur.rowcount < 1:
self.db.rollback()
raise PlayerDoesNotExistException("Cannot change withdrawn status for player \"%s\" because no player by that name exists." % (name))
cur.close()
self.db.commit()
def withdraw_player(self, name):
# Set a player as withdrawn, so that the player is not included in the
# player list supplied to the fixture generator for future rounds.
self.set_player_withdrawn(name, 1)
def unwithdraw_player(self, name):
        # Change a player's withdrawn status back to 0
self.set_player_withdrawn(name, 0)
def set_player_requires_accessible_table(self, name, value):
if self.db_version < (1,0,4):
return
cur = self.db.cursor()
cur.execute("update player set require_accessible_table = ? where name = ?", (value, name))
cur.close()
self.db.commit()
def get_player_requires_accessible_table(self, name):
if self.db_version < (1,0,4):
return False
cur = self.db.cursor()
cur.execute("select require_accessible_table from player where name = ?", (name,))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
retval = (row[0] != 0)
cur.close()
return retval
def set_player_preferred_table(self, name, value):
if self.db_version < (1, 0, 5):
return
cur = self.db.cursor()
cur.execute("update player set preferred_table = ? where name = ?", (value if value is not None else -1, name))
cur.close()
self.db.commit()
def get_player_preferred_table(self, name):
if self.db_version < (1, 0, 5):
return None
cur = self.db.cursor()
cur.execute("select preferred_table from player where name = ?", (name,))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
retval = row[0]
cur.close()
if retval is not None and retval < 0:
retval = None
return retval
def get_player_name(self, player_id):
cur = self.db.cursor();
cur.execute("select name from player where id = ?", (player_id,));
rows = cur.fetchall();
if len(rows) < 1:
raise PlayerDoesNotExistException();
cur.close();
self.db.commit();
        return rows[0][0];
def get_player_tournament_rating(self, name):
cur = self.db.cursor()
cur.execute("select tournament_rating from tournament_rating where (lower(name) = ? or name = ?)", (name.lower(), name))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
tournament_rating = row[0]
cur.close()
return tournament_rating
def get_tournament_rating_bonus_value(self):
cur = self.db.cursor()
cur.execute("select bonus from tr_opts")
row = cur.fetchone()
if row is None:
bonus = 50
else:
bonus = row[0]
cur.close()
return bonus
def get_tournament_rating_diff_cap(self):
cur = self.db.cursor()
cur.execute("select rating_diff_cap from tr_opts")
row = cur.fetchone()
if row is None:
diff_cap = 40
else:
diff_cap = row[0]
cur.close()
return diff_cap
def set_tournament_rating_config(self, bonus=50, diff_cap=40):
cur = self.db.cursor()
cur.execute("update tr_opts set bonus = ?, rating_diff_cap = ?", (bonus, diff_cap))
cur.close()
self.db.commit()
def get_show_tournament_rating_column(self):
return bool(self.get_int_attribute("showtournamentratingcolumn", 0))
def set_show_tournament_rating_column(self, value):
self.set_attribute("showtournamentratingcolumn", str(int(value)))
    # games is a list of Game objects. Games where one or both players are
    # not yet known (e.g. a final whose players depend on the semi-final
    # results) have their unknown sides recorded in GAME_PENDING rather
    # than GAME.
def merge_games(self, games):
try:
known_games = [x for x in games if x.are_players_known()];
pending_games = [x for x in games if not x.are_players_known()];
# Records to insert into game_staging, where we use NULL if the
# player isn't known yet
game_records = [(x.round_no, x.seq, x.table_no,
x.division, x.game_type,
x.p1.name if x.p1.is_player_known() else None, x.s1,
x.p2.name if x.p2.is_player_known() else None, x.s2,
x.tb) for x in games];
cur = self.db.cursor();
cur.execute("""create temporary table if not exists game_staging(
round_no int, seq int, table_no int, division int,
game_type text, name1 text, score1 integer,
name2 text, score2 integer, tiebreak integer)""");
cur.execute("""create temporary table if not exists game_staging_ids(
round_no int, seq int, table_no int, division int,
game_type text, p1 integer, score1 integer,
p2 integer, score2 integer, tiebreak integer)""");
cur.execute("""create temporary table if not exists game_pending_staging(
round_no int, seq int, seat int, player_id int)""");
cur.execute("delete from temp.game_staging");
cur.execute("delete from temp.game_staging_ids");
cur.execute("delete from temp.game_pending_staging");
cur.executemany("insert into temp.game_staging values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", game_records);
cur.execute("""insert into temp.game_staging_ids
select g.round_no, g.seq, g.table_no, g.division, g.game_type,
p1.id, g.score1, p2.id, g.score2, g.tiebreak
from temp.game_staging g left outer join player p1
on g.name1 = p1.name left outer join player p2
on g.name2 = p2.name""");
cur.execute("select count(*) from temp.game_staging_ids")
results = cur.fetchone()
# Remove any rows that are already in GAME
cur.execute("""delete from temp.game_staging_ids
where exists(select * from game g where
g.round_no = game_staging_ids.round_no and
g.seq = game_staging_ids.seq and
g.table_no = game_staging_ids.table_no and
g.division = game_staging_ids.division and
g.game_type = game_staging_ids.game_type and
g.p1 = game_staging_ids.p1 and
g.p1_score is game_staging_ids.score1 and
g.p2 = game_staging_ids.p2 and
g.p2_score is game_staging_ids.score2 and
g.tiebreak is game_staging_ids.tiebreak)""");
# Write "new result" logs for rows that don't have a matching
# entry in GAME for (round_no, table_no, game_type, p1, p2)
# with a non-NULL score but the entry we're writing has a
# non-NULL score.
cur.execute("""insert into game_log(
ts, round_no, round_seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak, log_type)
select current_timestamp, round_no, seq, table_no, division,
game_type, p1, score1, p2, score2, tiebreak, 1
from temp.game_staging_ids gs
where score1 is not null and score2 is not null and
p1 is not null and p2 is not null and
not exists(select * from game g where
g.round_no = gs.round_no and
g.seq = gs.seq and
g.table_no = gs.table_no and
g.division = gs.division and
g.game_type = gs.game_type and
g.p1 = gs.p1 and
g.p2 = gs.p2 and
g.p1_score is not null and
g.p2_score is not null)""");
# And write "correction" logs for rows that do have a matching
# entry in game for (round_no, table_no, game_type, p1, p2)
# with a non-NULL score.
cur.execute("""insert into game_log(
ts, round_no, round_seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak, log_type)
select current_timestamp, round_no, seq, table_no, division,
game_type, p1, score1, p2, score2, tiebreak, 2
from temp.game_staging_ids gs
where p1 is not null and p2 is not null and
exists(select * from game g where
g.round_no = gs.round_no and
g.seq = gs.seq and
g.table_no = gs.table_no and
g.division = gs.division and
g.game_type = gs.game_type and
g.p1 = gs.p1 and
g.p2 = gs.p2 and
g.p1_score is not null and
g.p2_score is not null)""");
# Insert rows into game if they're not there already
cur.execute("""insert or replace into game(
round_no, seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak)
select * from temp.game_staging_ids""");
# Insert into GAME_PENDING any sides of a game where the player
# is not yet known
pending_games_records = [];
for g in pending_games:
if not g.p1.is_player_known():
pending_games_records.append((g.round_no, g.seq, 1, g.p1.winner, g.p1.round_no, g.p1.round_seq));
if not g.p2.is_player_known():
pending_games_records.append((g.round_no, g.seq, 2, g.p2.winner, g.p2.round_no, g.p2.round_seq));
cur.executemany("""insert or replace into
game_pending
values (?, ?, ?, ?, ?, ?)""",
pending_games_records);
# If we inserted any rows into GAME whose (round_no, round_seq)
# corresponds to (from_round_no, from_round_seq) in GAME_PENDING,
# it means that we can fill in one or more unknown players in
# GAME. For example, if we inserted the result for a semi-final,
# then we might now be able to fill in the player ID for one side
# of the final.
cur.execute("""insert into temp.game_pending_staging
select gp.round_no, gp.seq, gp.seat,
case when gp.winner = 1 and gsi.score1 > gsi.score2
then gsi.p1
when gp.winner = 1 and gsi.score2 > gsi.score1
then gsi.p2
when gp.winner = 0 and gsi.score1 > gsi.score2
then gsi.p2
when gp.winner = 0 and gsi.score2 > gsi.score1
then gsi.p1
else NULL
end player_id
from game_staging_ids gsi, game_pending gp
on gsi.round_no = gp.from_round_no and
gsi.seq = gp.from_seq""");
cur.execute("select * from temp.game_pending_staging");
updcur = self.db.cursor();
for row in cur:
(round_no, seq, seat, player_id) = row;
updcur.execute("update game set p%d = ? where round_no = ? and seq = ? and p1_score is NULL and p2_score is NULL" % (seat), (player_id, round_no, seq));
self.db.commit();
except:
self.db.rollback();
raise;
def post_news_item(self, round_no, text, post_to_videprinter, post_to_web):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
log_type = LOG_TYPE_COMMENT
if post_to_videprinter:
log_type |= LOG_TYPE_COMMENT_VIDEPRINTER_FLAG
if post_to_web:
log_type |= LOG_TYPE_COMMENT_WEB_FLAG
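            # log_type is LOG_TYPE_COMMENT with optional destination flag
            # bits OR'd in, so e.g. a comment posted to both the videprinter
            # and the web has all three bits set.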
cur.execute("""insert into game_log (ts, round_no, round_seq,
table_no, division, game_type, p1, p1_score, p2, p2_score,
tiebreak, log_type, comment) values (
current_timestamp, ?, null,
null, null, null, null, null, null, null,
null, ?, ?)""", (round_no, log_type, text))
cur.close()
self.db.commit()
def edit_news_item(self, seq, new_text, post_to_videprinter, post_to_web):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
log_type = LOG_TYPE_COMMENT
if post_to_videprinter:
log_type |= LOG_TYPE_COMMENT_VIDEPRINTER_FLAG
if post_to_web:
log_type |= LOG_TYPE_COMMENT_WEB_FLAG
cur.execute("update game_log set comment = ?, log_type = ? where seq = ? and (log_type & ?) != 0", (new_text, log_type, seq, LOG_TYPE_COMMENT))
cur.close()
self.db.commit()
def delete_round_div(self, round_no, division):
try:
cur = self.db.cursor()
cur.execute("delete from game where round_no = ? and division = ?", (round_no, division))
num_deleted = cur.rowcount
cur.execute("select count(*) from game where round_no = ?", (round_no,))
row = cur.fetchone()
games_left_in_round = -1
if row is not None and row[0] is not None:
games_left_in_round = row[0]
if games_left_in_round == 0:
cur.execute("delete from rounds where id = ?", (round_no,))
cur.close()
self.db.commit()
return num_deleted
except:
self.db.rollback()
raise
def delete_round(self, round_no):
latest_round_no = self.get_latest_round_no();
if latest_round_no is None:
raise NoGamesException()
if latest_round_no != round_no:
raise NotMostRecentRoundException()
try:
cur = self.db.cursor()
cur.execute("delete from game where round_no = ?", (latest_round_no,))
cur.execute("delete from rounds where id = ?", (latest_round_no,))
self.db.commit()
except:
self.db.rollback()
raise
def alter_games(self, alterations):
        # alterations is a list of (round_no, seq, p1, p2, game_type) tuples,
        # but the executemany() call below wants (p1 name lowercased,
        # p1 name, p2 name lowercased, p2 name, game_type, round_no, seq).
alterations_reordered = [(x[2].get_name().lower(), x[2].get_name(), x[3].get_name().lower(), x[3].get_name(), x[4], x[0], x[1]) for x in alterations];
cur = self.db.cursor();
cur.executemany("""
update game
set p1 = (select id from player where (lower(name) = ? or name = ?)),
p2 = (select id from player where (lower(name) = ? or name = ?)),
game_type = ?
where round_no = ? and seq = ?""", alterations_reordered);
rows_updated = cur.rowcount;
cur.close();
self.db.commit();
return rows_updated;
def get_player_from_name(self, name):
sql = "select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, p.id, %s, %s, %s from player p left outer join team t on p.team_id = t.id where (lower(p.name) = ? or p.name = ?)" % (
"0" if self.db_version < (0, 7, 7) else "p.avoid_prune",
"0" if self.db_version < (1, 0, 4) else "p.require_accessible_table",
"-1" if self.db_version < (1, 0, 5) else "p.preferred_table"
);
cur = self.db.cursor();
cur.execute(sql, (name.lower(), name));
row = cur.fetchone();
cur.close();
if row is None:
raise PlayerDoesNotExistException("Player with name \"%s\" does not exist." % name);
else:
if row[2] is not None:
team = Team(row[2], row[3], row[4])
else:
team = None
return Player(row[0], row[1], team, row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12]);
def get_player_from_id(self, player_id):
sql = "select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, %s, %s, %s from player p left outer join team t on p.team_id = t.id where p.id = ?" % (
"0" if self.db_version < (0, 7, 7) else "p.avoid_prune",
"0" if self.db_version < (1, 0, 4) else "p.require_accessible_table",
"-1" if self.db_version < (1, 0, 5) else "p.preferred_table"
);
cur = self.db.cursor();
cur.execute(sql, (player_id,));
row = cur.fetchone();
cur.close();
if row is None:
raise PlayerDoesNotExistException("No player exists with ID %d" % player_id);
else:
if row[2] is None:
team = None
else:
team = Team(row[2], row[3], row[4])
return Player(row[0], row[1], team, row[5], row[6], row[7], row[8], player_id, row[9], row[10], row[11]);
def get_latest_started_round(self):
cur = self.db.cursor()
sql = "select max(r.id) from rounds r where (exists(select * from completed_game cg where cg.round_no = r.id) or r.id = (select min(id) from rounds where id >= 0))"
cur.execute(sql)
row = cur.fetchone()
round_no = None
if row is not None and row[0] is not None:
round_no = row[0]
cur.close()
if round_no is None:
return None
return self.get_round(round_no)
def is_round_finished(self, round_no):
cur = self.db.cursor()
cur.execute("select count(*) from game g where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
num_games = 0
else:
num_games = row[0]
cur.execute("select count(*) from completed_game cg where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
num_completed_games = 0
else:
num_completed_games = row[0]
cur.close()
return (num_games > 0 and num_games == num_completed_games)
def round_contains_games_in_all_divisions(self, round_no):
ret = True
cur = self.db.cursor()
cur.execute("select d.division, count(g.round_no) from (select distinct(division) from player p) d left outer join game g on g.division = d.division and g.round_no = ? group by d.division", (round_no,))
for row in cur:
if row[1] == 0:
# There's at least one division that doesn't have
# games generated for it in this round, so don't
# consider this round to exist yet.
ret = False
break
cur.close()
return ret
def get_current_round(self, round_exists_when_all_divisions_have_games=False):
# Return the latest started round, or if that round is finished and
# there's a next round, the next round.
r = self.get_latest_started_round()
if r is None:
return None
if self.is_round_finished(r["num"]):
cur = self.db.cursor()
cur.execute("select min(id) from rounds where id > ?", (r["num"],))
row = cur.fetchone()
if row is not None and row[0] is not None:
next_round_no = row[0]
else:
next_round_no = None
cur.close()
if next_round_no is not None:
# There is a next round
if round_exists_when_all_divisions_have_games:
# Check that this round has at least one game in every
# division, otherwise we won't count it as a valid round
# because it hasn't been fully generated yet
if not self.round_contains_games_in_all_divisions(next_round_no):
next_round_no = None
if next_round_no is not None:
# The next round has been generated, so use that one
r = self.get_round(next_round_no)
else:
if round_exists_when_all_divisions_have_games:
if not self.round_contains_games_in_all_divisions(r["num"]):
r = None
return r
def get_latest_round_no(self):
cur = self.db.cursor();
cur.execute("select max(id) from rounds");
row = cur.fetchone();
if row is None:
cur.close();
return None;
else:
cur.close();
return row[0];
# Get the latest round number for which there is at least one game in
# this division
def get_latest_round_in_division(self, division):
cur = self.db.cursor()
cur.execute("select max(round_no) from game where division = ?", (division,))
row = cur.fetchone()
latest_round = None
if row is not None and row[0] is not None:
latest_round = row[0]
cur.close()
return latest_round
def get_played_unplayed_counts(self, round_no=None):
cur = self.db.cursor();
params = [];
conditions = "";
if round_no is not None:
conditions += "where round_no = ? ";
params.append(round_no);
sql = "select case when p1_score is NULL or p2_score is NULL then 0 else 1 end complete, count(*) from game " + conditions + " group by 1 order by 1";
if params:
cur.execute(sql, params);
else:
cur.execute(sql);
num_played = 0;
num_unplayed = 0;
for r in cur:
if r[0] == 0:
num_unplayed = r[1];
elif r[0] == 1:
num_played = r[1];
cur.close();
return (num_played, num_unplayed);
def count_games_between(self, p1, p2):
sql = """select count(*) from game g
where g.p1 is not null and g.p2 is not null
and (g.p1 = ? and g.p2 = ?) or (g.p1 = ? and g.p2 = ?)"""
cur = self.db.cursor()
cur.execute(sql, (p1.get_id(), p2.get_id(), p2.get_id(), p1.get_id()))
row = cur.fetchone()
cur.close()
if row and row[0]:
return row[0]
else:
return 0
def get_games_between(self, round_no, player_name_1, player_name_2):
conditions = []
params = []
if round_no is not None:
conditions.append("g.round_no = ?")
params.append(round_no)
conditions.append("(((lower(p1.name) = ? or p1.name = ?) and (lower(p2.name) = ? or p2.name = ?)) or ((lower(p2.name) = ? or p2.name = ?) and (lower(p1.name) = ? or p1.name = ?)))")
params.append(player_name_1.lower())
params.append(player_name_1)
params.append(player_name_2.lower())
params.append(player_name_2)
params.append(player_name_1.lower())
params.append(player_name_1)
params.append(player_name_2.lower())
params.append(player_name_2)
conditions.append("(g.p1 is not null and g.p2 is not null)")
cur = self.db.cursor()
sql = """select g.round_no, g.seq, g.table_no, g.division, g.game_type,
g.p1, g.p1_score, g.p2, g.p2_score, g.tiebreak
from game g, player p1 on g.p1 = p1.id,
player p2 on g.p2 = p2.id
where g.p1 is not null and g.p2 is not null """;
for c in conditions:
sql += " and " + c
sql += "\norder by g.round_no, g.division, g.seq";
if len(params) == 0:
cur.execute(sql)
else:
cur.execute(sql, params)
games = []
for row in cur:
(round_no, game_seq, table_no, division, game_type, p1, p1_score, p2, p2_score, tb) = row
            if tb is not None:
                tb = bool(tb)
p1 = self.get_player_from_id(p1)
p2 = self.get_player_from_id(p2)
game = Game(round_no, game_seq, table_no, division, game_type, p1, p2, p1_score, p2_score, tb)
games.append(game);
cur.close();
self.db.commit();
return games;
def get_games(self, round_no=None, table_no=None, game_type=None, only_players_known=True, division=None, only_unplayed=False):
conditions = [];
params = [];
if round_no is not None:
conditions.append("g.round_no = ?");
params.append(round_no);
if table_no is not None:
conditions.append("g.table_no = ?");
params.append(table_no);
if game_type is not None:
conditions.append("g.game_type = ?");
params.append(game_type);
if only_players_known:
conditions.append("(g.p1 is not null and g.p2 is not null)");
if division is not None:
conditions.append("g.division = ?")
params.append(division)
if only_unplayed:
conditions.append("(g.p1_score is null or g.p2_score is null)")
cur = self.db.cursor();
sql = """select g.round_no, g.seq, g.table_no, g.division, g.game_type,
g.p1, g.p1_score, g.p2, g.p2_score, g.tiebreak,
gp1.winner as seat1_which, gp1.from_round_no as seat1_round_no,
gp1.from_seq seat1_seq,
gp2.winner as seat2_which, gp2.from_round_no as seat2_round_no,
gp2.from_seq as seat2_seq
from game g left outer join game_pending gp1
on g.round_no = gp1.round_no and g.seq = gp1.seq and gp1.seat=1
left outer join game_pending gp2
on g.round_no = gp2.round_no and g.seq = gp2.seq and gp2.seat=2
where 1=1 """;
for c in conditions:
sql += " and " + c;
sql += "\norder by g.round_no, g.division, g.seq";
if len(params) == 0:
cur.execute(sql);
else:
cur.execute(sql, params);
rounds = self.get_rounds();
games = [];
for row in cur:
(round_no, game_seq, table_no, division, game_type, p1, p1_score, p2, p2_score, tb, seat1_which, seat1_round_no, seat1_seq, seat2_which, seat2_round_no, seat2_seq) = row
            if tb is not None:
                tb = bool(tb)
for p_index in (1,2):
if p_index == 1:
p_id = p1;
else:
p_id = p2;
if p_id is None:
if p_index == 1:
winner = bool(seat1_which);
of_round_no = int(seat1_round_no);
of_seq = int(seat1_seq);
else:
winner = bool(seat2_which);
of_round_no = int(seat2_round_no);
of_seq = int(seat2_seq);
short_name = "R" + str(of_round_no)
p = PlayerPending(of_round_no, of_seq, winner, short_name);
else:
p = self.get_player_from_id(p_id);
if p_index == 1:
p1 = p;
else:
p2 = p;
game = Game(round_no, game_seq, table_no, division, game_type, p1, p2, p1_score, p2_score, tb)
games.append(game);
cur.close();
self.db.commit();
return games;
def ranked_query(self, query, sort_cols=[]):
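        # Run the query and prepend a 1-based position to each row, using
        # standard competition ranking on sort_cols (1-based column numbers
        # in the query's result): tied rows share a position and the next
        # distinct row skips the tied places, e.g. sort values 10, 10, 8
        # give positions 1, 1, 3.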
pos = 0;
joint = 0;
cur = self.db.cursor();
cur.execute(query);
prev_sort_vals = None;
results = [];
for row in cur:
if sort_cols:
sort_vals = [];
for c in sort_cols:
sort_vals.append(row[c - 1]);
sort_vals = tuple(sort_vals);
if prev_sort_vals and sort_vals == prev_sort_vals:
joint += 1;
else:
pos += joint + 1;
joint = 0;
prev_sort_vals = sort_vals;
else:
pos += 1;
result = [pos];
for val in row:
result.append(val);
result = tuple(result);
results.append(result);
cur.close();
return results;
def get_int_attribute(self, name, defval=None):
value = self.get_attribute(name, defval);
if value is not None:
value = int(value);
return value;
def get_attribute(self, name, defval=None):
cur = self.db.cursor();
cur.execute("select value from options where name = ?", (name,));
value = cur.fetchone();
if value is None or value[0] is None:
value = defval;
else:
value = str(value[0]);
cur.close();
return value;
def set_attribute(self, name, value):
cur = self.db.cursor();
if re.match("^ *-?[0-9]+ *$", str(value)):
value = int(value);
cur.execute("insert or replace into options values (?, ?)", (name, value));
cur.close();
self.db.commit();
def set_teleost_colour_palette(self, value):
self.set_attribute("teleostcolourpalette", value)
def get_teleost_colour_palette(self):
return self.get_attribute("teleostcolourpalette", "Standard")
def get_auto_use_vertical(self):
return self.get_int_attribute("autousevertical", 0) != 0
def set_auto_use_vertical(self, value):
self.set_attribute("autousevertical", str(int(value)))
def set_teleost_animate_scroll(self, value):
self.set_attribute("teleostanimatescroll", str(int(value)))
def get_teleost_animate_scroll(self):
return self.get_int_attribute("teleostanimatescroll", 1) != 0
def set_auto_use_table_index(self, value):
self.set_attribute("autousetableindex", str(int(value)))
def get_auto_use_table_index(self):
return self.get_int_attribute("autousetableindex", 0) != 0
def set_auto_current_round_must_have_games_in_all_divisions(self, value):
self.set_attribute("autocurrentroundmusthavegamesinalldivisions", str(int(value)))
def get_auto_current_round_must_have_games_in_all_divisions(self):
return self.get_int_attribute("autocurrentroundmusthavegamesinalldivisions", 1) != 0
def get_rank_method(self):
return self.get_int_attribute("rankmethod", RANK_WINS_POINTS);
def is_ranking_by_wins(self):
return self.get_rank_method() in [ RANK_WINS_POINTS, RANK_WINS_SPREAD ]
def is_ranking_by_points(self):
return self.get_rank_method() in [ RANK_WINS_POINTS, RANK_POINTS ]
def is_ranking_by_spread(self):
return self.get_rank_method() == RANK_WINS_SPREAD
def set_rank_method(self, method):
if method not in [RANK_WINS_POINTS, RANK_WINS_SPREAD, RANK_POINTS]:
raise UnknownRankMethodException("Can't rank tourney by method %d because I don't know what that is." % method);
self.set_attribute("rankmethod", method);
def set_table_size(self, table_size):
if table_size not in [2,3]:
raise InvalidTableSizeException("Number of players to a table must be 2 or 3.");
self.set_attribute("tablesize", int(table_size));
def get_table_size(self):
return self.get_int_attribute("tablesize", 3);
def set_show_draws_column(self, value):
self.set_attribute("showdrawscolumn", 1 if value else 0)
def get_show_draws_column(self):
        return self.get_int_attribute("showdrawscolumn", 0) != 0
def get_num_divisions(self):
cur = self.db.cursor()
cur.execute("select max(division) + 1 from player")
row = cur.fetchone()
value = row[0]
if value is None:
value = 1
cur.close()
return value
def get_num_active_players(self, div_index=None):
cur = self.db.cursor()
if div_index is not None:
cur.execute("select count(*) from player where division = %d and withdrawn = 0" % (div_index))
else:
cur.execute("select count(*) from player where withdrawn = 0")
row = cur.fetchone()
value = int(row[0])
cur.close()
return value
def get_num_active_players_requiring_accessible_table(self):
if self.db_version < (1, 0, 4):
return 0
cur = self.db.cursor()
cur.execute("select count(*) from player where require_accessible_table != 0 and withdrawn = 0")
row = cur.fetchone()
if row and row[0] is not None:
count = row[0]
else:
count = 0
cur.close()
return count
def get_division_name(self, num):
name = self.get_attribute("div%d_name" % (num))
if name:
return name
else:
return get_general_division_name(num)
def set_division_name(self, num, name):
self.set_attribute("div%d_name" % (num), name)
def get_short_division_name(self, num):
return get_general_short_division_name(num)
def get_standings(self, division=None, exclude_withdrawn_with_no_games=False, calculate_qualification=True):
method = self.get_rank_method();
if method == RANK_WINS_POINTS:
orderby = "s.wins * 2 + s.draws desc, s.points desc, p.name";
rankcols = [10, 4];
elif method == RANK_WINS_SPREAD:
orderby = "s.wins * 2 + s.draws desc, s.points - s.points_against desc, p.name"
rankcols = [10, 6]
elif method == RANK_POINTS:
orderby = "s.points desc, p.name";
rankcols = [4];
else:
raise UnknownRankMethodException("This tourney's standings are ranked by method %d, which I don't recognise." % method);
# If we're also taking account of any finals matches, then finals
# performance has a higher sorting priority than anything else.
rank_finals = self.get_rank_finals()
if rank_finals:
rankcols = [13] + rankcols
orderby = "13 desc, " + orderby
orderby = "order by " + orderby
conditions = []
if division is not None:
conditions.append("s.division = %d " % (division))
if exclude_withdrawn_with_no_games:
conditions.append("(p.withdrawn = 0 or s.played > 0)")
if conditions:
where_clause = "where " + " and ".join(conditions)
else:
where_clause = ""
results = self.ranked_query("select p.name, s.played, s.wins, s.points, s.draws, s.points - s.points_against spread, s.played_first, p.rating, tr.tournament_rating, s.wins * 2 + s.draws, p.withdrawn, %s, %s from player_standings s, player p on p.id = s.id left outer join tournament_rating tr on tr.id = p.id %s %s " % (
"s.finals_form" if self.db_version >= (1, 0, 7) else "''",
"s.finals_points" if self.db_version >= (1, 0, 7) else "0",
where_clause, orderby), rankcols);
standings = [ StandingsRow(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], bool(x[11]), x[12], x[13]) for x in results ]
# If anyone has played any finals matches, don't calculate
# qualification because we're already past that and it wouldn't make
# sense anyway.
for s in standings:
if "W" in s.finals_form or "D" in s.finals_form or "L" in s.finals_form:
calculate_qualification = False
break
if division is not None and calculate_qualification:
# If we can, mark already-qualified players as such
qual_places = self.get_int_attribute("div%d_qualplaces" % (division), 0)
last_round = self.get_int_attribute("div%d_lastround" % (division), 0)
all_games_generated = (last_round != 0 and last_round == self.get_latest_round_in_division(division))
num_games_per_player = self.get_int_attribute("div%d_numgamesperplayer" % (division), 0)
draws_expected = self.get_show_draws_column()
if qual_places > 0 and num_games_per_player > 0:
qualification_standings = [
{
"pos" : x.position,
"name" : x.name,
"played" : x.played,
"win_points" : x.wins * 2 + x.draws,
"non_player" : (x.withdrawn or x.rating == 0)
}
for x in standings
]
# Look through the list for any withdrawn players or prunes,
# which will have a non_player value of True. Non-players
# aren't eligible to win anything, so any player ranked
# below a non-player gets bumped up for the purpose of
# deciding qualification.
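                # e.g. if a prune occupies 2nd place, the players placed
                # 3rd and 4th are treated as 2nd and 3rd when deciding who
                # has qualified.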
num_non_players = 0
last_non_player_pos = None
for row in qualification_standings:
if row["non_player"]:
num_non_players += 1
last_non_player_pos = row["pos"]
elif num_non_players > 0:
# Any player below a non-player in the standings
# table gets bumped up one place. If they're below two
# non-players then they get bumped up two places,
# and so on.
if row["pos"] > last_non_player_pos:
row["pos"] -= num_non_players
# Now remove the non-players from the list we'll pass
# to player_has_qualified().
new_qual_standings = []
for row in qualification_standings:
if not row["non_player"]:
new_qual_standings.append(row)
qualification_standings = new_qual_standings
unplayed_games = [ g.get_player_names()
for g in self.get_games(
game_type="P", division=division,
only_unplayed=True
)
]
for row in qualification_standings:
if row["pos"] <= qual_places and method == RANK_WINS_POINTS:
# This player is in the qualification zone - work out if
# they are guaranteed to stay there
try:
qualified = qualification.player_has_qualified(
qualification_standings, row["name"],
unplayed_games, qual_places,
all_games_generated, num_games_per_player,
draws_expected)
except qualification.QualificationTimeoutException:
raise QualificationTimeoutException()
if qualified:
for standings_row in standings:
if standings_row.name == row["name"]:
standings_row.qualified = True
break
return standings
def get_logs_since(self, seq=None, include_new_games=False, round_no=None, maxrows=None):
cur = self.db.cursor();
sql = """select seq, datetime(ts, 'localtime') ts, round_no,
round_seq, table_no, game_type, p1.name p1, p1_score,
p2.name p2, p2_score, tiebreak, log_type, gl.division,
case when exists(
select * from game_log gl2
where gl.round_no = gl2.round_no
and gl.round_seq = gl2.round_seq
and gl.log_type > 0 and gl2.log_type > 0
and gl2.seq > gl.seq
) then 1 else 0 end superseded, %s
from game_log gl left outer join player p1 on gl.p1 = p1.id
left outer join player p2 on gl.p2 = p2.id where 1=1 """ % (
"comment" if self.db_version >= (1, 0, 6) else "null"
);
if seq is not None:
sql += " and seq > ?"
if round_no is not None:
sql += " and round_no = %d" % (round_no)
if not(include_new_games):
sql += " and log_type > 0";
sql += " order by seq desc";
if maxrows:
sql += " limit %d" % (maxrows)
if seq is not None:
cur.execute(sql, (seq,));
else:
cur.execute(sql)
results = cur.fetchall();
cur.close();
return results[::-1]
def get_teleost_modes(self):
cur = self.db.cursor()
cur.execute("select current_mode from teleost")
row = cur.fetchone()
if row is not None:
current_mode = row[0]
else:
current_mode = None
cur.close()
modes = []
for mode in teleost_modes:
mode_copy = mode.copy()
mode_copy["selected"] = False
modes.append(mode_copy)
if current_mode is not None and current_mode >= 0 and current_mode < len(modes):
modes[current_mode]["selected"] = True
return modes
def get_teleost_mode_info(self, mode_index):
if mode_index < 0 or mode_index >= len(teleost_modes):
return None
else:
return teleost_modes[mode_index]
def set_teleost_mode(self, mode):
cur = self.db.cursor();
cur.execute("update teleost set current_mode = ?", (mode,));
cur.close();
self.db.commit();
def define_teleost_modes(self, modes):
# No longer done by Teleost
return
def get_current_teleost_mode(self):
cur = self.db.cursor();
cur.execute("select current_mode from teleost");
row = cur.fetchone();
if row is None:
return teleost_mode_id_to_num.get("TELEOST_MODE_AUTO", 0)
return row[0];
def get_auto_effective_teleost_mode(self):
current_round = self.get_current_round(self.get_auto_current_round_must_have_games_in_all_divisions())
mode_name = None
if not current_round:
# There are no rounds yet, so just default to the standings table
mode_name = "TELEOST_MODE_STANDINGS"
else:
round_no = current_round["num"]
(played, unplayed) = self.get_played_unplayed_counts(round_no=round_no)
if played == 0 and unplayed == 0:
# No games in this round at all, so default to the videprinter
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
elif played == 0 and unplayed > 0:
# Fixtures announced, but no games played yet.
# If there is only one game, then show the standings/table
# results screen for this unplayed round, because it's likely
# this is the final and people want to know where they finished
# in the standings, so we don't want to show just the final
# fixture and nothing else.
# If there's more than one game then show the fixture list
# for this round.
if played + unplayed == 1:
mode_name = "TELEOST_MODE_STANDINGS_RESULTS"
elif self.get_auto_use_table_index():
mode_name = "TELEOST_MODE_TABLE_NUMBER_INDEX"
else:
mode_name = "TELEOST_MODE_FIXTURES"
elif played > 0 and unplayed == 0:
# All the games in this round have been played. Switch to the
# standings-and-results screen.
mode_name = "TELEOST_MODE_STANDINGS_RESULTS"
else:
# Otherwise, the round is in progress. Use the standings and
# videprinter display.
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
if not mode_name:
            # Shouldn't be reachable, since every branch above sets
            # mode_name, but fall back to the default view just in case.
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
return teleost_mode_id_to_num.get(mode_name, 1)
def get_effective_teleost_mode(self):
# Same as get_current_teleost_mode() except that if it's auto then
# we look at the game state and return which view the display should
# be showing.
mode = self.get_current_teleost_mode();
if mode < 0 or mode >= len(teleost_modes):
return 1
else:
if teleost_modes[mode]["id"] == "TELEOST_MODE_AUTO":
mode = self.get_auto_effective_teleost_mode()
return mode
def is_videprinter_showing(self):
mode = self.get_effective_teleost_mode()
return teleost_modes[mode]["id"] == "TELEOST_MODE_STANDINGS_VIDEPRINTER"
def set_teleost_options(self, options):
        # Teleost options are now defined statically (see
        # teleost_per_view_option_list), so there is nothing to store here.
return
def get_teleost_options(self, mode=None):
if self.db_version < (0, 7, 7):
return []
options = []
seq = -1
for opt in teleost_per_view_option_list:
seq += 1
cur = self.db.cursor()
if mode is not None and mode != opt[0]:
continue
cur.execute("select value from options where name = ?", (opt[1],))
row = cur.fetchone()
if row is None or row[0] is None:
value = opt[4] # default value
else:
if opt[2] == CONTROL_NUMBER:
value = int(row[0])
else:
value = row[0]
cur.close()
options.append(TeleostOption(
opt[0], # teleost mode
seq,
opt[1], # option name
opt[2], # control type
opt[3], # description
value # effective value
))
return options
def get_teleost_option_value(self, name):
if self.db_version < (0, 7, 7):
return None
#cur.execute("select telo.default_value, att.value from teleost_options telo left outer join options att on telo.name = att.name where telo.name = ?", (name,))
#row = cur.fetchone()
#value = None
#if row is not None:
# if row[1] is not None:
# value = row[1]
# else:
# value = row[0]
value = self.get_attribute(name, None)
if value is None:
for opt in teleost_per_view_option_list:
if opt[1] == name:
value = opt[4]
break
return value
def set_teleost_option_value(self, name, value):
self.set_attribute(name, value)
def get_num_games_to_play_by_table(self, round_no=None):
sql = """select table_no,
sum(case when p1_score is null and p2_score is null
then 1 else 0 end) games_left
from game""";
if round_no is not None:
sql += " where round_no = %d" % round_no;
sql += " group by table_no";
cur = self.db.cursor();
cur.execute(sql);
d = dict();
for (table, count) in cur:
d[table] = count;
cur.close();
return d;
def get_max_games_per_table(self, round_no=None):
sql = """select max(game_count) from (
select table_no, count(*) game_count
from game""";
if round_no is not None:
sql += " where round_no = %d" % (round_no)
sql += " group by table_no) x"
cur = self.db.cursor()
cur.execute(sql)
row = cur.fetchone()
value = None
if row is not None:
if row[0] is not None:
value = row[0]
cur.close()
return value
def get_latest_game_times_by_table(self, round_no=None):
sql = "select table_no, max(ts) from game_log";
sql += " where log_type = 1";
if round_no is not None:
sql += " and round_no = %d" % round_no;
sql += " group by 1 order by 2";
cur = self.db.cursor();
cur.execute(sql);
d = dict();
for (table, ts) in cur:
d[table] = str(ts);
cur.close();
return d;
def get_teams(self):
sql = "select id, name, colour from team order by id"
cur = self.db.cursor()
cur.execute(sql)
teams = []
for (team_id, team_name, colour) in cur:
teams.append(Team(team_id, team_name, colour))
cur.close()
return teams
def get_team_from_id(self, team_id):
sql = "select id, name, colour from team where id = ?"
cur = self.db.cursor()
cur.execute(sql, (team_id,))
(team_id, team_name, colour) = cur.fetchone();
cur.close()
return Team(team_id, team_name, colour)
def set_player_teams(self, player_teams):
# argument is list of 2-tuples, containing player name and team ID
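        # e.g. set_player_teams([("Alice", 1), ("Bob", None)]) puts the
        # (hypothetical) player Alice on team 1 and removes Bob from any
        # team; a negative team ID is treated the same as None.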
sql = "update player set team_id = ? where name = ?"
params = []
for pt in player_teams:
params.append((None if pt[1] is None or pt[1] < 0 else pt[1], pt[0]))
self.db.executemany(sql, params)
self.db.commit()
def get_player_teams(self):
sql = "select p.id, t.id from player p left outer join team t on p.team_id = t.id order by p.name"
cur = self.db.cursor()
cur.execute(sql)
player_team_ids = []
for (player_id, team_id) in cur:
player_team_ids.append((player_id, team_id))
cur.close()
player_teams = []
for (p_id, t_id) in player_team_ids:
if t_id is None or t_id < 0:
team = None
else:
team = self.get_team_from_id(t_id)
player = self.get_player_from_id(p_id)
player_teams.append((player, team))
return player_teams
def are_players_assigned_teams(self):
sql = "select count(*) from player where team_id is not null"
cur = self.db.execute(sql)
(num,) = cur.fetchone()
cur.close()
return num > 0
def get_team_scores(self, round_no=None):
sql = """
select t.id, sum(case when p1.team_id != t.id and p2.team_id != t.id then 0
when p1.team_id == p2.team_id then 0
when p1.team_id is null or p2.team_id is null then 0
when p1.team_id = t.id and g.p1_score > g.p2_score then 1
when p2.team_id = t.id and g.p2_score > g.p1_score then 1
else 0 end) score
from team t, game g, player p1, player p2
where g.p1 = p1.id
and g.p2 = p2.id
and g.game_type = 'P'
"""
if round_no is not None:
sql += " and g.round_no = %d" % round_no
sql += " group by t.id order by t.id"
cur = self.db.cursor();
cur.execute(sql)
team_score = []
for (team_id, score) in cur:
team_score.append((self.get_team_from_id(team_id), score))
cur.close()
return team_score
def store_fixgen_settings(self, fixgen_name, settings):
cur = self.db.cursor()
cur.execute("delete from fixgen_settings where fixgen = ?", (fixgen_name,))
rows = []
for name in settings:
rows.append((fixgen_name, name, settings[name]))
cur.executemany("insert into fixgen_settings values (?, ?, ?)", rows)
self.db.commit()
def get_fixgen_settings(self, fixgen_name):
cur = self.db.cursor()
cur.execute("select name, value from fixgen_settings where fixgen = ?", (fixgen_name,))
settings = dict()
for row in cur:
settings[row[0]] = row[1]
self.db.commit()
return settings
def close(self):
self.db.commit();
self.db.close();
def list_occupied_tables_in_round(self, round_no):
table_list = []
cur = self.db.cursor()
cur.execute("select distinct(table_no) from game where round_no = ?", (round_no,))
for row in cur:
if row[0] is not None:
table_list.append(row[0])
cur.close()
return table_list
def get_max_table_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(table_no) from game where round_no = ?", (round_no,))
retval = cur.fetchone()[0]
cur.close()
return retval
def get_max_game_seq_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game where round_no = ?", (round_no,))
retval = cur.fetchone()[0]
cur.close()
return retval
def list_divisions_playing_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select distinct(division) from game where round_no = ?", (round_no,))
divs = []
for row in cur:
divs.append(row[0])
cur.close()
return divs
def get_num_active_accessible_players_in_divisions(self, div_set):
if self.db_version < (1, 0, 4) or len(div_set) == 0:
return 0
cur = self.db.cursor()
cur.execute("select count(*) from player where require_accessible_table != 0 and withdrawn = 0 and division in (%s)" % (",".join([str(x) for x in div_set])))
row = cur.fetchone()
if row is None or row[0] is None:
count = 0
else:
count = row[0]
cur.close()
return count
def first_acc_player(self, group):
group_acc_players = [ p for p in group if p.is_requiring_accessible_table() ]
if not group_acc_players:
return ""
else:
return sorted(group_acc_players, key=lambda x : x.get_name())[0].get_name()
# generated_groups is fixgen.GeneratedGroups object
def make_fixtures_from_groups(self, generated_groups):
fixtures = []
num_divisions = self.get_num_divisions()
players = self.get_active_players()
(all_accessible_tables, acc_default) = self.get_accessible_tables()
for rd in generated_groups.get_rounds():
round_no = rd.get_round_no()
# Find out which tables (if any) already have players on, so we
# can avoid giving out those table numbers
occupied_tables = set(self.list_occupied_tables_in_round(round_no))
            # Build a list of the remaining players: active players who have
            # not yet had any games generated for them in this round. These
            # are the players whose games are yet to be decided, but who
            # might still want to reserve their favourite (e.g. accessible)
            # table.
            remaining_players = players[:]
            games_this_round = self.get_games(round_no=round_no)
games_this_round = self.get_games(round_no=round_no)
for g in games_this_round:
for p in g.get_players():
if p in remaining_players:
remaining_players.remove(p)
start_round_seq = self.get_max_game_seq_in_round(round_no)
if start_round_seq is None:
next_round_seq = 1
else:
next_round_seq = start_round_seq + 1
candidate_tables = cttable.get_candidate_tables(rd, remaining_players, occupied_tables, all_accessible_tables, acc_default)
for ct in candidate_tables:
group_fixtures = self.make_fixtures_from_group(ct.get_group(),
ct.get_round_no(), ct.get_division(),
ct.get_table_no(), next_round_seq, ct.get_game_type(),
ct.get_repeat_threes())
next_round_seq += len(group_fixtures)
fixtures += group_fixtures
return fixtures
def make_fixtures_from_group(self, group, round_no, division, table_no, next_round_seq, game_type, repeat_threes):
group_fixtures = []
round_seq = next_round_seq
if len(group) % 2 == 1:
# If there are an odd number of players on this table, then
# each player takes a turn at hosting, and the player X places
# clockwise from the host plays the player X places
# anticlockwise from the host,
# for X in 1 .. (len(group) - 1) / 2.
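            # e.g. for a group of 5 with host 0, the games generated are
            # 1 v 4 and 2 v 3; host 1 gives 2 v 0 and 3 v 4, and so on, so
            # each player hosts once and plays every other player once.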
for host in range(len(group)):
for x in range(1, (len(group) - 1) // 2 + 1):
left = (host + len(group) + x) % len(group)
right = (host + len(group) - x) % len(group)
p1 = group[left]
p2 = group[right]
fixture = Game(round_no, round_seq, table_no, division, game_type, p1, p2)
group_fixtures.append(fixture)
round_seq += 1
if repeat_threes and len(group) == 3:
fixture = Game(round_no, round_seq, table_no, division, game_type, p2, p1)
group_fixtures.append(fixture)
round_seq += 1
elif len(group) == 4:
# Four players on each table. Don't do the general catch-all
# thing in the next branch, instead show the matches in a
# specific order so that the first two can be played
# simultaneously, then the next two, then the last two.
indices = [ (0,1), (2,3), (0,2), (1,3), (1,2), (3,0) ]
for (x, y) in indices:
fixture = Game(round_no, round_seq, table_no, division, game_type, group[x], group[y])
group_fixtures.append(fixture)
round_seq += 1
else:
# There are an even number of players. Each player X from
# X = 0 .. len(group) - 1 plays each player Y for
# Y in X + 1 .. len(group) - 1
for x in range(len(group)):
for y in range(x + 1, len(group)):
p1 = group[x]
p2 = group[y]
if round_seq % 2 == 0 and len(group) > 2:
(p1, p2) = (p2, p1)
fixture = Game(round_no, round_seq, table_no, division, game_type, p1, p2)
group_fixtures.append(fixture)
round_seq += 1
return group_fixtures
def get_tim_down_award_standings(self, division, num_losing_games):
cur = self.db.cursor()
# Get the set of all players who have lost at least num_losing_games
# games of type P
rows = cur.execute("select p.id, sum(case when (p.id = g.p1 and g.p1_score < g.p2_score) or (p.id = g.p2 and g.p2_score < g.p1_score) then 1 else 0 end) losses from player p, game g where g.game_type = 'P' and p.division = ? and (g.p1 = p.id or g.p2 = p.id) group by p.id", (division,))
eligible_player_ids = set()
for row in rows:
if row[1] >= num_losing_games:
eligible_player_ids.add(row[0])
        cur.close()
        if not eligible_player_ids:
            # Avoid generating "in ()" below, which SQLite rejects.
            return []
# Get the list of opponents of these players
p_id_to_opp_list = {}
cur = self.db.cursor()
rows = cur.execute("select p_id, opp_id from heat_game_divided where p_id in (%s) order by p_id, opp_id" % (", ".join([ str(x) for x in eligible_player_ids ])))
for row in rows:
p_id = row[0]
opp_id = row[1]
p_id_to_opp_list[p_id] = p_id_to_opp_list.get(p_id, []) + [opp_id]
cur.close()
# Get the standings table, and for each eligible player, work out the
# average current standings position of their opponents
standings = self.get_standings(division, False, False)
player_name_to_id = {}
for p in self.get_players():
player_name_to_id[p.get_name()] = p.get_id()
p_id_to_standings_pos = {}
for s in standings:
p_id = player_name_to_id.get(s.name)
if p_id is not None:
p_id_to_standings_pos[p_id] = s.position
# For each eligible player, return a tuple containing
# (player object, list of opponent ranks, average opponent ranks)
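        # e.g. opponents currently ranked 2, 5 and 11 give an average
        # opponent rank of 6.0; a lower average means tougher opposition.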
results = []
for p_id in p_id_to_opp_list:
total_opp_rank = 0
num_opps = 0
rank_list = []
for opp_id in p_id_to_opp_list[p_id]:
pos = p_id_to_standings_pos.get(opp_id)
if pos is not None:
# We only count opponents which are in the current
# division
num_opps += 1
total_opp_rank += pos
rank_list.append(pos)
            if num_opps > 0:
                results.append((self.get_player_from_id(p_id), sorted(rank_list), float(total_opp_rank) / num_opps))
return sorted(results, key=lambda x : x[2])
def get_players_tuff_luck(self, num_losing_games):
p_id_to_losing_margins = dict()
cur = self.db.cursor()
rows = cur.execute("select case when p1_score > p2_score " +
"then p2 else p1 end p_id, " +
"case when tiebreak then 0 else abs(p1_score - p2_score) end margin " +
"from game " +
"where p1_score is not null and p2_score is not null " +
"and p1 is not null and p2 is not null and " +
"p1_score <> p2_score and " +
"game_type = 'P' " +
"order by 1")
for row in rows:
p_id = row[0]
margin = row[1]
p_id_to_losing_margins[p_id] = p_id_to_losing_margins.get(p_id, []) + [margin]
cur.close()
new_margin_map = dict()
for p_id in p_id_to_losing_margins:
# Limit each player to a maximum of num_losing_games, and remove
# from the list any player who has fewer losses than that
margin_list = p_id_to_losing_margins[p_id]
if len(margin_list) >= num_losing_games:
new_margin_map[p_id] = sorted(margin_list)[0:num_losing_games]
p_id_to_losing_margins = new_margin_map
# Return a list of tuples of the form (player, tuffness, margin_list)
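        # e.g. a player whose narrowest losses were by 2, 5 and 11 points
        # has margin_list [2, 5, 11] and tuffness 18; the list is sorted
        # with the tuffest (lowest total) first.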
tuffness_list = []
for p_id in p_id_to_losing_margins:
margin_list = p_id_to_losing_margins[p_id]
p = self.get_player_from_id(p_id)
if p:
tuffness_list.append((p, sum(margin_list), margin_list))
return sorted(tuffness_list, key=lambda x : x[1])
def get_players_overachievements(self, div_index):
# Get every player's standing position in this division
standings = self.get_standings(div_index)
p_id_to_standings_pos = dict()
p_id_to_rating = dict()
for s in standings:
player = self.get_player_from_name(s.name)
if player:
p_id_to_standings_pos[player.get_id()] = s.position
p_id_to_rating[player.get_id()] = s.rating
p_ids_by_rating = sorted(p_id_to_rating, key=lambda x : p_id_to_rating[x], reverse=True)
# Work out each player's seed, remembering that two players might have
# the same rating
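        # Ties share a seed and the next distinct rating skips the tied
        # places, e.g. ratings 1900, 1800, 1800, 1700 seed as 1, 2, 2, 4.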
p_id_to_seed = dict()
seed = 0
joint = 1
prev_rating = None
for p_id in p_ids_by_rating:
rating = p_id_to_rating[p_id]
if prev_rating is None or prev_rating != rating:
seed += joint
joint = 1
else:
joint += 1
p_id_to_seed[p_id] = seed
prev_rating = rating
overachievements = []
for p_id in p_id_to_standings_pos:
position = p_id_to_standings_pos[p_id]
seed = p_id_to_seed[p_id]
# We want positive numbers to indicate overachievement
overachievement = seed - position
player = self.get_player_from_id(p_id)
if player:
overachievements.append((player, seed, position, overachievement))
return sorted(overachievements, key=lambda x : (x[3], x[1]), reverse=True)
# Return true if all player ratings in a division are the same, with the
# exception of players with a zero rating.
def are_player_ratings_uniform(self, div_index):
cur = self.db.cursor()
cur.execute("select p.id, p.rating from player p where p.rating > 0 and p.division = ?", (div_index,))
rating = None
found_difference = False
for row in cur:
if rating is None:
rating = row[1]
else:
if row[1] != rating:
found_difference = True
break
cur.close()
return not found_difference
def get_banner_text(self):
return self.get_attribute("teleost_banner_text", "")
def set_banner_text(self, text):
self.set_attribute("teleost_banner_text", text)
def clear_banner_text(self):
self.set_attribute("teleost_banner_text", "")
def get_game_table_revision_no(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game_log where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
revision_no = 0
else:
revision_no = row[0]
cur.close()
return revision_no
def get_game_table_revision_time(self, round_no, revision_no):
cur = self.db.cursor()
cur.execute("select datetime(ts, 'localtime') ts from game_log where round_no = ? and seq = ?", (round_no, revision_no))
row = cur.fetchone()
if row is None or row[0] is None:
timestamp = None
else:
timestamp = row[0]
cur.close()
return timestamp
def query_result_to_game_dict_list(self, query):
cur = self.db.cursor()
cur.execute(query)
retlist = []
for row in cur:
retlist.append({
"round_num" : row[0],
"division" : row[3],
"name1" : row[4],
"name2" : row[5],
"score1" : row[6],
"score2" : row[7],
"tb" : row[8]
})
cur.close()
return retlist
def get_highest_winning_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak, case when g.p1_score > g.p2_score then g.p1_score else g.p2_score end winning_score
from game g
join player p1 on g.p1 = p1.id
join player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def get_highest_losing_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak,
case when g.p1_score < g.p2_score then g.p1_score else g.p2_score end losing_score
from game g
join player p1 on g.p1 = p1.id
join player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def get_highest_combined_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak,
g.p1_score + g.p2_score combined_score
from game g
join player p1 on g.p1 = p1.id
join player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def rerate_players_by_id(self):
cur = self.db.cursor()
cur.execute("select id, rating from player where rating != 0 order by id")
player_ids = []
for row in cur:
player_ids.append(row[0])
player_ids_new_ratings = []
max_rating = 2000
min_rating = 1000
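# Ratings are spaced evenly from max_rating down to min_rating in id
# order: with three rated players they become 2000, 1500 and 1000.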
for idx in range(len(player_ids)):
pid = player_ids[idx]
if len(player_ids) == 1:
new_rating = max_rating
else:
new_rating = max_rating - float(idx * (max_rating - min_rating)) / (len(player_ids) - 1)
new_rating = round(new_rating, 2)
player_ids_new_ratings.append((new_rating, pid))
cur.executemany("update player set rating = ? where id = ?", player_ids_new_ratings)
cur.close()
self.db.commit()
self.set_attribute("autoratingbehaviour", RATINGS_GRADUATED);
def is_table_accessible(self, table_no):
if self.db_version < (1, 0, 4):
return False
else:
cur = self.db.cursor()
cur.execute("select table_no, accessible from board where table_no in (-1, ?)", (table_no,))
default_value = False
value = None
for row in cur:
if row[0] == -1:
default_value = bool(row[1])
elif row[1] is not None:
value = bool(row[1])
if value is None:
value = default_value
cur.close()
return value
def get_num_accessible_tables(self):
if self.db_version < (1, 0, 4):
return 0
cur = self.db.cursor()
cur.execute("select accessible from board where table_no = -1")
row = cur.fetchone()
if row:
if row[0] is not None and row[0] != 0:
# All tables are accessible except those listed, but we don't
# know how many tables there are.
cur.close()
return None
cur.close()
cur = self.db.cursor()
cur.execute("select count(*) from board where table_no >= 0 and accessible != 0")
row = cur.fetchone()
if row and row[0] is not None:
count = row[0]
else:
count = 0
cur.close()
return count
# Return value is a pair (int list, bool).
# The bool is the default value for any table number not in the list, and
# the list contains those table numbers which don't agree with that boolean.
# For example, ([1,2,5], True) means all tables are accessible except
# 1, 2 and 5. ([17,18], False) means only tables 17 and 18 are accessible.
def get_accessible_tables(self):
if self.db_version < (1, 0, 4):
return ([], False)
accessible_tables = []
non_accessible_tables = []
defaultly_accessible_tables = []
default_value = False
cur = self.db.cursor()
cur.execute("select table_no, accessible from board order by table_no")
for row in cur:
if row[0] == -1:
default_value = bool(row[1])
elif row[1] is None:
defaultly_accessible_tables.append(row[0])
elif row[1] != 0:
accessible_tables.append(row[0])
else:
non_accessible_tables.append(row[0])
cur.close()
if default_value:
return (non_accessible_tables, True)
else:
return (accessible_tables, False)
def set_accessible_tables(self, table_list, all_except=False):
if self.db_version < (1, 0, 4):
return
cur = self.db.cursor()
# If we add any more columns to BOARD, we'll need to change this so
# we set accessible to NULL in all existing rows, then do an
# insert-or-replace.
cur.execute("delete from board")
# Remove duplicate table numbers
table_set = set(table_list)
table_list = sorted(list(table_set))
params = [ ( x, 0 if all_except else 1 ) for x in table_list ] + [ (-1, 1 if all_except else 0) ]
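# For example, set_accessible_tables([3, 4]) stores rows (3,1), (4,1)
# and the default row (-1,0): only tables 3 and 4 are accessible.
# With all_except=True the same call stores (3,0), (4,0) and (-1,1).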
cur.executemany("insert into board (table_no, accessible) values (?, ?)", params)
cur.close()
self.db.commit()
def get_unique_id(self):
unique_id = self.get_attribute("uniqueid", None)
if unique_id is None:
return self.get_name()
else:
return unique_id
def log_successful_upload(self):
if self.db_version >= (1, 0, 6):
self.db.execute("update upload_success set ts = current_timestamp")
self.db.commit()
def log_failed_upload(self, failure_type, message):
if self.db_version >= (1, 0, 6):
self.db.execute("insert into upload_error_log(ts, failure_type, message) values (current_timestamp, ?, ?)", (failure_type, message))
self.db.commit()
def get_last_successful_upload_time(self):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
cur.execute("select strftime('%s', ts) from upload_success")
row = cur.fetchone()
ts = None
if row and row[0]:
ts = int(row[0])
cur.close()
return ts
else:
return None
def get_last_failed_upload(self):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
cur.execute("select strftime('%s', ts), failure_type, message from upload_error_log order by ts desc limit 1")
row = cur.fetchone()
upload_desc = None
if row:
(ts, failure_type, message) = row
if ts is not None:
ts = int(ts)
upload_desc = {}
upload_desc["ts"] = ts
upload_desc["failure_type"] = int(failure_type)
upload_desc["message"] = message
cur.close()
return upload_desc
else:
return None
def set_broadcast_private(self, value):
self.set_attribute("broadcastprivate", 1 if value else 0)
def is_broadcast_private(self):
return self.get_int_attribute("broadcastprivate", 0) != 0
def is_post_to_videprinter_set(self):
return self.get_int_attribute("posttovideprinter", 1) != 0
def is_post_to_web_set(self):
return self.get_int_attribute("posttoweb", 1) != 0
def set_post_to_videprinter(self, value):
return self.set_attribute("posttovideprinter", 1 if value else 0)
def set_post_to_web(self, value):
return self.set_attribute("posttoweb", 1 if value else 0)
def get_rank_finals(self):
return self.get_int_attribute("rankfinals", 1) != 0
def set_rank_finals(self, rank_finals):
return self.set_attribute("rankfinals", 1 if rank_finals else 0)
def get_5_3_table_sizes(num_players):
if num_players < 8:
return []
table_sizes = []
players_left = num_players
while players_left % 5 != 0:
table_sizes.append(3)
players_left -= 3
while players_left > 0:
table_sizes = [5] + table_sizes
players_left -= 5
return table_sizes
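# Worked examples: get_5_3_table_sizes(13) == [5, 5, 3] and
# get_5_3_table_sizes(21) == [5, 5, 5, 3, 3]; fewer than 8 players
# cannot be split into tables of 5 and 3, so [] is returned.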
def get_game_types():
return [
{ "code" : "P", "name" : "Standard heat game" },
{ "code" : "QF", "name" : "Quarter-final" },
{ "code" : "SF", "name" : "Semi-final" },
{ "code" : "3P", "name" : "Third-place play-off" },
{ "code" : "F", "name" : "Final" } ,
{ "code" : "N", "name" : "Other game not counted in standings" }
]
unique_id_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def generate_unique_id():
return "".join([ random.choice(unique_id_chars) for x in range(10) ])
def tourney_open(dbname, directory="."):
if not re.match("^[A-Za-z0-9_-]+$", dbname):
raise InvalidDBNameException("The tourney database name can only contain letters, numbers, underscores and hyphens.");
if directory:
if directory[-1] != os.sep:
directory += os.sep;
dbpath = directory + dbname + ".db";
if not os.path.exists(dbpath):
raise DBNameDoesNotExistException("The tourney \"%s\" does not exist." % dbname);
else:
tourney = Tourney(dbpath, dbname, versioncheck=True);
return tourney;
def tourney_create(dbname, directory="."):
if not re.match("^[A-Za-z0-9_-]+$", dbname):
raise InvalidDBNameException("The tourney database name can only contain letters, numbers, underscores and hyphens.");
if len(dbname) > 60:
raise InvalidDBNameException("The tourney database name may not be more than 60 characters long.")
if directory:
if directory[-1] != '/':
directory += "/";
dbpath = directory + dbname + ".db";
if os.path.exists(dbpath):
raise DBNameExistsException("The tourney \"%s\" already exists. Pick another name." % dbname);
tourney = Tourney(dbpath, dbname, versioncheck=False);
tourney.db_version = SW_VERSION_SPLIT;
tourney.db.executescript(create_tables_sql);
tourney.db.execute("insert into options values ('atropineversion', ?)", (SW_VERSION,))
# We now generate a unique ID for each tourney db file. This helps with the
# web broadcast feature. It stops us from accidentally uploading an
# existing tourney such that it overwrites and destroys a different but
# identically-named one on the website.
unique_id = generate_unique_id()
tourney.db.execute("insert into options values ('uniqueid', ?)", (unique_id,))
tourney.db.commit()
return tourney
def get_software_version():
return SW_VERSION
| 39.691004
| 388
| 0.586035
| 19,416
| 145,150
| 4.19556
| 0.052431
| 0.019506
| 0.010385
| 0.017125
| 0.462724
| 0.381336
| 0.305508
| 0.254527
| 0.222659
| 0.197506
| 0
| 0.01518
| 0.316962
| 145,150
| 3,656
| 389
| 39.70186
| 0.806471
| 0.082962
| 0
| 0.358162
| 0
| 0.015315
| 0.283277
| 0.023378
| 0.000348
| 0
| 0.000152
| 0
| 0
| 1
| 0.091194
| false
| 0.007309
| 0.002785
| 0.032718
| 0.204316
| 0.005569
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dec0da50ce4a56fc78832aa67c6d71d1a1a1c437
| 995
|
py
|
Python
|
t/plugin/plugin_020deploy_test.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | 1
|
2019-10-15T08:37:56.000Z
|
2019-10-15T08:37:56.000Z
|
t/plugin/plugin_020deploy_test.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | null | null | null |
t/plugin/plugin_020deploy_test.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from glob import glob
from os import path, makedirs
def test_deploy_testing(testing_plugin):
makedirs(path.join('tdata', 'deploy', 'plugin'), exist_ok = True)
p = testing_plugin('testing', ns = '_sadmtest', deploy = True)
print('-- deploy plugin: testing')
p.deploy()
def test_all_deploy(testing_plugin):
makedirs(path.join('tdata', 'deploy', 'plugin'), exist_ok = True)
t = testing_plugin(ns = '_sadmtest', deploy = True, buildDeploy = False)
for opt in t._env.profile.config.options('deploy'):
if opt.startswith('env.'):
pname = '.'.join(opt.split('.')[1:])
if pname == 'testing':
continue
cfgdir = path.join('tdata', 'plugin', pname.replace('.', path.sep), 'config')
for fn in sorted(glob(path.join(cfgdir, '*.ini'))):
cfgfn = path.basename(fn)
print('-- deploy plugin:', pname, cfgfn)
p = testing_plugin(pname, deploy = True, buildCfg = cfgfn)
p.deploy(mockCfg = cfgfn)
| 36.851852
| 80
| 0.676382
| 134
| 995
| 4.91791
| 0.425373
| 0.098634
| 0.059181
| 0.075873
| 0.172989
| 0.172989
| 0.172989
| 0.172989
| 0.172989
| 0.172989
| 0
| 0.001183
| 0.150754
| 995
| 26
| 81
| 38.269231
| 0.778698
| 0.072362
| 0
| 0.095238
| 0
| 0
| 0.155435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dec30d56b6d0887d305f33e490a67d25b3dd39cd
| 4,189
|
py
|
Python
|
jsonReadWrite.py
|
nsobczak/ActivityWatchToCSV
|
cefb67e9f1c834008f2b39c0baf6c7c506327a4d
|
[
"Apache-2.0"
] | null | null | null |
jsonReadWrite.py
|
nsobczak/ActivityWatchToCSV
|
cefb67e9f1c834008f2b39c0baf6c7c506327a4d
|
[
"Apache-2.0"
] | null | null | null |
jsonReadWrite.py
|
nsobczak/ActivityWatchToCSV
|
cefb67e9f1c834008f2b39c0baf6c7c506327a4d
|
[
"Apache-2.0"
] | null | null | null |
"""
##############
# jsonReader #
##############
"""
# Import
import json
from platform import system
from enum import Enum
from datetime import timedelta
# %% ____________________________________________________________________________________________________
# ____________________________________________________________________________________________________
# Functions
class Watcher(Enum):
AFK = 1
WEB = 2
WINDOW = 3
def jsonReadWrite(pathToJson, pathWhereToCreateFile, watcher, printJsonFile=False):
"""
Write csv formatted data into file
:param path: path where to create file
:type path: str
:param watcher: watcher type of the file
:type watcher: Watcher
:param printJsonFile: ???
:type printJsonFile: bool
:return: return csv formatted string
:rtype: str
"""
res = "file generated"
with open(pathToJson) as json_file:
dataDict = json.load(json_file)
if (system() != 'Linux' and system() != 'Windows'):
print("{} operating system not supported".format(system()))
else:
print("{} operating system detected".format(system()))
if printJsonFile:
print(json.dumps(dataDict, indent=4))
csvFile = open(pathWhereToCreateFile, "w") # "w" to write strings to the file
if watcher == Watcher.AFK:
print("watcher == Watcher.AFK")
# duration: 956.016
# id: 316
# timestamp: 2019 - 01 - 28
# T10: 28:13.770000 + 00: 00
# data: {'status': 'not-afk'}
res = "Watcher.AFK detected => does nothing"
elif watcher == Watcher.WEB:
print("watcher == Watcher.WEB")
# duration: 1.518
# id: 3210
# timestamp: 2019 - 01 - 31
# T18: 01:45.794000 + 00: 00
# data: {'title': 'New Tab', 'url': 'about:blank', 'audible': False, 'tabCount': 3, 'incognito': False}
res = "Watcher.WEB detected => does nothing"
elif watcher == Watcher.WINDOW:
print("watcher == Watcher.WINDOW")
# duration: 4.017 # <= in seconds
# id: 17
# timestamp: 2019 - 01 - 28
# T01: 11:55.570000 + 00: 00
# data: {'title': 'Terminal - arch@ArchDesktop:~', 'app': 'Xfce4-terminal'} # <= app is the interesting thing
# if printJsonFile:
# # check
# for d in dataDict:
# print('duration: ' + str(d['duration']))
# print('id: ' + str(d['id']))
# print('timestamp: ' + str(d['timestamp']))
# print('data: ' + str(d['data']))
# print(' title: ' + str(d['data']['title']))
# print(' app: ' + str(d['data']['app']))
# print('')
handleWindowWatcher(csvFile, dataDict)
else:
res = "failed to identify watcher type"
csvFile.close()
print(res)
return res
def handleWindowWatcher(csvFile, dataDict):
columnTitleRow = "date; app; duration(s); duration(h:m:s)\n"
csvFile.write(columnTitleRow)
sortedData = {}
for d in dataDict:
# timestamp only beginning: "2019-01-28T01:11:32.482000+00:00"
date = str(d['timestamp'])[:10]
if not (date in sortedData):
sortedData[date] = {}
app = str(d['data']['app'])
if not (app in sortedData[date]):
sortedData[date][app] = 0
duration = float(d['duration'])
sortedData[date][app] += duration
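# After this loop, sortedData maps date -> app -> total seconds, e.g.
# {'2019-01-28': {'Xfce4-terminal': 4.017}} for the sample event above.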
rows = ""
for keyDate, valueAppDict in sortedData.items():
for keyApp, valueDuration in valueAppDict.items():
# date
rows += keyDate + "; "
# app
rows += keyApp + "; "
# duration
valueDurationStr = str(valueDuration)
leftPart, rightPart = valueDurationStr.split('.')
valueDurationStr = leftPart + "," + rightPart[:3]
rows += valueDurationStr + "; "
rows += str(timedelta(seconds=valueDuration)) + "\n"
rows += "\n"
csvFile.write(rows)
| 30.136691
| 121
| 0.545476
| 403
| 4,189
| 5.168734
| 0.362283
| 0.053769
| 0.015362
| 0.016323
| 0.048968
| 0.035526
| 0
| 0
| 0
| 0
| 0
| 0.045263
| 0.319647
| 4,189
| 138
| 122
| 30.355072
| 0.685614
| 0.351635
| 0
| 0.035088
| 0
| 0
| 0.128381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.070175
| 0
| 0.192982
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dec3efd877d3ce87cbe9fc53530bf43be70d8149
| 306
|
py
|
Python
|
2021-12-23/1.py
|
xiaozhiyuqwq/seniorschool
|
7375038b00a6d2deaec5d70bfac25ddbf4d2558e
|
[
"Apache-2.0"
] | null | null | null |
2021-12-23/1.py
|
xiaozhiyuqwq/seniorschool
|
7375038b00a6d2deaec5d70bfac25ddbf4d2558e
|
[
"Apache-2.0"
] | null | null | null |
2021-12-23/1.py
|
xiaozhiyuqwq/seniorschool
|
7375038b00a6d2deaec5d70bfac25ddbf4d2558e
|
[
"Apache-2.0"
] | null | null | null |
# initialise
t=0
# search
for x in range(1,9):
for y in range(1,11):
for z in range(1,13):
if 6*x+5*y+4*z==50:
print("found solution: x =",x,", y =",y,", z =",z)
t=t+1
print("found {} solutions in total.".format(t))
#by xiaozhiyuqwq
#https://www.rainyat.work
#2021-12-23
| 21.857143
| 60
| 0.46732
| 54
| 306
| 2.648148
| 0.648148
| 0.146853
| 0.167832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112195
| 0.330065
| 306
| 13
| 61
| 23.538462
| 0.585366
| 0.176471
| 0
| 0
| 0
| 0
| 0.141026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dec771d07fef05c3b6f9bec75d34bca56cffa1b5
| 3,648
|
py
|
Python
|
data_augmentor/multidimension.py
|
ZhiangChen/tornado_ML
|
d8bded61a6a234ca67e31776bc8576c6c18f5621
|
[
"MIT"
] | 2
|
2018-12-09T20:08:51.000Z
|
2021-02-01T17:49:14.000Z
|
data_augmentor/multidimension.py
|
ZhiangChen/tornado_ML
|
d8bded61a6a234ca67e31776bc8576c6c18f5621
|
[
"MIT"
] | 1
|
2019-11-15T06:15:03.000Z
|
2019-11-15T06:15:03.000Z
|
multidimension.py
|
DREAMS-lab/data_augmentor
|
f204ee3af805b17d9946d3d5c6e7ca62398f09e5
|
[
"MIT"
] | null | null | null |
"""
multispectrum
Zhiang Chen,
Feb, 2020
"""
import gdal
import cv2
import numpy as np
import math
import os
class MultDim(object):
def __init__(self):
pass
def readTiff(self, tif_file, channel=3):
self.ds = gdal.Open(tif_file)
B = self.ds.GetRasterBand(1).ReadAsArray()
G = self.ds.GetRasterBand(2).ReadAsArray()
R = self.ds.GetRasterBand(3).ReadAsArray()
if channel ==3:
cv2.imwrite("./datasets/Rock/R.png", R)
cv2.imwrite("./datasets/Rock/G.png", G)
cv2.imwrite("./datasets/Rock/B.png", B)
if channel == 5:
RE = self.ds.GetRasterBand(4).ReadAsArray()
NIR = self.ds.GetRasterBand(5).ReadAsArray()
cv2.imwrite("./datasets/Rock/R.png", R)
cv2.imwrite("./datasets/Rock/G.png", G)
cv2.imwrite("./datasets/Rock/B.png", B)
cv2.imwrite("./datasets/Rock/RE.png", RE)
cv2.imwrite("./datasets/Rock/NIR.png",NIR)
def readImage(self, image_file, channel=3):
if channel==1:
img = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE).astype(np.uint8)
img = np.expand_dims(img, axis=2)
else:
img = cv2.imread(image_file).astype(np.uint8)
return img
def cat(self, data1, data2):
return np.append(data1, data2, axis=2)
def split(self, data, step, path, overlap=0):
dim = data.shape
mult = np.zeros((dim[0]+step, dim[1]+step, dim[2]))
mult[:dim[0], :dim[1], :] = data
xn = int(math.ceil(float(dim[0])/(step-overlap)))
yn = int(math.ceil(float(dim[1])/(step-overlap)))
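# For example (hypothetical sizes), a 1000x800 input with step=400 and
# overlap=10 yields ceil(1000/390) x ceil(800/390) = 3 x 3 tiles, each
# of shape 400 x 400 x channels, saved as separate .npy files.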
for i in range(xn):
for j in range(yn):
x = i*(step-overlap)
y = j*(step-overlap)
dt = mult[x:x+step, y:y+step, :]
name = os.path.join(path, str(i)+"_"+str(j)+".npy")
np.save(name, dt)
def addAnnotation(self, mult_path, annotation_path, save_path):
ann_files = os.listdir(annotation_path)
mult_files = os.listdir(mult_path)
for f in ann_files:
if f in mult_files:
ann_name = os.path.join(annotation_path, f)
mult_name = os.path.join(mult_path, f)
ann = np.load(ann_name)
mult = np.load(mult_name)
data = np.append(mult, ann, axis=2)
save_name = os.path.join(save_path, f)
np.save(save_name, data)
if __name__ == '__main__':
st = MultDim()
# split tiles
"""
st.readTiff("./datasets/C3/Orth5.tif", channel=5)
R = st.readImage("./datasets/Rock/R.png", channel=1)
G = st.readImage("./datasets/Rock/G.png", channel=1)
B = st.readImage("./datasets/Rock/B.png", channel=1)
RE = st.readImage("./datasets/Rock/RE.png", channel=1)
NIR = st.readImage("./datasets/Rock/NIR.png", channel=1)
DEM = st.readImage("./datasets/Rock/DEM3.png", channel=3)
data = st.cat(R, G)
data = st.cat(data, B)
data = st.cat(data, RE)
data = st.cat(data, NIR)
data = st.cat(data, DEM)
st.split(data, 400, "./datasets/Rock/mult_10", overlap=10)
"""
# add annotations
# st.addAnnotation("./datasets/Rock/mult/", "./datasets/Rock_test/npy/", "./datasets/Rock_test/mult")
#"""
RGB = st.readImage("./datasets/C3/C3.png", channel=3)
DEM = st.readImage("./datasets/C3/C3_dem.png", channel=3)
data = st.cat(RGB, DEM)
st.split(data, 400, './datasets/C3/rgbd', overlap=10)
#"""
#st.addAnnotation("./datasets/C3/rgbd/", "./datasets/C3_test/npy/", "./datasets/C3_test/rocks")
| 35.076923
| 105
| 0.569353
| 508
| 3,648
| 4.001969
| 0.214567
| 0.106247
| 0.070831
| 0.086572
| 0.185932
| 0.123955
| 0.079685
| 0.079685
| 0.079685
| 0.079685
| 0
| 0.027819
| 0.260965
| 3,648
| 104
| 106
| 35.076923
| 0.726261
| 0.072643
| 0
| 0.092308
| 0
| 0
| 0.089357
| 0.070832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0.015385
| 0.076923
| 0.015385
| 0.215385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deca8e26bb6a2a9ae53903a22809984f7a74b454
| 26,490
|
py
|
Python
|
project.py
|
PetruSicoe/Python101-GameProject
|
82121a8e110ee484acdf85843725882d60957b25
|
[
"CC-BY-4.0"
] | null | null | null |
project.py
|
PetruSicoe/Python101-GameProject
|
82121a8e110ee484acdf85843725882d60957b25
|
[
"CC-BY-4.0"
] | null | null | null |
project.py
|
PetruSicoe/Python101-GameProject
|
82121a8e110ee484acdf85843725882d60957b25
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
from random import randrange
import random
import pygame, sys
from pygame.locals import *
import string
pygame.font.init()
MENU_WIDTH = 1000
MENU_HEIGHT = 1000
GUESS_WIDTH = 1000
GUESS_HEIGHT = 650
HANGMAN_WIDTH = 1300
HANGMAN_HEIGHT = 720
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
LIGHT_YELLOW = (255, 255, 102)
frame_rate = pygame.time.Clock()
back_ground = pygame.image.load("image_kids.jpg")
back_ground_guess = pygame.image.load("schoolboard.jpg")
class GameObject:
def __init__(self, position):
self.position = position
def input(self):
pass
def draw(self):
pass
class Menu(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((MENU_WIDTH,MENU_HEIGHT))
pygame.display.set_caption('Game Menu')
# buttons for opening each game's page
self.color_hang = (203, 195, 227)
self.color_hang_hover = (140,106,189)
self.left_hang = MENU_WIDTH / 4 + 100
self.top_hang = MENU_HEIGHT / 3
self.width_hang = 250
self.heigth_hang = 120
self.color_guess = (51, 255, 153)
self.color_guess_hover = (37, 186, 132)
self.left_guess = MENU_WIDTH / 4 + 20
self.top_guess = MENU_HEIGHT / 2 + 50
self.width_guess = 470
self.heigth_guess = 120
#[left, top, width, height]
self.hang_rect = pygame.Rect(self.left_hang, self.top_hang, self.width_hang, self.heigth_hang)
self.guess_rect = pygame.Rect(self.left_guess, self.top_guess, self.width_guess, self.heigth_guess)
def input(self):
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
mouse = pygame.mouse.get_pos()
if self.left_hang <= mouse[0] <= self.left_hang + self.width_hang and self.top_hang <= mouse[1] <= self.top_hang + self.heigth_hang:
hangman = Hangman()
hangman.run()
pygame.quit()
sys.exit()
elif self.left_guess <= mouse[0] <= self.left_guess + self.width_guess and self.top_guess <= mouse[1] <= self.top_guess + self.heigth_guess:
guess = GuessTheNumber()
guess.run()
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def draw(self):
image_rect = back_ground.get_rect()
self.window.fill(BLACK)
self.window.blit(back_ground, image_rect)
# fonts
self.font = pygame.font.SysFont('Comic Sans MS',50)
# the title
title_x_pos = MENU_WIDTH / 6 + 50
title_y_pos = MENU_HEIGHT / 6
self.img = self.font.render('Childhood\'s Gamechest', True, BLACK)
self.window.blit(self.img, (title_x_pos , title_y_pos ))
# draw the Hangman button; change colour on hover
mouse = pygame.mouse.get_pos()
if self.left_hang <= mouse[0] <= self.left_hang + self.width_hang and self.top_hang <= mouse[1] <= self.top_hang + self.heigth_hang:
pygame.draw.rect(self.window, self.color_hang_hover, self.hang_rect)
else:
pygame.draw.rect(self.window, self.color_hang, self.hang_rect)
# put the label text on the button
self.hang_button = self.font.render('Hangman', True, BLACK)
self.window.blit(self.hang_button, (self.left_hang + 15, self.top_hang + 20))
# draw the Guess the Number button
if self.left_guess <= mouse[0] <= self.left_guess + self.width_guess and self.top_guess <= mouse[1] <= self.top_guess + self.heigth_guess:
pygame.draw.rect(self.window, self.color_guess_hover, self.guess_rect)
else:
pygame.draw.rect(self.window, self.color_guess, self.guess_rect)
# put the label text on the button
self.guess_button = self.font.render('Guess the Number', True, BLACK)
self.window.blit(self.guess_button, (self.left_guess + 15, self.top_guess + 20))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
class Hangman(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((HANGMAN_WIDTH, HANGMAN_HEIGHT))
pygame.display.set_caption('Hangman')
self.text = ''
self.guess_text = ''
self.current_letter = ''
# fonts
self.input_font = pygame.font.SysFont('Comic Sans MS',100)
self.letters_font = pygame.font.SysFont('Comic Sans MS',35)
self.title_font = pygame.font.SysFont('Algerian',100)
# background image
self.hang_background = pygame.image.load("papyrus.jpg")
# load the images that show the hangman's state for each number of lives
self.zero_img = pygame.image.load("0.jpg")
self.zero_img = pygame.transform.scale(self.zero_img, (self.zero_img.get_size()[0] + 100, self.zero_img.get_size()[1] + 100))
self.three_img = pygame.image.load("3.jpg")
self.three_img = pygame.transform.scale(self.three_img, (self.three_img.get_size()[0] + 100, self.three_img.get_size()[1] + 100))
self.five_img = pygame.image.load("5.jpg")
self.five_img = pygame.transform.scale(self.five_img, (self.five_img.get_size()[0] + 100, self.five_img.get_size()[1] + 100))
self.six_img = pygame.image.load("6.jpg")
self.six_img = pygame.transform.scale(self.six_img, (self.six_img.get_size()[0] + 100, self.six_img.get_size()[1] + 100))
self.seven_img = pygame.image.load("7.jpg")
self.seven_img = pygame.transform.scale(self.seven_img, (self.seven_img.get_size()[0] + 100, self.seven_img.get_size()[1] + 100))
self.eight_img = pygame.image.load("8.jpg")
self.eight_img = pygame.transform.scale(self.eight_img, (self.eight_img.get_size()[0] + 100, self.eight_img.get_size()[1] + 100))
self.nine_img = pygame.image.load("9.jpg")
self.nine_img = pygame.transform.scale(self.nine_img, (self.nine_img.get_size()[0] + 100, self.nine_img.get_size()[1] + 100))
self.ten_img = pygame.image.load("10.jpg")
self.ten_img = pygame.transform.scale(self.ten_img, (self.ten_img.get_size()[0] + 100, self.ten_img.get_size()[1] + 100))
# box that holds the current letter
self.input_box = pygame.Rect(100, 400, 200, 200)
self.active_box = False
# colours for the input box
self.color_inactive = (64, 64, 64)
self.color_active = (224, 224, 224)
self.nr_lives = 6
self.won = False
self.lost = False
self.timer_index = 0
# file of words to guess; pick one of them at random
with open("hangman_input.txt") as file:
lines = file.readlines()
words = lines[randrange(len(lines))].strip("\n")
words = words.split()
self.guess_text = words[randrange(len(words))]
print("de ghicit: " + self.guess_text)
#fac o lista de tupluri cu litera, spatiul ei dedicat si daca e ghicita sau nu
self.letters= []
for i in range(len(self.guess_text)):
self.letters.append( (self.guess_text[i], pygame.Rect(10 + 100 * i, 200, 50 , 50), False) )
#Menu Button
self.color_menu = (203, 195, 227)
self.color_menu_hover = (140, 106, 189)
self.left_menu = HANGMAN_WIDTH / 2 - 200
self.top_menu = HANGMAN_HEIGHT / 2 + 100
self.width_menu = 300
self.heigth_menu = 120
self.menu_rect = pygame.Rect(self.left_menu, self.top_menu, self.width_menu, self.heigth_menu)
def input(self):
# for closing the menu
for event in pygame.event.get():
# handle the window close button
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
# the input box can only be edited while it is selected
if self.active_box:
if event.key == pygame.K_RETURN:
# on Enter, process the letter in the input box
if len(self.text) > 0:
if self.text in self.guess_text:
pos = self.guess_text.find(self.text)
while pos != -1:
self.letters[pos] = (self.letters[pos][0], self.letters[pos][1], True)
pos = self.guess_text.find(self.text, pos + 1, len(self.guess_text))
# check whether every box is filled in; if so, the player has won
just_won = True
for k in self.letters:
if not k[2]:
just_won = False
if just_won:
self.won = True
else:
self.nr_lives -= 1
if self.nr_lives == 0:
self.lost = True
self.text = ''
elif event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.current_letter = event.unicode.upper()
# only one letter may be in the box
if len(self.text) >= 1:
self.text = self.text[:-1]
self.text += self.current_letter
if event.type == pygame.MOUSEBUTTONDOWN:
if self.input_box.collidepoint(event.pos):
# toggle the box's active state
self.active_box = not self.active_box
else:
self.active_box = False
if self.menu_rect.collidepoint(event.pos):
if self.won or self.lost:
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
def draw(self):
image_rect = self.hang_background.get_rect()
self.window.fill(BLACK)
self.window.blit(self.hang_background, image_rect)
# draw the image representing the state, plus the number of lives
if self.nr_lives == 6:
image_rect = self.zero_img.get_rect()
hang_img = self.zero_img
elif self.nr_lives == 5:
image_rect = self.five_img.get_rect()
hang_img = self.five_img
elif self.nr_lives == 4:
image_rect = self.six_img.get_rect()
hang_img = self.six_img
elif self.nr_lives == 3:
image_rect = self.seven_img.get_rect()
hang_img = self.seven_img
elif self.nr_lives == 2:
image_rect = self.eight_img.get_rect()
hang_img = self.eight_img
elif self.nr_lives == 1:
image_rect = self.nine_img.get_rect()
hang_img = self.nine_img
else:
image_rect = self.ten_img.get_rect()
hang_img = self.ten_img
image_rect.x = 880
image_rect.y = 300
self.window.blit(hang_img, image_rect)
# the title
title_x_pos = HANGMAN_WIDTH / 3 - 30
title_y_pos = 30
self.title = self.title_font.render('HANGMAN', True, BLACK)
self.window.blit(self.title, (title_x_pos , title_y_pos ))
if not self.won and not self.lost:
# guessed/unguessed letters
for i in range(len(self.letters)):
pygame.draw.rect(self.window, self.color_inactive, self.letters[i][1])
text_surface = self.letters_font.render(self.letters[i][0],True, self.color_active)
if self.letters[i][2] == True:
self.window.blit(text_surface, (self.letters[i][1].x + 15, self.letters[i][1].y + 5))
if not self.active_box:
pygame.draw.rect(self.window, self.color_inactive, self.input_box)
else:
pygame.draw.rect(self.window, self.color_active, self.input_box)
if len(self.text) > 0:
text_surface = self.input_font.render(self.text,True, BLACK)
self.window.blit(text_surface, (self.input_box.x + 65, self.input_box.y + 20))
# the winning case
if self.won:
# display a message
if self.timer_index < 1:
self.timer_index += 0.01
text_surface = self.title_font.render("You win", True, GREEN)
self.window.blit(text_surface, (400, 400))
else:
# show the menu button once the timer expires
mouse = pygame.mouse.get_pos()
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.heigth_menu:
pygame.draw.rect(self.window, self.color_menu_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.color_menu, self.menu_rect)
# put the label text on the button
self.menu_button = self.letters_font.render('Back to Menu', True, self.color_inactive)
self.window.blit(self.menu_button, (self.left_menu + 30, self.top_menu + 30))
# the losing case
if self.lost:
# display a message
if self.timer_index < 1:
self.timer_index += 0.01
text_surface = self.title_font.render("You lost", True, RED)
self.window.blit(text_surface, (400, 400))
else:
# show the menu button once the timer expires
mouse = pygame.mouse.get_pos()
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.heigth_menu:
pygame.draw.rect(self.window, self.color_menu_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.color_menu, self.menu_rect)
# put the label text on the button
self.menu_button = self.letters_font.render('Back to Menu', True, self.color_inactive)
self.window.blit(self.menu_button, (self.left_menu + 30, self.top_menu + 30))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
class GuessTheNumber(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((GUESS_WIDTH, GUESS_HEIGHT))
pygame.display.set_caption('Guess the Number')
self.index = 0
self.lives = 2
self.winner_text = ''
self.losing_text = ''
# fonts
self.intro_font = pygame.font.SysFont('Comic Sans MS', 50)
self.number_font = pygame.font.SysFont('Comic Sans MS', 30)
self.lives_font = pygame.font.SysFont('Comic Sans MS', 20)
self.message_font = pygame.font.SysFont('Comic Sans MS', 40)
# colours
self.card_color = (194, 175, 161)
self.card_hover = (175, 122, 90)
self.choice = -1
# initialise the cards
#1
self.left_card_one = GUESS_WIDTH / 4 + 70
self.top_card_one = GUESS_HEIGHT / 3
self.width_card_one = 100
self.height_card_one = 70
self.card_one_rect = pygame.Rect(self.left_card_one, self.top_card_one, self.width_card_one, self.height_card_one)
self.rand_1 = randrange(5)
#2
self.left_card_two = GUESS_WIDTH / 4 + 320
self.top_card_two = GUESS_HEIGHT / 3
self.width_card_two = 100
self.height_card_two = 70
self.card_two_rect = pygame.Rect(self.left_card_two, self.top_card_two, self.width_card_two, self.height_card_two)
self.rand_2 = randrange(6, 10)
#3
self.left_card_three = GUESS_WIDTH / 4 + 70
self.top_card_three = GUESS_HEIGHT / 3 + 170
self.width_card_three = 100
self.height_card_three = 70
self.card_three_rect = pygame.Rect(self.left_card_three, self.top_card_three, self.width_card_three, self.height_card_three)
self.rand_3 = randrange(25, 35)
#4
self.left_card_four = GUESS_WIDTH / 4 + 320
self.top_card_four = GUESS_HEIGHT / 3 + 170
self.width_card_four = 100
self.height_card_four = 70
self.card_four_rect = pygame.Rect(self.left_card_four, self.top_card_four, self.width_card_four, self.height_card_four)
self.rand_4 = randrange(10, 20)
# put all the random-number buttons into one list
self.randoms_list = ['button_1', 'button_2', 'button_3', 'button_4']
self.to_guess = random.choice(self.randoms_list)
# end-of-game buttons
#REPLAY
self.left_replay = GUESS_WIDTH - 150
self.top_replay = GUESS_HEIGHT / 2 - 80
self.width_replay = 60
self.height_replay = 45
self.replay_rect = pygame.Rect(self.left_replay, self.top_replay, self.width_replay, self.height_replay)
#MENU
self.left_menu = GUESS_WIDTH - 150
self.top_menu = GUESS_HEIGHT / 2
self.width_menu = 60
self.height_menu = 45
self.menu_rect = pygame.Rect(self.left_menu, self.top_menu, self.width_menu, self.height_menu)
self.timer_index = 0
def input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
# check which card the player clicked, or whether MENU/REPLAY was clicked
if self.card_one_rect.collidepoint(event.pos):
if self.randoms_list[0] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_two_rect.collidepoint(event.pos):
if self.randoms_list[1] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_three_rect.collidepoint(event.pos):
if self.randoms_list[2] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_four_rect.collidepoint(event.pos):
if self.randoms_list[3] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.menu_rect.collidepoint(event.pos):
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
elif self.replay_rect.collidepoint(event.pos):
guess = GuessTheNumber()
guess.run()
pygame.quit()
sys.exit()
def draw(self):
image_rect = back_ground_guess.get_rect()
self.window.fill(BLACK)
self.window.blit(back_ground_guess, image_rect)
# display the title
welcome_text = self.intro_font.render('Welcome to GuessTheNumber!', True, WHITE)
self.window.blit(welcome_text, (150, 25))
# display the number of lives
lives_text = self.lives_font.render(f'lives: {self.lives}', True, LIGHT_YELLOW)
self.window.blit(lives_text, (680, 150))
mouse = pygame.mouse.get_pos()
# display the cards
#1
if self.left_card_one <= mouse[0] <= self.left_card_one + self.width_card_one and self.top_card_one <= mouse[1] <= self.top_card_one + self.height_card_one:
pygame.draw.rect(self.window, self.card_hover, self.card_one_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_one_rect)
self.button_1 = self.number_font.render(str(self.rand_1), True, BLACK)
self.window.blit(self.button_1, (self.card_one_rect.x + 40, self.card_one_rect.y + 10))
#2
if self.left_card_two <= mouse[0] <= self.left_card_two + self.width_card_two and self.top_card_two <= mouse[1] <= self.top_card_two + self.height_card_two:
pygame.draw.rect(self.window, self.card_hover, self.card_two_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_two_rect)
self.button_2 = self.number_font.render(str(self.rand_2), True, BLACK)
self.window.blit(self.button_2, (self.card_two_rect.x + 40, self.card_two_rect.y + 10))
#3
if self.left_card_three <= mouse[0] <= self.left_card_three + self.width_card_three and self.top_card_three <= mouse[1] <= self.top_card_three + self.height_card_three:
pygame.draw.rect(self.window, self.card_hover, self.card_three_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_three_rect)
self.button_3 = self.number_font.render(str(self.rand_3), True, BLACK)
self.window.blit(self.button_3, (self.card_three_rect.x + 40, self.card_three_rect.y + 10))
#4
if self.left_card_four <= mouse[0] <= self.left_card_four + self.width_card_four and self.top_card_four <= mouse[1] <= self.top_card_four + self.height_card_four:
pygame.draw.rect(self.window, self.card_hover, self.card_four_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_four_rect)
self.button_4 = self.number_font.render(str(self.rand_4), True, BLACK)
self.window.blit(self.button_4, (self.card_four_rect.x + 40, self.card_four_rect.y + 10))
if self.choice == 1:
self.winner_text = self.message_font.render('Wow, you won!', True, LIGHT_YELLOW)
self.window.blit(self.winner_text, (400, 300))
# replay button
if self.left_replay <= mouse[0] <= self.left_replay + self.width_replay and self.top_replay <= mouse[1] <= self.top_replay + self.height_replay:
pygame.draw.rect(self.window, self.card_hover, self.replay_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.replay_rect)
self.replay_b = self.lives_font.render('Replay', True, BLACK)
self.window.blit(self.replay_b, (self.replay_rect.x + 1, self.replay_rect.y + 10))
# MENU button
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.height_menu:
pygame.draw.rect(self.window, self.card_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.menu_rect)
self.menu_b = self.lives_font.render('Menu', True, BLACK)
self.window.blit(self.menu_b, (self.menu_rect.x + 1, self.menu_rect.y + 10))
elif self.choice == 0:
mouse = pygame.mouse.get_pos()
if self.lives == 1:
if self.timer_index < 1:
self.losing_text = self.message_font.render('Oopsey, only one life left!', True, LIGHT_YELLOW)
self.window.blit(self.losing_text, (300, 300))
self.timer_index+=0.01
elif self.lives == 0:
self.losing_text = self.message_font.render('Game over ya loser', True, LIGHT_YELLOW)
self.window.blit(self.losing_text, (350, 300))
# replay button
if self.left_replay <= mouse[0] <= self.left_replay + self.width_replay and self.top_replay <= mouse[1] <= self.top_replay + self.height_replay:
pygame.draw.rect(self.window, self.card_hover, self.replay_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.replay_rect)
self.replay_b = self.lives_font.render('Replay', True, BLACK)
self.window.blit(self.replay_b, (self.replay_rect.x + 1, self.replay_rect.y + 10))
# MENU button
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.height_menu:
pygame.draw.rect(self.window, self.card_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.menu_rect)
self.menu_b = self.lives_font.render('Menu', True, BLACK)
self.window.blit(self.menu_b, (self.menu_rect.x + 1, self.menu_rect.y + 10))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
if __name__ == "__main__":
menu = Menu()
menu.run()
| 40.197269
| 177
| 0.555795
| 3,379
| 26,490
| 4.150636
| 0.102397
| 0.042781
| 0.029947
| 0.034652
| 0.597433
| 0.525419
| 0.42574
| 0.366346
| 0.334118
| 0.305098
| 0
| 0.033925
| 0.339034
| 26,490
| 659
| 178
| 40.197269
| 0.767091
| 0.048622
| 0
| 0.380952
| 0
| 0.102041
| 0.019766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034014
| false
| 0.004535
| 0.011338
| 0
| 0.054422
| 0.002268
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
decaa14b52fa5524baf2d5d190931296e44de823
| 2,018
|
py
|
Python
|
Modules/CrossMapLRN.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 52
|
2020-02-28T20:40:15.000Z
|
2021-08-25T05:35:17.000Z
|
Modules/CrossMapLRN.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 2
|
2021-02-14T15:57:03.000Z
|
2021-10-05T12:21:34.000Z
|
Modules/CrossMapLRN.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 8
|
2020-02-28T20:40:11.000Z
|
2020-07-09T13:27:23.000Z
|
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Backend.Dnn import crossMapLRN, crossMapLRNBackward
from PuzzleLib.Modules.LRN import LRN
class CrossMapLRN(LRN):
def __init__(self, N=5, alpha=1e-4, beta=0.75, K=2.0, name=None):
super().__init__(N, alpha, beta, K, name)
self.gradUsesOutData = True
def updateData(self, data):
self.data, self.workspace = crossMapLRN(data, N=self.N, alpha=self.alpha, beta=self.beta, K=self.K,
test=not self.train)
def updateGrad(self, grad):
self.grad = crossMapLRNBackward(self.inData, self.data, grad, self.workspace,
N=self.N, alpha=self.alpha, beta=self.beta, K=self.K)
def unittest():
maps = 10
data = gpuarray.to_gpu(np.random.randn(1, maps, 1, 1).astype(np.float32))
crossMapLrn = CrossMapLRN()
crossMapLrn(data)
lookBehind = int((crossMapLrn.N - 1) / 2)
lookAhead = crossMapLrn.N - lookBehind
hostData = data.get().reshape(maps, ).astype(np.float32)
norms = np.empty((maps, ), dtype=np.float32)
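# Reference computation of cross-map LRN: for each channel i,
# out[i] = in[i] / (K + alpha/N * sum of in[j]**2 over the window)**beta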
for i in range(maps):
norm = 0.0
for j in range(max(0, i - lookBehind), min(maps, i + lookAhead)):
norm += hostData[j]**2
norms[i] = crossMapLrn.K + norm * crossMapLrn.alpha / crossMapLrn.N
hostOutData = hostData / norms**crossMapLrn.beta
assert np.allclose(hostOutData, crossMapLrn.data.get().reshape(maps, ).astype(np.float32))
grad = gpuarray.to_gpu(np.random.randn(1, maps, 1, 1).astype(np.float32))
crossMapLrn.backward(grad)
hostGrad = grad.get().reshape(maps, ).astype(np.float32)
hostInGrad = np.zeros((maps, ), dtype=np.float32)
k = 2.0 * crossMapLrn.alpha * crossMapLrn.beta / crossMapLrn.N
for i in range(maps):
hostInGrad[i] += hostGrad[i] / norms[i]**crossMapLrn.beta
for j in range(max(0, i - lookBehind), min(maps, i + lookAhead)):
hostInGrad[j] -= hostGrad[i] * k * hostData[i] * hostData[j] / norms[i]**(crossMapLrn.beta+1)
assert np.allclose(hostInGrad, crossMapLrn.grad.get().reshape(maps, ).astype(np.float32))
if __name__ == "__main__":
unittest()
| 32.031746
| 101
| 0.700198
| 296
| 2,018
| 4.712838
| 0.260135
| 0.051613
| 0.064516
| 0.057348
| 0.316846
| 0.295341
| 0.295341
| 0.200717
| 0.200717
| 0.200717
| 0
| 0.024277
| 0.142716
| 2,018
| 62
| 102
| 32.548387
| 0.782081
| 0
| 0
| 0.095238
| 0
| 0
| 0.003964
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
decc19f50e9a41be1bc95cb6e0bf5f4f77162b78
| 4,802
|
py
|
Python
|
src/metrics.py
|
enryH/specpride
|
1bedd87dc8f31a6b86426c6e03dc0c27706bc9aa
|
[
"Apache-2.0"
] | 2
|
2020-01-14T12:02:52.000Z
|
2020-01-14T14:03:30.000Z
|
src/metrics.py
|
enryH/specpride
|
1bedd87dc8f31a6b86426c6e03dc0c27706bc9aa
|
[
"Apache-2.0"
] | 5
|
2019-12-09T10:59:10.000Z
|
2020-01-16T14:32:00.000Z
|
src/metrics.py
|
enryH/specpride
|
1bedd87dc8f31a6b86426c6e03dc0c27706bc9aa
|
[
"Apache-2.0"
] | 9
|
2020-01-14T12:26:54.000Z
|
2020-01-16T08:26:06.000Z
|
import copy
from typing import Iterable
import numba as nb
import numpy as np
import spectrum_utils.spectrum as sus
def dot(spectrum1: sus.MsmsSpectrum, spectrum2: sus.MsmsSpectrum,
fragment_mz_tolerance: float) -> float:
"""
Compute the dot product between the given spectra.
Parameters
----------
spectrum1 : sus.MsmsSpectrum
The first spectrum.
spectrum2 : sus.MsmsSpectrum
The second spectrum.
fragment_mz_tolerance : float
The fragment m/z tolerance used to match peaks.
Returns
-------
float
The dot product similarity between the given spectra.
"""
return _dot(spectrum1.mz, _norm_intensity(np.copy(spectrum1.intensity)),
spectrum2.mz, _norm_intensity(np.copy(spectrum2.intensity)),
fragment_mz_tolerance)
@nb.njit
def _norm_intensity(spectrum_intensity: np.ndarray) -> np.ndarray:
"""
Normalize spectrum peak intensities.
Parameters
----------
spectrum_intensity : np.ndarray
The spectrum peak intensities to be normalized.
Returns
-------
np.ndarray
The normalized peak intensities.
"""
return spectrum_intensity / np.linalg.norm(spectrum_intensity)
@nb.njit
def _dot(mz: np.ndarray, intensity: np.ndarray, mz_other: np.ndarray,
intensity_other: np.ndarray, fragment_mz_tol: float) -> float:
"""
Compute the dot product between the given spectra.
Note: Spectrum intensities should be normalized prior to computing the dot
product.
Parameters
----------
mz : np.ndarray
The first spectrum's m/z values.
intensity : np.ndarray
The first spectrum's intensity values.
mz_other : np.ndarray
The second spectrum's m/z values.
intensity_other : np.ndarray
The second spectrum's intensity values.
fragment_mz_tol : float
The fragment m/z tolerance used to match peaks in both spectra with
each other.
Returns
-------
float
The dot product between both spectra.
"""
fragment_i, fragment_other_i, score = 0, 0, 0.
for fragment_i in range(len(mz)):
while (fragment_other_i < len(mz_other) - 1 and
mz_other[fragment_other_i] < mz[fragment_i] - fragment_mz_tol):
fragment_other_i += 1
# Check the index bound before indexing, so a match on the last peak
# cannot cause an out-of-bounds read on the next iteration.
if (fragment_other_i < len(mz_other) and
abs(mz[fragment_i] - mz_other[fragment_other_i]) <= fragment_mz_tol):
score += intensity[fragment_i] * intensity_other[fragment_other_i]
fragment_other_i += 1
return score
def avg_dot(representative: sus.MsmsSpectrum,
cluster_spectra: Iterable[sus.MsmsSpectrum],
fragment_mz_tolerance: float) -> float:
"""
Compute the average dot product between the cluster representative and all
cluster members.
Parameters
----------
representative : sus.MsmsSpectrum
The cluster representative spectrum.
cluster_spectra : Iterable[sus.MsmsSpectrum]
The cluster member spectra.
fragment_mz_tolerance : float
Fragment m/z tolerance used during spectrum comparison.
Returns
-------
float
The average dot product between the cluster representative and all
cluster members.
"""
return np.mean([dot(representative, spectrum, fragment_mz_tolerance)
for spectrum in cluster_spectra])
def fraction_by(representative: sus.MsmsSpectrum,
cluster_spectra: Iterable[sus.MsmsSpectrum],
fragment_mz_tolerance: float) -> float:
"""
Compute the fraction of intensity that is explained by the b and y-ions of
the representative spectrum.
This will be 0 if no peptide sequence is associated with the representative
spectrum.
Parameters
----------
representative : sus.MsmsSpectrum
The cluster representative spectrum.
cluster_spectra : Iterable[sus.MsmsSpectrum]
The cluster member spectra. Ignored.
fragment_mz_tolerance : float
Fragment m/z tolerance used to annotate the peaks of the representative
spectrum.
Returns
-------
float
The fraction of intensity that is explained by the b and y-ions of the
representative spectrum.
"""
if representative.peptide is None:
return 0
representative = (copy.copy(representative)
.remove_precursor_peak(fragment_mz_tolerance, 'Da')
.annotate_peptide_fragments(fragment_mz_tolerance, 'Da'))
annotated_peaks = [i for i, annot in enumerate(representative.annotation)
if annot is not None]
return (representative.intensity[annotated_peaks].sum()
/ representative.intensity.sum())
| 31.592105
| 79
| 0.660975
| 563
| 4,802
| 5.488455
| 0.197158
| 0.045307
| 0.061489
| 0.046602
| 0.469579
| 0.414887
| 0.370874
| 0.350162
| 0.350162
| 0.308738
| 0
| 0.004499
| 0.259475
| 4,802
| 151
| 80
| 31.801325
| 0.864454
| 0.465223
| 0
| 0.209302
| 0
| 0
| 0.001808
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.116279
| 0
| 0.372093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deccbee42c5be781692fc226272ac89e27a4e7a6
| 797
|
py
|
Python
|
examples/multi-class_neural_network.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 12
|
2020-05-10T12:11:06.000Z
|
2021-10-31T13:23:55.000Z
|
examples/multi-class_neural_network.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | null | null | null |
examples/multi-class_neural_network.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 2
|
2021-01-17T06:22:05.000Z
|
2021-01-18T14:32:51.000Z
|
"""
This example shows how to build a multi-class neural network with a BP (back-propagation) network.
"""
import sys
import classicML as cml
DATASET_PATH = './datasets/iris_dataset.csv'
CALLBACKS = [cml.callbacks.History(loss_name='categorical_crossentropy',
metric_name='accuracy')]
# load the data
ds = cml.data.Dataset(label_mode='one-hot',
standardization=True,
name='iris')
ds.from_csv(DATASET_PATH)
# build the neural network
model = cml.BPNN(seed=2021)
model.compile(network_structure=[4, 2, 3],
optimizer='sgd',
loss='categorical_crossentropy',
metric='accuracy')
# train the neural network
model.fit(ds.x, ds.y, epochs=1000, verbose=True, callbacks=CALLBACKS)
# plot the training history (if you are on macOS, comment this line out; it exists for testing on CI.)
if sys.platform != 'darwin':
cml.plots.plot_history(CALLBACKS[0])
| 28.464286
| 72
| 0.644918
| 89
| 797
| 5.651685
| 0.662921
| 0.043738
| 0.115308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0.224592
| 797
| 27
| 73
| 29.518519
| 0.794498
| 0.117942
| 0
| 0
| 0
| 0
| 0.160405
| 0.108382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dece77460bb0515a4dff433a0f6f8e80d7adc76c
| 3,735
|
py
|
Python
|
yiffscraper/downloader.py
|
ScraperT/yiffscraper
|
49482a544fc7f11e6ea5db2626dbc2404529d656
|
[
"MIT"
] | 42
|
2019-12-23T23:55:12.000Z
|
2022-02-07T04:12:59.000Z
|
yiffscraper/downloader.py
|
arin17bishwa/yiffscraper
|
49482a544fc7f11e6ea5db2626dbc2404529d656
|
[
"MIT"
] | 7
|
2020-01-12T13:04:56.000Z
|
2020-05-18T07:11:51.000Z
|
yiffscraper/downloader.py
|
arin17bishwa/yiffscraper
|
49482a544fc7f11e6ea5db2626dbc2404529d656
|
[
"MIT"
] | 7
|
2020-03-12T03:47:53.000Z
|
2020-07-26T08:05:55.000Z
|
import os
import platform
from datetime import datetime
import time
from pathlib import Path
import asyncio
from dateutil.parser import parse as parsedate
from dateutil import tz
import aiohttp
def longpath(p):
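# On Windows, the "\\?\" prefix on an absolute path lifts the legacy
# 260-character MAX_PATH limit; other platforms need no change.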
if p is None or platform.system() != "Windows":
return Path(p)
return Path("\\\\?\\" + str(Path.cwd() / p))
class UrlItem:
__slots__ = ("url", "size", "lastModified", "path")
def __init__(self, url, size, lastModified, path=None):
self.url = url
self.size = size
self.lastModified = lastModified
self.path = longpath(path)
def needsUpdate(self):
if self.path is None:
return False
fileLastModified = getFileTime(self.path)
if self.lastModified is None or fileLastModified is None:
return True
return self.lastModified > fileLastModified
@classmethod
async def fetchMetadata(cls, session, url, path):
async with session.head(url, allow_redirects=True) as response:
try:
response.raise_for_status()
except aiohttp.ClientResponseError as e:
# I don't like returning Exceptions, but I can't find a better way to pass a single error in an async loop
return (None, e)
size = int(response.headers.get("content-length", 0))
lastModified = parsedateOrNone(response.headers.get("last-modified", None))
return (cls(url, size, lastModified, path), None)
async def download(self, session, update):
if self.path is None:
return
# needsUpdate() is a plain (non-async) method, so it must not be awaited
if update and not self.needsUpdate():
return
self.path.parent.mkdir(parents=True, exist_ok=True)
async with session.get(self.url) as response:
try:
response.raise_for_status()
except aiohttp.ClientResponseError as e:
# I don't like returning Exceptions, but I can't find a better way to pass a single error in an async loop
return (self, e)
with open(self.path, "wb") as out_file:
while True:
chunk = await response.content.read(8192)
if not chunk:
break
out_file.write(chunk)
url_timestamp = getTimestamp(self.lastModified)
# Only set the file times when the server supplied a last-modified date
if url_timestamp is not None:
os.utime(self.path, (url_timestamp, url_timestamp))
return (self, None)
@classmethod
async def fetchAllMetadata(cls, items):
async with newSession() as session:
tasks = [cls.fetchMetadata(session, i.url, i.path) for i in items]
for task in asyncio.as_completed(tasks):
urlitem = await task
yield urlitem
@classmethod
async def downloadAll(cls, urlitems, update):
async with newSession() as session:
tasks = [urlitem.download(session, update) for urlitem in urlitems]
for task in asyncio.as_completed(tasks):
yield await task
def __len__(self):
return self.size
def getFileTime(path):
try:
file_datetime = datetime.fromtimestamp(os.path.getmtime(path), tz=tz.tzutc())
except FileNotFoundError:
file_datetime = None
return file_datetime
def getTimestamp(t):
if t is None:
return None
timestamp = time.mktime(t.timetuple())
return timestamp
def parsedateOrNone(dateString):
if dateString is None:
return None
return parsedate(dateString)
def newSession():
connector = aiohttp.connector.TCPConnector(limit=25, limit_per_host=10)
timeout = aiohttp.ClientTimeout(total=None)
return aiohttp.ClientSession(connector=connector, timeout=timeout)
| 31.923077
| 122
| 0.626774
| 443
| 3,735
| 5.216704
| 0.309255
| 0.038944
| 0.025963
| 0.029857
| 0.234531
| 0.211164
| 0.163566
| 0.135872
| 0.135872
| 0.135872
| 0
| 0.003398
| 0.290763
| 3,735
| 116
| 123
| 32.198276
| 0.869007
| 0.055957
| 0
| 0.21978
| 0
| 0
| 0.018734
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087912
| false
| 0
| 0.098901
| 0.010989
| 0.406593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ded4491d8cef57cccb094e0f83641638968be15a
| 3,066
|
py
|
Python
|
src/tests/attention_test.py
|
feperessim/attention_keras
|
322a16ee147122026b63305aaa5e899d9e5de883
|
[
"MIT"
] | 422
|
2019-03-17T13:08:59.000Z
|
2022-03-31T12:08:29.000Z
|
src/tests/attention_test.py
|
JKhodadadi/attention_keras
|
322a16ee147122026b63305aaa5e899d9e5de883
|
[
"MIT"
] | 51
|
2019-03-17T20:08:11.000Z
|
2022-03-18T03:51:42.000Z
|
src/tests/attention_test.py
|
JKhodadadi/attention_keras
|
322a16ee147122026b63305aaa5e899d9e5de883
|
[
"MIT"
] | 285
|
2019-03-17T19:06:22.000Z
|
2022-03-31T02:29:17.000Z
|
import pytest
from layers.attention import AttentionLayer
from tensorflow.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
import tensorflow as tf
def test_attention_layer_standalone_fixed_b_fixed_t():
"""
Tests fixed batch size and time steps
    Encoder and decoder have variable seq length and latent dim
"""
inp1 = Input(batch_shape=(5,10,15))
inp2 = Input(batch_shape=(5,15,25))
out, e_out = AttentionLayer()([inp1, inp2])
assert out.shape == tf.TensorShape([inp2.shape[0], inp2.shape[1], inp1.shape[2]])
assert e_out.shape == tf.TensorShape([inp1.shape[0], inp2.shape[1], inp1.shape[1]])
def check_tensorshape_equal(shape1, shape2):
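    # Compare shapes dimension-wise; unknown (None) dimensions count as a match.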
print(shape1, shape2)
equal = []
for s1, s2 in zip(shape1, shape2):
        if s1 is None or s2 is None:
            equal.append(True)
        else:
            equal.append(s1 == s2)
return all(equal)
def test_attention_layer_standalone_none_b_fixed_t():
inp1 = Input(shape=(10, 15))
inp2 = Input(shape=(15, 25))
out, e_out = AttentionLayer()([inp1, inp2])
assert check_tensorshape_equal(out.shape, tf.TensorShape([None, inp2.shape[1], inp1.shape[2]]))
assert check_tensorshape_equal(e_out.shape, tf.TensorShape([None, inp2.shape[1], inp1.shape[1]]))
def test_attention_layer_standalone_none_b_none_t():
inp1 = Input(shape=(None, 15))
inp2 = Input(shape=(None, 25))
out, e_out = AttentionLayer()([inp1, inp2])
assert check_tensorshape_equal(out.shape, tf.TensorShape([None, None, inp1.shape[2]]))
assert check_tensorshape_equal(e_out.shape, tf.TensorShape([None, None, None]))
'''def test_attention_layer_nmt_none_b_fixed_t():
encoder_inputs = Input(shape=(12, 75), name='encoder_inputs')
decoder_inputs = Input(shape=(16 - 1, 80), name='decoder_inputs')
# Encoder GRU
encoder_gru = GRU(32, return_sequences=True, return_state=True, name='encoder_gru')
encoder_out, encoder_state = encoder_gru(encoder_inputs)
# Set up the decoder GRU, using `encoder_states` as initial state.
decoder_gru = GRU(32, return_sequences=True, return_state=True, name='decoder_gru')
decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)
# Attention layer
attn_layer = AttentionLayer(name='attention_layer')
attn_out, attn_states = attn_layer([encoder_out, decoder_out])
# Concat attention input and decoder GRU output
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])
# Dense layer
dense = Dense(80, activation='softmax', name='softmax_layer')
dense_time = TimeDistributed(dense, name='time_distributed_layer')
decoder_pred = dense_time(decoder_concat_input)
# Full model
full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)
full_model.compile(optimizer='adam', loss='categorical_crossentropy')
assert decoder_pred.shape == tf.TensorShape([])
def test_attention_layer_nmt_none_b_none_t():
pass'''
| 37.390244
| 101
| 0.7182
| 427
| 3,066
| 4.915691
| 0.238876
| 0.046689
| 0.060029
| 0.060029
| 0.336827
| 0.311577
| 0.307766
| 0.216293
| 0.216293
| 0.19676
| 0
| 0.032571
| 0.158839
| 3,066
| 82
| 102
| 37.390244
| 0.781311
| 0.031311
| 0
| 0.09375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.125
| false
| 0
| 0.15625
| 0
| 0.3125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ded5e7681d684ad45f836b0b523b89035ed45f16
| 1,572
|
py
|
Python
|
Python/9248_Suffix_Array/9248_suffix_array_lcp_array.py
|
ire4564/Baekjoon_Solutions
|
3e6689efa30d6b850cdc29570c76408a1e1b2b49
|
[
"Apache-2.0"
] | 4
|
2020-11-17T09:52:29.000Z
|
2020-12-13T11:36:14.000Z
|
Python/9248_Suffix_Array/9248_suffix_array_lcp_array.py
|
ire4564/Baekjoon_Solutions
|
3e6689efa30d6b850cdc29570c76408a1e1b2b49
|
[
"Apache-2.0"
] | 2
|
2020-11-19T11:21:02.000Z
|
2020-11-19T22:07:15.000Z
|
Python/9248_Suffix_Array/9248_suffix_array_lcp_array.py
|
ire4564/Baekjoon_Solutions
|
3e6689efa30d6b850cdc29570c76408a1e1b2b49
|
[
"Apache-2.0"
] | 12
|
2020-11-17T06:55:13.000Z
|
2021-05-16T14:39:37.000Z
|
from itertools import zip_longest, islice
def to_int_keys_best(l):
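    # Map each element to the rank of its value among the distinct values (order-preserving integer keys).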
seen = set()
ls = []
for e in l:
        if e not in seen:
ls.append(e)
seen.add(e)
ls.sort()
index = {v: i for i, v in enumerate(ls)}
return [index[v] for v in l]
def suffix_array_best(s):
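    # Prefix-doubling suffix ranking: each round compares 2^k-length prefixes,
    # O(n log^2 n) overall; returns the rank (inverse suffix) array.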
n = len(s)
k = 1
line = to_int_keys_best(s)
while max(line) < n - 1:
line = to_int_keys_best(
[a * (n + 1) + b + 1
for (a, b) in
zip_longest(line, islice(line, k, None),
fillvalue=-1)])
k <<= 1
return line
def lcp_array(s, sa):
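    # Kasai's algorithm: process suffixes in text order so each LCP value reuses
    # the previous one minus at most 1, giving O(n) total work.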
n = len(s)
k = 0
lcp = [0] * n
rank = [0] * n
for i in range(n):
rank[sa[i]] = i
for i in range(n):
if rank[i] == n - 1:
k = 0
continue
j = sa[rank[i] + 1]
while i + k < n and j + k < n and s[i + k] == s[j + k]:
k += 1
        lcp[rank[i]] = k
if k:
k -= 1
return lcp
def inverse_array(l):
n = len(l)
ans = [0] * n
for i in range(n):
ans[l[i]] = i
return ans
if __name__ == '__main__':
L = input()
inverse_suffix_array = suffix_array_best(L)
suffix_array = inverse_array(inverse_suffix_array)
for item in suffix_array:
print(item + 1, end=' ')
LCP = lcp_array(L, suffix_array)
LCP.pop()
LCP.insert(0, 'x')
print()
for item in LCP:
print(item, end=' ')
| 20.684211
| 64
| 0.448473
| 235
| 1,572
| 2.855319
| 0.259574
| 0.114754
| 0.040238
| 0.058122
| 0.113264
| 0.09538
| 0.041729
| 0
| 0
| 0
| 0
| 0.018785
| 0.4243
| 1,572
| 75
| 65
| 20.96
| 0.722652
| 0
| 0
| 0.118644
| 0
| 0
| 0.007353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.016949
| 0
| 0.152542
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ded667020b68f181edc8b21f22dbb71557c2c7cc
| 1,329
|
py
|
Python
|
lgr/tools/compare/utils.py
|
ron813c/lgr-core
|
68ba730bf7f9e61cb97c9c08f61bc58b8ea24e7b
|
[
"BSD-3-Clause"
] | 7
|
2017-07-10T22:39:52.000Z
|
2021-06-25T20:19:28.000Z
|
lgr/tools/compare/utils.py
|
ron813c/lgr-core
|
68ba730bf7f9e61cb97c9c08f61bc58b8ea24e7b
|
[
"BSD-3-Clause"
] | 13
|
2016-10-26T19:42:00.000Z
|
2021-12-13T19:43:42.000Z
|
lgr/tools/compare/utils.py
|
ron813c/lgr-core
|
68ba730bf7f9e61cb97c9c08f61bc58b8ea24e7b
|
[
"BSD-3-Clause"
] | 8
|
2016-11-07T15:40:27.000Z
|
2020-09-22T13:48:52.000Z
|
# -*- coding: utf-8 -*-
"""
utils.py - Definition of utility functions.
"""
from collections import namedtuple
from lgr.utils import format_cp
VariantProperties = namedtuple('VariantProperties', ['cp', 'type',
'when', 'not_when',
'comment'])
def display_variant(variant):
"""
Nicely display a variant.
:param variant: The variant to display.
"""
return "Variant {}: type={} - when={} - not-when={} - comment={}".format(
format_cp(variant.cp), variant.type,
variant.when, variant.not_when,
variant.comment)
def compare_objects(first, second, cmp_fct):
"""
Compare two objects, possibly None.
:param first: First object.
:param second: Second object.
:param cmp_fct: A comparison function.
:return: The "greatest" object according to `cmp_fct`,
None if both values are None.
>>> compare_objects(1, 2, max)
2
>>> compare_objects(1, 2, min)
1
>>> compare_objects(None, None, max) is None
True
>>> compare_objects(1, None, min)
1
>>> compare_objects(None, 1, min)
1
"""
if first is None:
return second
if second is None:
return first
return cmp_fct(first, second)
| 24.611111
| 77
| 0.574116
| 152
| 1,329
| 4.921053
| 0.355263
| 0.112299
| 0.06016
| 0.040107
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011931
| 0.306245
| 1,329
| 53
| 78
| 25.075472
| 0.799349
| 0.413845
| 0
| 0
| 0
| 0
| 0.144756
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ded78378f0da72d7d6e0a021bbb1b4a6004db8f0
| 2,386
|
py
|
Python
|
tests/test__file_object.py
|
StateArchivesOfNorthCarolina/tomes_metadata
|
8b73096c1b16e0db2895a6c01d4fc4fd9621cf55
|
[
"MIT"
] | null | null | null |
tests/test__file_object.py
|
StateArchivesOfNorthCarolina/tomes_metadata
|
8b73096c1b16e0db2895a6c01d4fc4fd9621cf55
|
[
"MIT"
] | 2
|
2018-09-12T20:36:22.000Z
|
2018-09-13T20:14:50.000Z
|
tests/test__file_object.py
|
StateArchivesOfNorthCarolina/tomes-packager
|
8b73096c1b16e0db2895a6c01d4fc4fd9621cf55
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# import modules.
import sys; sys.path.append("..")
import hashlib
import json
import logging
import os
import plac
import unittest
import warnings
from tomes_packager.lib.directory_object import *
from tomes_packager.lib.file_object import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_FileObject(unittest.TestCase):
def setUp(self):
# set attributes.
self.sample_file = __file__
self.sample_dir = os.path.dirname(self.sample_file)
self.dir_obj = DirectoryObject(self.sample_dir)
self.file_obj = FileObject(self.sample_file, self.dir_obj, self.dir_obj, 0)
def test__mimetype(self):
""" Is the MIME type for @self.file_obj correct? """
# get mime via mimetypes.guess_type.
mime = mimetypes.guess_type(self.sample_file)[0]
# make sure the FileObject mimetype is the same.
self.assertEqual(mime, self.file_obj.mimetype())
def test__checksum(self):
""" Is the SHA-1 hash for @self.file_obj correct? """
# get SHA-1 value of @self.sample_file via hashlib.
sha1 = hashlib.sha1()
with open(self.sample_file, "rb") as f:
sha1.update(f.read())
sha1 = sha1.hexdigest()
# get FileObject SHA-1 hash and suppress ResourceWarning in unittest.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sha1_obj = self.file_obj.checksum("SHA-1")
# make sure hashes are equal.
self.assertEqual(sha1, sha1_obj)
# CLI.
def main(filepath:("file path")):
"Converts a file to a FolderObject and prints its attributes to screen as JSON.\
\nexample: `python3 test__file_object.py sample_files/sample_rdf.xlsx`"
# convert @filepath to a FileObject.
dir_obj = DirectoryObject(os.path.dirname(filepath))
file_obj = FileObject(filepath, dir_obj, dir_obj, 0)
# collect @file_obj attributes.
fdict = {}
for att in file_obj.__dict__:
if att[0] == "_":
continue
try:
val = getattr(file_obj, att)()
except TypeError:
val = getattr(file_obj, att)
fdict[att] = str(val)
# convert @fdict to JSON.
js = json.dumps(fdict, indent=2)
print(js)
if __name__ == "__main__":
plac.call(main)
| 27.744186
| 84
| 0.642079
| 311
| 2,386
| 4.733119
| 0.376206
| 0.047554
| 0.057065
| 0.027174
| 0.092391
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0.01068
| 0.254401
| 2,386
| 86
| 85
| 27.744186
| 0.816751
| 0.266555
| 0
| 0
| 0
| 0
| 0.017572
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.085106
| false
| 0
| 0.212766
| 0
| 0.319149
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deda4206dc73f8dbe4b33d7d756e79510962b4d8
| 10,829
|
py
|
Python
|
game.py
|
IliketoTranslate/Pickaxe-clicker
|
e74ebd66842bd47c4ed1c4460e9f45e30a2ad1d7
|
[
"MIT"
] | null | null | null |
game.py
|
IliketoTranslate/Pickaxe-clicker
|
e74ebd66842bd47c4ed1c4460e9f45e30a2ad1d7
|
[
"MIT"
] | null | null | null |
game.py
|
IliketoTranslate/Pickaxe-clicker
|
e74ebd66842bd47c4ed1c4460e9f45e30a2ad1d7
|
[
"MIT"
] | null | null | null |
import pygame
icon = pygame.image.load("diamond_pickaxe.png")
screen_weight = 1750
screen_height = 980
pygame.init()
window = pygame.display.set_mode((screen_weight, screen_height))
pygame.display.set_caption('Pickaxe clicker')
pygame.display.set_icon(icon)
# variables
wytrzymałość_kilofa = 50
max_wytrzymałość_kilofa = 50
dodaj2 = 1
record = 0
game_version = "0.2.2"
last_update = "28.01.2022"
x_for_kilof = 400
y_for_kilof = 400
x_for_button1 = 1030
y_for_button1 = 80
x_for_button2 = 1030
y_for_button2 = 800
boost = 1
doswiadczenie = 0
dodaj = 1
max_dodaj = 1
kilof_upgrade = 100
choosed_kilof = 1
# objects
kilof = pygame.image.load("Drewniany_kilof.png")
kilof2 = pygame.image.load("Kamienny_kilof.png")
kilof3 = pygame.image.load("Zelazny_kilof.png")
kilof4 = pygame.image.load("Zloty_kilof.png")
kilof5 = pygame.image.load("Diamentowy_kilof.png")
button_upgrade = pygame.image.load("Button_upgrade.png")
button_upgrade_clicked = pygame.image.load("Button_upgrade_clicked.png")
button_upgrade2 = pygame.image.load("Button_upgrade2.png")
button_upgrade2_clicked = pygame.image.load("Button_upgrade2_clicked.png")
button_restart = pygame.image.load("Button_restart.png")
tlo = pygame.image.load("tlo.png")
tlo = pygame.transform.scale(tlo, (screen_weight, screen_height)) # scale to the window size
# hitboxes
kilof_hitbox = pygame.rect.Rect(x_for_kilof, y_for_kilof, 160, 160) # creates the hitbox for the pickaxe
button_upgrade_hitbox = pygame.rect.Rect(x_for_button1, y_for_button1, 650, 100) # creates the hitbox for the button
button_upgrade2_hitbox = pygame.rect.Rect(x_for_button2, y_for_button2, 650, 100)
# functions
def draw_object(object, x, y) :
    window.blit(object, (x, y)) # draws the object
def draw_hitbox(object) :
pygame.draw.rect(window, (93, 32, 32), object)
def zdarzenia_z_myszką() :
    # operate on the module-level game state instead of local copies,
    # so progress persists between frames
    global wytrzymałość_kilofa, max_wytrzymałość_kilofa, dodaj, dodaj2
    global max_dodaj, doswiadczenie, boost, kilof_upgrade, choosed_kilof
kilof_upgrade2 = kilof_upgrade - 1
if wytrzymałość_kilofa == 0 :
dodaj = 0
dodaj2 = 0
else :
dodaj2 = 1
dodaj = max_dodaj
if choosed_kilof > 0 and choosed_kilof < 5 :
        if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > kilof_upgrade2 : # if the mouse is over the hitbox
            if pygame.mouse.get_pressed()[0]: # if the left mouse button was pressed
doswiadczenie = doswiadczenie - kilof_upgrade
if wytrzymałość_kilofa == 0 :
choosed_kilof = 1
kilof_upgrade = 100
dodaj = 0
dodaj2 = 0
wytrzymałość_kilofa = max_wytrzymałość_kilofa
else :
dodaj2 = 1
choosed_kilof += 1
max_wytrzymałość_kilofa = max_wytrzymałość_kilofa * 2
kilof_upgrade = kilof_upgrade * 2
wytrzymałość_kilofa = max_wytrzymałość_kilofa
pygame.time.wait(50)
else :
max_wytrzymałość_kilofa = 800
kilof_upgrade = 10000000000
        if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > kilof_upgrade2 : # if the mouse is over the hitbox
            if pygame.mouse.get_pressed()[0]: # if the left mouse button was pressed
wytrzymałość_kilofa = max_wytrzymałość_kilofa
pygame.time.wait(50)
if kilof_hitbox.collidepoint(pygame.mouse.get_pos()):
if pygame.mouse.get_pressed()[0]:
pygame.time.wait(100)
doswiadczenie += dodaj
wytrzymałość_kilofa = wytrzymałość_kilofa - dodaj2
boost2 = boost - 1
if button_upgrade_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > boost2:
if pygame.mouse.get_pressed()[0]:
max_dodaj += choosed_kilof
doswiadczenie = doswiadczenie - boost
boost = boost * 2
pygame.time.wait(100)
if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()):
        draw_object(button_upgrade2_clicked, x_for_button2, y_for_button2) # draw the button
        draw_object(text_kilof, 1040, 840) # draw text 2
    else :
        draw_object(button_upgrade2, x_for_button2, y_for_button2) # draw the button
        draw_object(text_kilof, 1040, 840) # draw text 2
    if button_upgrade_hitbox.collidepoint(pygame.mouse.get_pos()):
        draw_object(button_upgrade_clicked, x_for_button1, y_for_button1) # draw the button
        draw_object(text_ulepszenie, 1040, 110) # draw text 2
    else :
        draw_object(button_upgrade, x_for_button1, y_for_button1) # draw the button
        draw_object(text_ulepszenie, 1040, 110) # draw text 2
run = True
while run:
    pygame.time.Clock().tick(100) # cap at 100 fps
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # if the player closes the window
run = False
keys = pygame.key.get_pressed()
if keys[pygame.K_ESCAPE] :
run = False
    # text labels
kilof_upgrade2 = kilof_upgrade - 1
    text_wersja = pygame.font.Font.render(pygame.font.SysFont("Freemono", 50), f"Version : {game_version} | Last update : {last_update}", True, (255, 200, 100)) # generate the text
    text_doswiadczenie = pygame.font.Font.render(pygame.font.SysFont("Dyuthi", 72), f"Doswiadczenie : {doswiadczenie}", True, (100, 100, 100)) # generate the text
    text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}", True, (255, 255, 255)) # generate the text
    text_WIP = pygame.font.Font.render(pygame.font.SysFont("Waree", 25), f"W I P (WORK IN PROGRESS)", True, (255, 255, 255)) # generate the text 2
    text_wytrzymałość_kilofa = pygame.font.Font.render(pygame.font.SysFont("Dyuthi", 50), f"Wytrzymalosc kilofa : {wytrzymałość_kilofa}", True, (255, 255, 255)) # generate the text 2
text_record = pygame.font.Font.render(pygame.font.SysFont("Liberation Serif", 50), f"Record : {record}", True, (150, 150, 150))
    if choosed_kilof > 0 and choosed_kilof < 5 :
        if doswiadczenie > kilof_upgrade2 :
            text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}, Dostepne", True, (255, 255, 255)) # generate the text 2
        else :
            text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}, Niedostepne", True, (255, 255, 255)) # generate the text 2
    elif choosed_kilof == 5 :
        text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Nie ma wiecej dostepnych kilofow", True, (255, 255, 255)) # generate the text 2
    boost2 = boost - 1
    if doswiadczenie > boost2 :
        text_ulepszenie = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Ulepszenie kilofa | Koszt : {boost}, Dostepne", True, (255, 255, 255)) # generate the text
    else :
        text_ulepszenie = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Ulepszenie kilofa | Koszt : {boost}, Niedostepne", True, (255, 255, 255)) # generate the text
    window.blit(tlo, (0, 0)) # draw the background
    # draw the hitboxes
    draw_hitbox(kilof_hitbox) # draw the pickaxe hitbox
    draw_hitbox(button_upgrade_hitbox) # draw the upgrade button hitbox
    draw_hitbox(button_upgrade2_hitbox) # draw the upgrade2 button hitbox
    # draw the objects
    if choosed_kilof == 1 : draw_object(kilof, x_for_kilof, y_for_kilof) # draw the pickaxe
elif choosed_kilof == 2 : draw_object(kilof2, x_for_kilof, y_for_kilof)
elif choosed_kilof == 3 : draw_object(kilof3, x_for_kilof, y_for_kilof)
elif choosed_kilof == 4 : draw_object(kilof4, x_for_kilof, y_for_kilof)
elif choosed_kilof == 5 or choosed_kilof > 5 : draw_object(kilof5, x_for_kilof, y_for_kilof)
    draw_object(button_upgrade, x_for_button1, y_for_button1) # draw the button
    draw_object(button_upgrade2, x_for_button2, y_for_button2) # draw button 2
    draw_object(button_restart, 0, 0)
    draw_object(text_doswiadczenie, 224, 100) # draw the text
    draw_object(text_ulepszenie, 1040, 110) # draw text 2
    draw_object(text_wersja, 10, 5) # draw text 3
draw_object(text_kilof, 1040, 840)
draw_object(text_WIP, 1170, 750)
draw_object(text_wytrzymałość_kilofa, 250, 300)
draw_object(text_record, 1280, 0)
    # handle mouse events
zdarzenia_z_myszką()
    # update the record
if doswiadczenie > record :
record = doswiadczenie
#if x_for_button1 > 80 :
#if x_for_button2 > 800 :
    # render the frame
pygame.display.update()
| 49.447489
| 296
| 0.576138
| 1,196
| 10,829
| 4.982441
| 0.159699
| 0.040275
| 0.030206
| 0.036919
| 0.568216
| 0.513845
| 0.464004
| 0.411982
| 0.383118
| 0.298204
| 0
| 0.064236
| 0.341583
| 10,829
| 219
| 297
| 49.447489
| 0.771529
| 0.100286
| 0
| 0.494118
| 0
| 0
| 0.080367
| 0.007634
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017647
| false
| 0
| 0.005882
| 0
| 0.023529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dedba85b4c2428f8778fd3f7f0d4d19fee14a759
| 4,383
|
py
|
Python
|
tests/test_predictor.py
|
WeijieChen2017/pytorch-3dunet
|
15c782481cb7bc3e2083a80bcc8b114cc8697c20
|
[
"MIT"
] | 1
|
2021-08-04T04:03:37.000Z
|
2021-08-04T04:03:37.000Z
|
tests/test_predictor.py
|
LalithShiyam/pytorch-3dunet
|
f6b6c13cb0bb6194e95976b0245b76aaa9e9a496
|
[
"MIT"
] | null | null | null |
tests/test_predictor.py
|
LalithShiyam/pytorch-3dunet
|
f6b6c13cb0bb6194e95976b0245b76aaa9e9a496
|
[
"MIT"
] | 1
|
2022-03-14T04:43:24.000Z
|
2022-03-14T04:43:24.000Z
|
import os
from tempfile import NamedTemporaryFile
import h5py
import numpy as np
import torch
from skimage.metrics import adapted_rand_error
from torch.utils.data import DataLoader
from pytorch3dunet.datasets.hdf5 import StandardHDF5Dataset
from pytorch3dunet.datasets.utils import prediction_collate, get_test_loaders
from pytorch3dunet.predict import _get_output_file, _get_predictor
from pytorch3dunet.unet3d.model import get_model
from pytorch3dunet.unet3d.predictor import EmbeddingsPredictor
from pytorch3dunet.unet3d.utils import remove_halo
class FakePredictor(EmbeddingsPredictor):
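    # Test double: returns the embeddings unchanged instead of converting them into a segmentation.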
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, **kwargs):
super().__init__(model, loader, output_file, config, clustering, iou_threshold=iou_threshold, **kwargs)
def _embeddings_to_segmentation(self, embeddings):
return embeddings
class FakeModel:
def __call__(self, input):
return input
def eval(self):
pass
class TestPredictor:
    def test_standard_predictor(self, tmpdir, test_config):
# Add output dir
test_config['loaders']['output_dir'] = tmpdir
# create random dataset
tmp = NamedTemporaryFile(delete=False)
with h5py.File(tmp.name, 'w') as f:
shape = (32, 64, 64)
f.create_dataset('raw', data=np.random.rand(*shape))
# Add input file
test_config['loaders']['test']['file_paths'] = [tmp.name]
# Create the model with random weights
model = get_model(test_config)
# Create device and update config
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
test_config['device'] = device
model = model.to(device)
for test_loader in get_test_loaders(test_config):
output_file = _get_output_file(dataset=test_loader.dataset, output_dir=tmpdir)
predictor = _get_predictor(model, test_loader, output_file, test_config)
# run the model prediction on the entire dataset and save to the 'output_file' H5
predictor.predict()
def test_embeddings_predictor(self, tmpdir):
config = {
'model': {'output_heads': 1},
'device': torch.device('cpu')
}
slice_builder_config = {
'name': 'SliceBuilder',
'patch_shape': (64, 200, 200),
'stride_shape': (40, 150, 150)
}
transformer_config = {
'raw': [
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
gt_file = 'resources/sample_ovule.h5'
output_file = os.path.join(tmpdir, 'output_segmentation.h5')
dataset = StandardHDF5Dataset(gt_file, phase='test',
slice_builder_config=slice_builder_config,
transformer_config=transformer_config,
mirror_padding=None,
raw_internal_path='label')
loader = DataLoader(dataset, batch_size=1, num_workers=1, shuffle=False, collate_fn=prediction_collate)
predictor = FakePredictor(FakeModel(), loader, output_file, config, clustering='meanshift', bandwidth=0.5)
predictor.predict()
with h5py.File(gt_file, 'r') as f:
with h5py.File(output_file, 'r') as g:
gt = f['label'][...]
segm = g['segmentation/meanshift'][...]
arand_error = adapted_rand_error(gt, segm)[0]
assert arand_error < 0.1
def test_remove_halo(self):
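        # remove_halo should strip the halo border from the patch and shrink the destination index to match.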
patch_halo = (4, 4, 4)
shape = (128, 128, 128)
input = np.random.randint(0, 10, size=(1, 16, 16, 16))
index = (slice(0, 1), slice(12, 28), slice(16, 32), slice(16, 32))
u_patch, u_index = remove_halo(input, index, shape, patch_halo)
assert np.array_equal(input[:, 4:12, 4:12, 4:12], u_patch)
assert u_index == (slice(0, 1), slice(16, 24), slice(20, 28), slice(20, 28))
index = (slice(0, 1), slice(112, 128), slice(112, 128), slice(112, 128))
u_patch, u_index = remove_halo(input, index, shape, patch_halo)
assert np.array_equal(input[:, 4:16, 4:16, 4:16], u_patch)
assert u_index == (slice(0, 1), slice(116, 128), slice(116, 128), slice(116, 128))
| 35.346774
| 114
| 0.62423
| 536
| 4,383
| 4.897388
| 0.287313
| 0.038095
| 0.024381
| 0.018286
| 0.16
| 0.136762
| 0.113524
| 0.113524
| 0.07619
| 0.053333
| 0
| 0.049643
| 0.264659
| 4,383
| 123
| 115
| 35.634146
| 0.764815
| 0.045631
| 0
| 0.04878
| 0
| 0
| 0.060105
| 0.016523
| 0
| 0
| 0
| 0
| 0.060976
| 1
| 0.085366
| false
| 0.012195
| 0.158537
| 0.02439
| 0.304878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dedbd6180bc5f6b44a69dd4d23b7983f144a3239
| 2,560
|
py
|
Python
|
catalog/views.py
|
DigimundoTesca/Tv-Mundo
|
09904759d1f4f9bf2d5c7c31b97af82c3c963bfd
|
[
"MIT"
] | null | null | null |
catalog/views.py
|
DigimundoTesca/Tv-Mundo
|
09904759d1f4f9bf2d5c7c31b97af82c3c963bfd
|
[
"MIT"
] | 6
|
2017-09-19T07:26:14.000Z
|
2017-09-27T10:06:49.000Z
|
catalog/views.py
|
DigimundoTesca/Tv-Mundo
|
09904759d1f4f9bf2d5c7c31b97af82c3c963bfd
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from catalog.models import Videos, Category, Docs, Subscriber
@login_required
def home(request):
template = 'home.html'
category = Category.objects.all()
videos = Videos.objects.all()
grade = Subscriber.objects.get(user=request.user)
context = {
'grade': grade,
'videos': videos,
'title': "Tv Mundo",
'category' : category,
}
return render(request, template, context)
@login_required
def block(request, name):
template = 'block.html'
cat = Category.objects.all()
selCat = cat.get(title=name)
title = name
context = {
'title': title,
'category': cat,
'selCat': selCat,
}
return render(request, template, context)
@login_required
def catalog(request):
template = 'catalog.html'
category = Category.objects.all()
videos = Videos.objects.all()
docs = Docs.objects.all()
title = 'Catalogo'
context = {
'docs' : docs,
'videos' : videos,
'category' : category,
'title': title,
}
return render(request, template, context)
@login_required
def videos(request, name, pk=0):
template = 'videos.html'
videos = Videos.objects.filter(category__title=name).filter(status=True)
category = Category.objects.all()
docs = Docs.objects.filter(category__title=name)
title = name
if pk == '0':
s_vid = videos[:1].get()
else:
s_vid = videos.filter(pk=pk)
s_vid = s_vid[:1].get()
context = {
's_vid': s_vid,
'videos': videos,
'docs':docs,
'category': category,
'title': title,
}
return render(request, template, context)
@login_required
def images(request, name, pk=None):
template = 'images.html'
docs = Docs.objects.filter(category__title=name).filter(kind="IMG")
category = Category.objects.all()
title = name
context = {
'category': category,
'docs': docs,
'title': title,
}
return render(request, template, context)
@login_required
def docs(request, name, pk=None):
template = 'documents.html'
docs = Docs.objects.all().filter(category__title=name)
category = Category.objects.all()
title = name
context = {
'category': category,
'docs': docs,
'title': title,
}
return render(request, template, context)
| 24.380952
| 76
| 0.622656
| 288
| 2,560
| 5.447917
| 0.1875
| 0.101976
| 0.061185
| 0.10325
| 0.564054
| 0.516252
| 0.489484
| 0.441045
| 0.313576
| 0.247291
| 0
| 0.003642
| 0.249219
| 2,560
| 104
| 77
| 24.615385
| 0.812695
| 0
| 0
| 0.551724
| 0
| 0
| 0.083984
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.045977
| 0
| 0.183908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dedc38f09d494832d839db3e999852609e6a45ac
| 519
|
py
|
Python
|
python/database/get_twitter_predict_by_order.py
|
visdata/DeepClue
|
8d80ecd783919c97ba225db67664a0dfe5f3fb37
|
[
"Apache-2.0"
] | 1
|
2020-12-06T08:04:32.000Z
|
2020-12-06T08:04:32.000Z
|
python/database/get_twitter_predict_by_order.py
|
visdata/DeepClue
|
8d80ecd783919c97ba225db67664a0dfe5f3fb37
|
[
"Apache-2.0"
] | null | null | null |
python/database/get_twitter_predict_by_order.py
|
visdata/DeepClue
|
8d80ecd783919c97ba225db67664a0dfe5f3fb37
|
[
"Apache-2.0"
] | null | null | null |
import MySQLdb
db = MySQLdb.connect('localhost', 'root', 'vis_2014', 'FinanceVis')
cursor = db.cursor()
sql = 'select predict_news_word from all_twitter where symbol=%s order by predict_news_word+0 desc'
cursor.execute(sql, ('AAPL', ))
results = cursor.fetchall()
file_twitter_predict = open('twitter_predict_AAPL.csv', 'w')  # text mode: plain strings are written below
for row in results:
predict = row[0]
if row[0] is None:
predict = 'NULL'
file_twitter_predict.write(predict+'\n')
file_twitter_predict.close()
cursor.close()
db.close()
| 25.95
| 99
| 0.714836
| 75
| 519
| 4.76
| 0.573333
| 0.156863
| 0.151261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015766
| 0.144509
| 519
| 20
| 100
| 25.95
| 0.788288
| 0
| 0
| 0
| 0
| 0
| 0.303846
| 0.046154
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dedeaccf1b8d4bb294ba8b9e2278d86179d43f0e
| 405
|
py
|
Python
|
kattis/solutions/alphabetspam.py
|
yifeng-pan/competitive_programming
|
c59edb1e08aa2db2158a814e3d34f4302658d98e
|
[
"Unlicense"
] | null | null | null |
kattis/solutions/alphabetspam.py
|
yifeng-pan/competitive_programming
|
c59edb1e08aa2db2158a814e3d34f4302658d98e
|
[
"Unlicense"
] | null | null | null |
kattis/solutions/alphabetspam.py
|
yifeng-pan/competitive_programming
|
c59edb1e08aa2db2158a814e3d34f4302658d98e
|
[
"Unlicense"
] | null | null | null |
# https://open.kattis.com/problems/alphabetspam
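# Count whitespace (encoded as '_'), lowercase, uppercase and other characters,
# printing each count as a fraction of the input length.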
import sys
import math
xs = input()
white = 0
lower = 0
higher = 0
other = 0
for i in xs:
if i == '_':
white += 1
    elif 'a' <= i <= 'z':
        lower += 1
    elif 'A' <= i <= 'Z':
higher += 1
else:
other += 1
print(white / len(xs))
print(lower / len(xs))
print(higher / len(xs))
print(other / len(xs))
| 15.576923
| 47
| 0.511111
| 61
| 405
| 3.377049
| 0.442623
| 0.097087
| 0.145631
| 0.067961
| 0.087379
| 0.087379
| 0
| 0
| 0
| 0
| 0
| 0.02807
| 0.296296
| 405
| 26
| 48
| 15.576923
| 0.694737
| 0.111111
| 0
| 0
| 0
| 0
| 0.013928
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dee0061d48e6e49cac68657f95ed5ac4927eaa8e
| 3,813
|
py
|
Python
|
src/chain_orientation_three_vars_symbolic.py
|
Scriddie/Varsortability
|
357213d5ceefb6362060c56e12c18b41dc689306
|
[
"MIT"
] | 4
|
2021-12-08T07:54:00.000Z
|
2022-03-09T07:55:21.000Z
|
src/chain_orientation_three_vars_symbolic.py
|
Scriddie/Varsortability
|
357213d5ceefb6362060c56e12c18b41dc689306
|
[
"MIT"
] | null | null | null |
src/chain_orientation_three_vars_symbolic.py
|
Scriddie/Varsortability
|
357213d5ceefb6362060c56e12c18b41dc689306
|
[
"MIT"
] | 1
|
2022-03-09T07:55:43.000Z
|
2022-03-09T07:55:43.000Z
|
import numpy as np
from sympy import simplify, sqrt, symbols
from sympy.stats import Normal, covariance as cov, variance as var
def regcoeffs(x, y, z):
covxy = cov(x, y)
covyz = cov(y, z)
varx = var(x)
vary = var(y)
varz = var(z)
# forward
f1 = simplify(covxy / varx)
f2 = simplify(covyz / vary)
# backward
b1 = simplify(covyz / varz)
b2 = simplify(covxy / vary)
return f1, f2, b1, b2
if __name__ == "__main__":
ab, bc, a, b, c = symbols([
"beta_{A_to_B}",
"beta_{B_to_C}",
"sigma_A",
"sigma_B",
"sigma_C"])
Na = Normal('Na', 0, 1)
Nb = Normal('Nb', 0, 1)
Nc = Normal('Nc', 0, 1)
# SEM
# A -> B -> C
# raw
A = a * Na
B = ab * A + b * Nb
C = bc * B + c * Nc
# standardized
As = A / sqrt(var(A))
Bs = B / sqrt(var(B))
Cs = C / sqrt(var(C))
# scale-harmonized
Am = a * Na
Bm = (ab / (ab**2 + 1)**(1/2)) * Am + b * Nb
Cm = (bc / (bc**2 + 1)**(1/2)) * Bm + c * Nc
# forward/backward coefficients in raw setting
f1, f2, b1, b2 = regcoeffs(A, B, C)
# forward/backward coefficients in standardized setting
f1s, f2s, b1s, b2s = regcoeffs(As, Bs, Cs)
# forward/backward coefficients in scale-harmonized setting
f1m, f2m, b1m, b2m = regcoeffs(Am, Bm, Cm)
for weight_range in [(0.5, 2),
(0.5, .9),
(.1, .9)]:
raw = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
std = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
moj = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
for _ in range(100000):
# draw model parameters
a_to_b, b_to_c = np.random.uniform(*weight_range, size=2)
sA, sB, sC = np.random.uniform(0.5, 2, size=3)
a_to_b *= np.random.choice([-1, 1])
b_to_c *= np.random.choice([-1, 1])
subs = {
ab: a_to_b,
bc: b_to_c,
a: sA,
b: sB,
c: sC,
}
# raw
if (abs(f1.subs(subs)) < abs(f2.subs(subs))
and abs(b1.subs(subs)) > abs(b2.subs(subs))):
raw['f1<f2,b1>b2'] += 1
elif (abs(f1.subs(subs)) > abs(f2.subs(subs))
and abs(b1.subs(subs)) < abs(b2.subs(subs))):
raw['f1>f2,b1<b2'] += 1
else:
raw['other'] += 1
# standardized
if (abs(f1s.subs(subs)) < abs(f2s.subs(subs))
and abs(b1s.subs(subs)) > abs(b2s.subs(subs))):
std['f1<f2,b1>b2'] += 1
elif (abs(f1s.subs(subs)) > abs(f2s.subs(subs))
and abs(b1s.subs(subs)) < abs(b2s.subs(subs))):
std['f1>f2,b1<b2'] += 1
else:
std['other'] += 1
# scale-harmonized
if (abs(f1m.subs(subs)) < abs(f2m.subs(subs))
and abs(b1m.subs(subs)) > abs(b2m.subs(subs))):
moj['f1<f2,b1>b2'] += 1
elif (abs(f1m.subs(subs)) > abs(f2m.subs(subs))
and abs(b1m.subs(subs)) < abs(b2m.subs(subs))):
moj['f1>f2,b1<b2'] += 1
else:
moj['other'] += 1
print('weight_range', weight_range)
raw['correct'] = raw['f1<f2,b1>b2'] + raw['other'] / 2
print('raw\t\t', raw)
std['correct'] = std['f1<f2,b1>b2'] + std['other'] / 2
print('standardized\t', std)
moj['correct'] = moj['f1<f2,b1>b2'] + moj['other'] / 2
print('Mooij-scaled\t', moj)
print()
| 28.455224
| 69
| 0.441385
| 529
| 3,813
| 3.117202
| 0.189036
| 0.116434
| 0.061856
| 0.082474
| 0.366283
| 0.311704
| 0.304427
| 0.29715
| 0.29715
| 0.29715
| 0
| 0.069379
| 0.387621
| 3,813
| 133
| 70
| 28.669173
| 0.636831
| 0.073171
| 0
| 0.122449
| 0
| 0
| 0.096334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010204
| false
| 0
| 0.030612
| 0
| 0.05102
| 0.05102
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dee0dfeab71167aee2a17e14945c71c0e31e66be
| 1,762
|
py
|
Python
|
jaffalearn/logging.py
|
tqbl/jaffalearn
|
a5bb79fcb3e84fd6e17b6356429e5885386a5a58
|
[
"0BSD"
] | null | null | null |
jaffalearn/logging.py
|
tqbl/jaffalearn
|
a5bb79fcb3e84fd6e17b6356429e5885386a5a58
|
[
"0BSD"
] | null | null | null |
jaffalearn/logging.py
|
tqbl/jaffalearn
|
a5bb79fcb3e84fd6e17b6356429e5885386a5a58
|
[
"0BSD"
] | null | null | null |
from pathlib import Path
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
class Logger:
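    # Records per-epoch results both to a CSV history file and to a TensorBoard log.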
def __init__(self, system, log_dir, overwrite=False):
self.log_path = Path(log_dir) / 'history.csv'
self.system = system
self.tb_writer = None
# Remove any previous TensorBoard log files
if overwrite:
for path in self.log_path.parent.glob('*tfevents*'):
print(f'Deleting {path}')
path.unlink()
# Read from existing log file if applicable
if overwrite or not self.log_path.exists():
self.history = pd.DataFrame()
self.history.index.name = 'epoch'
else:
self.history = pd.read_csv(self.log_path, index_col=0)
def __call__(self):
self.step()
def step(self):
# Print results to stdout
results = self.system.summarize_results()
print(', '.join(['{}: {:.4f}'.format(k, v)
for k, v in results.items()]))
# Write results to TensorBoard log file
epoch = len(self.history)
if self.tb_writer is None:
self.tb_writer = SummaryWriter(self.log_path.parent)
for key, value in results.items():
self.tb_writer.add_scalar(key, value, epoch)
self.tb_writer.file_writer.flush()
# Write results to CSV file
self.history = self.history.append(results, ignore_index=True)
self.history.to_csv(self.log_path)
self.system.clear_results()
def truncate(self, epoch):
self.history = self.history.iloc[:epoch]
self.history.to_csv(self.log_path)
def close(self):
if self.tb_writer is not None:
self.tb_writer.close()
| 30.37931
| 70
| 0.605562
| 227
| 1,762
| 4.555066
| 0.356828
| 0.106383
| 0.074468
| 0.040619
| 0.083172
| 0.052224
| 0.052224
| 0
| 0
| 0
| 0
| 0.0016
| 0.290579
| 1,762
| 57
| 71
| 30.912281
| 0.8256
| 0.097049
| 0
| 0.052632
| 0
| 0
| 0.033438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131579
| false
| 0
| 0.078947
| 0
| 0.236842
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dee0ea830b4e14533eb75ccbf58b75a95766df8d
| 3,369
|
py
|
Python
|
python/soma_workflow/constants.py
|
denisri/soma-workflow
|
bc6f2f50d34437e86e850cb0d05ff26b041d560d
|
[
"CECILL-B"
] | null | null | null |
python/soma_workflow/constants.py
|
denisri/soma-workflow
|
bc6f2f50d34437e86e850cb0d05ff26b041d560d
|
[
"CECILL-B"
] | 44
|
2018-10-30T16:57:10.000Z
|
2022-03-15T10:54:57.000Z
|
python/soma_workflow/constants.py
|
populse/soma-workflow
|
e6d3e3c33ad41107ee3c959adc4832e6edd047f4
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
author: Soizic Laguitton
organization: I2BM, Neurospin, Gif-sur-Yvette, France
organization: CATI, France
organization: IFR 49
License: `CeCILL version 2 <http://www.cecill.info/licences/Licence_CeCILL_V2-en.html>`_
'''
#
# Soma-workflow constants #
#
'''
Job status:
'''
NOT_SUBMITTED = "not_submitted"
UNDETERMINED = "undetermined"
QUEUED_ACTIVE = "queued_active"
SYSTEM_ON_HOLD = "system_on_hold"
USER_ON_HOLD = "user_on_hold"
USER_SYSTEM_ON_HOLD = "user_system_on_hold"
RUNNING = "running"
SYSTEM_SUSPENDED = "system_suspended"
USER_SUSPENDED = "user_suspended"
USER_SYSTEM_SUSPENDED = "user_system_suspended"
DONE = "done"
FAILED = "failed"
DELETE_PENDING = "delete_pending"
KILL_PENDING = "kill_pending"
SUBMISSION_PENDING = "submission_pending"
WARNING = "warning"
JOB_STATUS = [NOT_SUBMITTED,
UNDETERMINED,
QUEUED_ACTIVE,
SYSTEM_ON_HOLD,
USER_ON_HOLD,
USER_SYSTEM_ON_HOLD,
RUNNING,
SYSTEM_SUSPENDED,
USER_SUSPENDED,
USER_SYSTEM_SUSPENDED,
DONE,
FAILED,
DELETE_PENDING,
KILL_PENDING,
SUBMISSION_PENDING,
WARNING]
'''
Exit job status:
'''
EXIT_UNDETERMINED = "exit_status_undetermined"
EXIT_ABORTED = "aborted"
EXIT_NOTRUN = "aborted_before_running"
FINISHED_REGULARLY = "finished_regularly"
FINISHED_TERM_SIG = "finished_signal"
FINISHED_UNCLEAR_CONDITIONS = "finished_unclear_condition"
USER_KILLED = "killed_by_user"
JOB_EXIT_STATUS = [EXIT_UNDETERMINED,
EXIT_ABORTED,
FINISHED_REGULARLY,
FINISHED_TERM_SIG,
FINISHED_UNCLEAR_CONDITIONS,
USER_KILLED,
EXIT_NOTRUN]
'''
File transfer status:
'''
FILES_DO_NOT_EXIST = "do not exist"
FILES_ON_CLIENT = "on client side"
FILES_ON_CR = "on computing resource side"
FILES_ON_CLIENT_AND_CR = "on both sides"
TRANSFERING_FROM_CLIENT_TO_CR = "transfering client->cr"
TRANSFERING_FROM_CR_TO_CLIENT = "transfering cr->client"
FILES_UNDER_EDITION = "under edition"
FILE_TRANSFER_STATUS = [FILES_DO_NOT_EXIST,
FILES_ON_CLIENT,
FILES_ON_CR,
FILES_ON_CLIENT_AND_CR,
TRANSFERING_FROM_CLIENT_TO_CR,
TRANSFERING_FROM_CR_TO_CLIENT,
FILES_UNDER_EDITION]
'''
Transfer type
'''
TR_FILE_C_TO_CR = "file transfer form client to cr"
TR_DIR_C_TO_CR = "dir transfer from client to cr"
TR_MFF_C_TO_CR = "multi file format from client to cr"
TR_FILE_CR_TO_C = "file transfer form cr to client"
TR_DIR_CR_TO_C = "dir transfer from cr to client"
TR_MFF_CR_TO_C = "multi file format from cr to client"
TRANSFER_TYPES = [TR_FILE_C_TO_CR,
TR_DIR_C_TO_CR,
TR_MFF_C_TO_CR,
TR_FILE_CR_TO_C,
TR_DIR_CR_TO_C,
TR_MFF_CR_TO_C]
'''
Workflow status:
'''
WORKFLOW_NOT_STARTED = "worklflow_not_started"
WORKFLOW_IN_PROGRESS = "workflow_in_progress"
WORKFLOW_DONE = "workflow_done"
WORKFLOW_STATUS = [WORKFLOW_NOT_STARTED,
WORKFLOW_IN_PROGRESS,
WORKFLOW_DONE,
DELETE_PENDING,
WARNING]
| 28.310924
| 88
| 0.655091
| 407
| 3,369
| 4.95086
| 0.235872
| 0.021836
| 0.035732
| 0.027792
| 0.493797
| 0.319107
| 0.129032
| 0.031762
| 0.031762
| 0
| 0
| 0.002442
| 0.270703
| 3,369
| 118
| 89
| 28.550847
| 0.817664
| 0.078362
| 0
| 0.05
| 0
| 0
| 0.23395
| 0.038319
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dee46fc1a2825aedf140afa6a83cd03a303bce36
| 1,980
|
py
|
Python
|
lab4_2/helpers/scanner.py
|
cinnamonbreakfast/flcd
|
f9168c1965976e9ae9477ee6b163a026f61acb1b
|
[
"MIT"
] | null | null | null |
lab4_2/helpers/scanner.py
|
cinnamonbreakfast/flcd
|
f9168c1965976e9ae9477ee6b163a026f61acb1b
|
[
"MIT"
] | null | null | null |
lab4_2/helpers/scanner.py
|
cinnamonbreakfast/flcd
|
f9168c1965976e9ae9477ee6b163a026f61acb1b
|
[
"MIT"
] | null | null | null |
res_words = []
seps = []
ops = []
def load_dom():
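    # data/tokens lists 7 separators, then 15 operators, then 21 reserved words, one per line.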
with open('data/tokens', 'r') as f:
for i in range(7):
separator = f.readline().strip()
if separator == "_": # Special case [SPACE]
separator = " "
seps.append(separator)
for i in range(15):
ops.append(f.readline().strip())
for i in range(21):
res_words.append(f.readline().strip())
def getStringToken(line, index):
token = ''
quotes = 0
while index < len(line) and quotes < 2:
if line[index] == '\'':
quotes += 1
token += line[index]
index += 1
return token, index
def isPartOfOperator(char):
for op in ops:
if char in op:
return True
return False
def getOperatorToken(line, index):
token = ''
    try:
        # if the rest of the line parses as an integer (e.g. a negative number),
        # consume it whole rather than splitting the leading '-' off as an operator
        int(line[index:])
        token += line[index:]
        index = len(line)
        return token, index
    except ValueError:
        pass
while index < len(line) and isPartOfOperator(line[index]):
token += line[index]
index += 1
return token, index
def tokenize(line):
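    # Split a source line into string literals, operators, separators and plain tokens.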
token = ''
index = 0
tokens = []
while index < len(line):
if isPartOfOperator(line[index]):
if token:
tokens.append(token)
token, index = getOperatorToken(line, index)
tokens.append(token)
token = ''
elif line[index] == '\'':
if token:
tokens.append(token)
token, index = getStringToken(line, index)
tokens.append(token)
token = ''
elif line[index] in seps:
if token:
tokens.append(token)
token, index = line[index], index + 1
tokens.append(token)
token = ''
else:
token += line[index]
index += 1
if token:
tokens.append(token)
return tokens
| 22.5
| 74
| 0.491414
| 209
| 1,980
| 4.636364
| 0.263158
| 0.139319
| 0.122807
| 0.136223
| 0.423117
| 0.285862
| 0.285862
| 0.250774
| 0.250774
| 0
| 0
| 0.011657
| 0.393434
| 1,980
| 88
| 75
| 22.5
| 0.795171
| 0.010101
| 0
| 0.385714
| 0
| 0
| 0.008172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0.014286
| 0
| 0
| 0.157143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dee8b0a49fcef498a3468a8ea4df153befa037f5
| 26,370
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/run.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/run.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/run.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# ignored_file
# [END_TAGS]
#
# run.py
# Command line test runner
#
from __future__ import print_function
import glob, json, os, random, re, sys
if sys.version_info[0] <= 2:
print('WiredTiger requires Python version 3.0 or above')
sys.exit(1)
# Set paths
suitedir = sys.path[0]
wt_disttop = os.path.dirname(os.path.dirname(suitedir))
wt_3rdpartydir = os.path.join(wt_disttop, 'test', '3rdparty')
# Check for a local build that contains the wt utility. First check if the
# supplied an explicit build directory ('WT_BUILDDIR'), then the current
# working directory, and finally in the disttop directory.
# This isn't ideal - if a user has multiple builds in a tree we
# could pick the wrong one. We also need to account for the fact that there
# may be an executable 'wt' file the build directory.
env_builddir = os.getenv('WT_BUILDDIR')
curdir = os.getcwd()
if env_builddir and os.path.isfile(os.path.join(env_builddir, 'wt')):
wt_builddir = env_builddir
elif os.path.isfile(os.path.join(curdir, 'wt')):
wt_builddir = curdir
elif os.path.isfile(os.path.join(curdir, 'wt.exe')):
wt_builddir = curdir
elif os.path.isfile(os.path.join(wt_disttop, 'wt')):
wt_builddir = wt_disttop
elif os.path.isfile(os.path.join(wt_disttop, 'wt.exe')):
wt_builddir = wt_disttop
else:
print('Unable to find useable WiredTiger build')
sys.exit(1)
# Cannot import wiredtiger and supporting utils until we set up paths
# We want our local tree in front of any installed versions of WiredTiger.
# Don't change sys.path[0], it's the dir containing the invoked python script.
sys.path.insert(1, os.path.join(wt_builddir, 'lang', 'python'))
# Append to a colon separated path in the environment
def append_env_path(name, value):
path = os.environ.get(name)
if path == None:
v = value
else:
v = path + ':' + value
os.environ[name] = v
# If we built with libtool, explicitly put its install directory in our library
# search path. This only affects library loading for subprocesses, like 'wt'.
libsdir = os.path.join(wt_builddir, '.libs')
if os.path.isdir(libsdir):
append_env_path('LD_LIBRARY_PATH', libsdir)
if sys.platform == "darwin":
append_env_path('DYLD_LIBRARY_PATH', libsdir)
# Add all 3rd party directories: some have code in subdirectories
for d in os.listdir(wt_3rdpartydir):
for subdir in ('lib', 'python', ''):
if os.path.exists(os.path.join(wt_3rdpartydir, d, subdir)):
sys.path.insert(1, os.path.join(wt_3rdpartydir, d, subdir))
break
# unittest will be imported later, near to when it is needed.
unittest = None
def usage():
print('Usage:\n\
$ cd build\n\
$ python ../test/suite/run.py [ options ] [ tests ]\n\
\n\
Options:\n\
--asan run with an ASAN enabled shared library\n\
-b K/N | --batch K/N run batch K of N, 0 <= K < N. The tests\n\
are split into N batches and the Kth is run.\n\
-C file | --configcreate file create a config file for controlling tests\n\
-c file | --config file use a config file for controlling tests\n\
-D dir | --dir dir use dir rather than WT_TEST.\n\
dir is removed/recreated as a first step.\n\
-d | --debug run with \'pdb\', the python debugger\n\
-n | --dry-run perform a dry-run, listing all scenarios to\n\
be run without executing any.\n\
-g | --gdb all subprocesses (like calls to wt) use gdb\n\
-h | --help show this message\n\
| --hook name[=arg] set up hooks from hook_<name>.py, with optional arg\n\
-j N | --parallel N run all tests in parallel using N processes\n\
-l | --long run the entire test suite\n\
| --noremove do not remove WT_TEST or -D target before run\n\
-p | --preserve preserve output files in WT_TEST/<testname>\n\
-r N | --random-sample N randomly sort scenarios to be run, then\n\
execute every Nth (2<=N<=1000) scenario.\n\
-s N | --scenario N use scenario N (N can be symbolic, number, or\n\
list of numbers and ranges in the form 1,3-5,7)\n\
-t | --timestamp name WT_TEST according to timestamp\n\
-v N | --verbose N set verboseness to N (0<=N<=3, default=1)\n\
-i | --ignore-stdout dont fail on unexpected stdout or stderr\n\
-R | --randomseed run with random seeds for generates random numbers\n\
-S | --seed run with two seeds that generates random numbers, \n\
format "seed1.seed2", seed1 or seed2 can\'t be zero\n\
-z | --zstd run the zstd tests\n\
\n\
Tests:\n\
may be a file name in test/suite: (e.g. test_base01.py)\n\
may be a subsuite name (e.g. \'base\' runs test_base*.py)\n\
\n\
When -C or -c are present, there may not be any tests named.\n\
When -s is present, there must be a test named.\n\
')
# Find an executable of the given name in the execution path.
def which(name):
path = os.getenv('PATH')
for pathdir in path.split(os.path.pathsep):
fname = os.path.join(pathdir, name)
if os.path.exists(fname) and os.access(fname, os.X_OK):
return fname
return None
# Follow a symbolic link, returning the target
def follow_symlinks(pathname):
return os.path.realpath(pathname)
# Find all instances of a filename under a directory
def find(topdir, filename):
results = []
for root, dirs, files in os.walk(topdir, followlinks=True):
if filename in files:
results.append(os.path.join(root, filename))
return results
# Show an environment variable if verbose enough.
def show_env(verbose, envvar):
if verbose >= 2:
print(envvar + "=" + os.getenv(envvar))
# capture the category (AKA 'subsuite') part of a test name,
# e.g. test_util03 -> util
reCatname = re.compile(r"test_([^0-9]+)[0-9]*")
# Look for a list of the form 0-9,11,15-17.
def parse_int_list(str):
# Use a dictionary as the result set to avoid repeated list scans.
# (Only the keys are used; the values are ignored.)
ret = {}
# Divide the input into ranges separated by commas.
for r in str.split(","):
# Split the range we got (if it is one).
bounds = r.split("-")
if len(bounds) == 1 and bounds[0].isdigit():
# It's a single number with no dash.
scenario = int(bounds[0])
ret[scenario] = True
continue
if len(bounds) == 2 and bounds[0].isdigit() and bounds[1].isdigit():
# It's two numbers separated by a dash.
for scenario in range(int(bounds[0]), int(bounds[1]) + 1):
ret[scenario] = True
continue
# It's not valid syntax; give up.
return None
return ret
def restrictScenario(testcases, restrict):
if restrict == '':
return testcases
else:
scenarios = parse_int_list(restrict)
if scenarios is not None:
return [t for t in testcases
if hasattr(t, 'scenario_number') and t.scenario_number in scenarios]
else:
return [t for t in testcases
if hasattr(t, 'scenario_name') and t.scenario_name == restrict]
def addScenarioTests(tests, loader, testname, scenario):
loaded = loader.loadTestsFromName(testname)
tests.addTests(restrictScenario(generate_scenarios(loaded), scenario))
def configRecord(cmap, tup):
"""
Records this tuple in the config. It is marked as None
(appearing as null in json), so it can be easily adjusted
in the output file.
"""
tuplen = len(tup)
pos = 0
for name in tup:
last = (pos == tuplen - 1)
pos += 1
if not name in cmap:
if last:
cmap[name] = {"run":None}
else:
cmap[name] = {"run":None, "sub":{}}
if not last:
cmap = cmap[name]["sub"]
def configGet(cmap, tup):
"""
Answers the question, should we do this test, given this config file?
Following the values of the tuple through the map,
returning the first non-null value. If all values are null,
return True (handles tests that may have been added after the
config was generated).
"""
for name in tup:
if not name in cmap:
return True
run = cmap[name]["run"] if "run" in cmap[name] else None
if run != None:
return run
cmap = cmap[name]["sub"] if "sub" in cmap[name] else {}
return True
def configApplyInner(suites, configmap, configwrite):
newsuite = unittest.TestSuite()
for s in suites:
if type(s) is unittest.TestSuite:
newsuite.addTest(configApplyInner(s, configmap, configwrite))
else:
modname = s.__module__
catname = re.sub(reCatname, r"\1", modname)
classname = s.__class__.__name__
methname = s._testMethodName
tup = (catname, modname, classname, methname)
add = True
if configwrite:
configRecord(configmap, tup)
else:
add = configGet(configmap, tup)
if add:
newsuite.addTest(s)
return newsuite
def configApply(suites, configfilename, configwrite):
configmap = None
if not configwrite:
with open(configfilename, 'r') as f:
line = f.readline()
while line != '\n' and line != '':
line = f.readline()
configmap = json.load(f)
else:
configmap = {}
newsuite = configApplyInner(suites, configmap, configwrite)
if configwrite:
with open(configfilename, 'w') as f:
f.write("""# Configuration file for wiredtiger test/suite/run.py,
# generated with '-C filename' and consumed with '-c filename'.
# This shows the hierarchy of tests, and can be used to rerun with
# a specific subset of tests. The value of "run" controls whether
# a test or subtests will be run:
#
# true turn on a test and all subtests (overriding values beneath)
# false turn on a test and all subtests (overriding values beneath)
# null do not effect subtests
#
# If a test does not appear, or is marked as '"run": null' all the way down,
# then the test is run.
#
# The remainder of the file is in JSON format.
# !!! There must be a single blank line following this line!!!
""")
json.dump(configmap, f, sort_keys=True, indent=4)
return newsuite
def testsFromArg(tests, loader, arg, scenario):
# If a group of test is mentioned, do all tests in that group
# e.g. 'run.py base'
groupedfiles = glob.glob(suitedir + os.sep + 'test_' + arg + '*.py')
if len(groupedfiles) > 0:
for file in groupedfiles:
testsFromArg(tests, loader, os.path.basename(file), scenario)
return
# Explicit test class names
if not arg[0].isdigit():
if arg.endswith('.py'):
arg = arg[:-3]
addScenarioTests(tests, loader, arg, scenario)
return
# Deal with ranges
if '-' in arg:
start, end = (int(a) for a in arg.split('-'))
else:
start, end = int(arg), int(arg)
    for t in range(start, end+1):  # range, not xrange: this script requires Python 3
addScenarioTests(tests, loader, 'test%03d' % t, scenario)
def error(exitval, prefix, msg):
print('*** ERROR: {}: {}'.format(prefix, msg.replace('\n', '\n*** ')))
sys.exit(exitval)
if __name__ == '__main__':
# Turn numbers and ranges into test module names
preserve = timestamp = debug = dryRun = gdbSub = lldbSub = longtest = zstdtest = ignoreStdout = False
removeAtStart = True
asan = False
parallel = 0
random_sample = 0
batchtotal = batchnum = 0
seed = seedw = seedz = 0
configfile = None
configwrite = False
dirarg = None
scenario = ''
verbose = 1
args = sys.argv[1:]
testargs = []
hook_names = []
while len(args) > 0:
arg = args.pop(0)
from unittest import defaultTestLoader as loader
# Command line options
if arg[0] == '-':
option = arg[1:]
if option == '-asan':
asan = True
continue
if option == '-batch' or option == 'b':
if batchtotal != 0 or len(args) == 0:
usage()
sys.exit(2)
# Batch expects an argument that has int slash int.
# For example "-b 4/12"
try:
left, right = args.pop(0).split('/')
batchnum = int(left)
batchtotal = int(right)
                except ValueError:
print('batch argument should be nnn/nnn')
usage()
sys.exit(2)
if batchtotal <= 0 or batchnum < 0 or batchnum >= batchtotal:
usage()
sys.exit(2)
continue
if option == '-dir' or option == 'D':
                if dirarg is not None or len(args) == 0:
usage()
sys.exit(2)
dirarg = args.pop(0)
continue
if option == '-debug' or option == 'd':
debug = True
continue
if option == '-dry-run' or option == 'n':
dryRun = True
continue
if option == '-gdb' or option == 'g':
gdbSub = True
continue
if option == '-lldb':
lldbSub = True
continue
if option == '-help' or option == 'h':
usage()
sys.exit(0)
if option == '-hook':
if len(args) == 0:
usage()
sys.exit(2)
hook_names.append(args.pop(0))
continue
if option == '-long' or option == 'l':
longtest = True
continue
if option == '-zstd' or option == 'z':
zstdtest = True
continue
if option == '-noremove':
removeAtStart = False
continue
if option == '-random-sample' or option == 'r':
if len(args) == 0:
usage()
sys.exit(2)
random_sample = int(args.pop(0))
if random_sample < 2 or random_sample > 1000:
usage()
sys.exit(2)
continue
if option == '-parallel' or option == 'j':
if parallel != 0 or len(args) == 0:
usage()
sys.exit(2)
parallel = int(args.pop(0))
continue
if option == '-preserve' or option == 'p':
preserve = True
continue
if option == '-scenario' or option == 's':
if scenario != '' or len(args) == 0:
usage()
sys.exit(2)
scenario = args.pop(0)
continue
if option == '-timestamp' or option == 't':
timestamp = True
continue
if option == '-verbose' or option == 'v':
if len(args) == 0:
usage()
sys.exit(2)
verbose = int(args.pop(0))
if verbose > 3:
verbose = 3
if verbose < 0:
verbose = 0
continue
            if option == '-ignore-stdout' or option == 'i':
ignoreStdout = True
continue
if option == '-config' or option == 'c':
                if configfile is not None or len(args) == 0:
usage()
sys.exit(2)
configfile = args.pop(0)
continue
if option == '-configcreate' or option == 'C':
                if configfile is not None or len(args) == 0:
usage()
sys.exit(2)
configfile = args.pop(0)
configwrite = True
continue
if option == '-randomseed' or option == 'R':
seedw = random.randint(1, 0xffffffff)
seedz = random.randint(1, 0xffffffff)
continue
if option == '-seed' or option == 'S':
if seed != 0 or len(args) == 0:
usage()
sys.exit(2)
                seed = args.pop(0)
                try:
                    seedw, seedz = [int(x) for x in seed.split('.')]
                except ValueError:
                    usage()
                    sys.exit(2)
                if seedw == 0 or seedz == 0:
                    usage()
                    sys.exit(2)
continue
print('unknown arg: ' + arg)
usage()
sys.exit(2)
testargs.append(arg)
if asan:
# To run ASAN, we need to ensure these environment variables are set:
# ASAN_SYMBOLIZER_PATH full path to the llvm-symbolizer program
# LD_LIBRARY_PATH includes path with wiredtiger shared object
# LD_PRELOAD includes the ASAN runtime library
#
# Note that LD_LIBRARY_PATH has already been set above. The trouble with
# simply setting these variables in the Python environment is that it's
# too late. LD_LIBRARY_PATH is commonly cached by the shared library
# loader at program startup, and that's already been done before Python
# begins execution. Likewise, any preloading indicated by LD_PRELOAD
# has already been done.
#
# Our solution is to set the variables as appropriate, and then restart
# Python with the same argument list. The shared library loader will
# have everything it needs on the second go round.
#
# Note: If the ASAN stops the program with the error:
# Shadow memory range interleaves with an existing memory mapping.
# ASan cannot proceed correctly.
#
# try rebuilding with the clang options:
# "-mllvm -asan-force-dynamic-shadow=1"
# and make sure that clang is used for all compiles.
#
# We'd like to show this as a message, but there's no good way to
# detect this error from here short of capturing/parsing all output
# from the test run.
ASAN_ENV = "__WT_TEST_SUITE_ASAN" # if set, we've been here before
ASAN_SYMBOLIZER_PROG = "llvm-symbolizer"
ASAN_SYMBOLIZER_ENV = "ASAN_SYMBOLIZER_PATH"
LD_PRELOAD_ENV = "LD_PRELOAD"
SO_FILE_NAME = "libclang_rt.asan-x86_64.so"
if not os.environ.get(ASAN_ENV):
if verbose >= 2:
print('Enabling ASAN environment and rerunning python')
os.environ[ASAN_ENV] = "1"
show_env(verbose, "LD_LIBRARY_PATH")
if not os.environ.get(ASAN_SYMBOLIZER_ENV):
os.environ[ASAN_SYMBOLIZER_ENV] = which(ASAN_SYMBOLIZER_PROG)
if not os.environ.get(ASAN_SYMBOLIZER_ENV):
                    error(3, ASAN_SYMBOLIZER_ENV,
                          'symbolizer program not found in PATH')
show_env(verbose, ASAN_SYMBOLIZER_ENV)
if not os.environ.get(LD_PRELOAD_ENV):
symbolizer = follow_symlinks(os.environ[ASAN_SYMBOLIZER_ENV])
bindir = os.path.dirname(symbolizer)
sofiles = []
if os.path.basename(bindir) == 'bin':
libdir = os.path.join(os.path.dirname(bindir), 'lib')
sofiles = find(libdir, SO_FILE_NAME)
if len(sofiles) != 1:
if len(sofiles) == 0:
fmt = 'ASAN shared library file not found.\n' + \
'Set {} to the file location and rerun.'
error(3, SO_FILE_NAME, fmt.format(LD_PRELOAD_ENV))
else:
fmt = 'multiple ASAN shared library files found\n' + \
'under {}, expected just one.\n' + \
'Set {} to the correct file location and rerun.'
error(3, SO_FILE_NAME, fmt.format(libdir, LD_PRELOAD_ENV))
os.environ[LD_PRELOAD_ENV] = sofiles[0]
show_env(verbose, LD_PRELOAD_ENV)
# Restart python!
python = sys.executable
os.execl(python, python, *sys.argv)
elif verbose >= 2:
print('Python restarted for ASAN')
# We don't import wttest until after ASAN environment variables are set.
import wttest
# Use the same version of unittest found by wttest.py
unittest = wttest.unittest
tests = unittest.TestSuite()
from testscenarios.scenarios import generate_scenarios
import wthooks
hookmgr = wthooks.WiredTigerHookManager(hook_names)
# All global variables should be set before any test classes are loaded.
# That way, verbose printing can be done at the class definition level.
wttest.WiredTigerTestCase.globalSetup(preserve, removeAtStart, timestamp, gdbSub, lldbSub,
verbose, wt_builddir, dirarg, longtest, zstdtest,
ignoreStdout, seedw, seedz, hookmgr)
# Without any tests listed as arguments, do discovery
if len(testargs) == 0:
if scenario != '':
sys.stderr.write(
'run.py: specifying a scenario requires a test name\n')
usage()
sys.exit(2)
from discover import defaultTestLoader as loader
suites = loader.discover(suitedir)
# If you have an empty Python file, it comes back as an empty entry in suites
# and then the sort explodes. Drop empty entries first. Note: this converts
# suites to a list, but the sort does that anyway. Also note: there seems to be
# no way to count other than iteration; there's a count method but it also
# returns zero for test files that contain a test class with no test functions,
# and it's not clear that dropping those here is correct.
def isempty(s):
count = 0
for c in s:
count += 1
return (count == 0)
suites = [s for s in suites if not isempty(s)]
suites = sorted(suites, key=lambda c: str(list(c)[0]))
        if configfile is not None:
suites = configApply(suites, configfile, configwrite)
tests.addTests(restrictScenario(generate_scenarios(suites), ''))
else:
for arg in testargs:
testsFromArg(tests, loader, arg, scenario)
tests = hookmgr.filter_tests(tests)
# Shuffle the tests and create a new suite containing every Nth test from
# the original suite
if random_sample > 0:
random_sample_tests = []
for test in tests:
random_sample_tests.append(test)
random.shuffle(random_sample_tests)
tests = unittest.TestSuite(random_sample_tests[::random_sample])
if debug:
import pdb
pdb.set_trace()
if batchtotal != 0:
# For test batching, we want to split up all the tests evenly, and
# spread out the tests, so each batch contains tests of all kinds. We'd
# like to prioritize the lowest scenario numbers first, so if there's a
# failure, we won't have to do all X thousand of some test's scenarios
# before we see a failure in the next test. To that end, we define a
# sort function that sorts by scenario first, and test name second.
hugetests = set()
def get_sort_keys(test):
s = 0
name = test.simpleName()
if hasattr(test, 'scenario_number'):
s = test.scenario_number
if s > 1000:
hugetests.add(name) # warn for too many scenarios
return (s, test.simpleName()) # sort by scenario number first
all_tests = sorted(tests, key = get_sort_keys)
if not longtest:
for name in hugetests:
print("WARNING: huge test " + name + " has > 1000 scenarios.\n" +
"That is only appropriate when using the --long option.\n" +
"The number of scenarios for the test should be pruned")
# At this point we have an ordered list of all the tests.
# Break it into just our batch.
tests = unittest.TestSuite(all_tests[batchnum::batchtotal])
if dryRun:
for line in tests:
print(line)
else:
result = wttest.runsuite(tests, parallel)
sys.exit(0 if result.wasSuccessful() else 1)
sys.exit(0)
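# Illustrative invocations, sketched from the option parsing above (the test
# names are hypothetical):
#
#   python run.py base               # run every test_base*.py test
#   python run.py 3-5                # run test003 through test005
#   python run.py -j 8 -v 2 base     # 8-way parallel with more verbose output
#   python run.py -C tests.json      # record the test hierarchy to a config
#   python run.py -c tests.json      # rerun only the subset enabled in it
#   python run.py -b 2/4 base        # run batch 2 of a 4-way batch split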
| 40.631741
| 105
| 0.573834
| 3,387
| 26,370
| 4.41364
| 0.213463
| 0.01164
| 0.022476
| 0.013914
| 0.106228
| 0.080674
| 0.070306
| 0.059068
| 0.044886
| 0.035454
| 0
| 0.010474
| 0.333826
| 26,370
| 648
| 106
| 40.694444
| 0.840496
| 0.237619
| 0
| 0.234177
| 0
| 0.010549
| 0.095704
| 0.001305
| 0
| 0
| 0.001004
| 0
| 0
| 1
| 0.035865
| false
| 0
| 0.016878
| 0.00211
| 0.090717
| 0.025316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deeb28c75145a6bebc3771235fab7a32732db4c0
| 684
|
py
|
Python
|
models/t_complex_gateway.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
models/t_complex_gateway.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
models/t_complex_gateway.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
from .t_expression import TExpression
from .t_gateway import TGateway
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TComplexGateway(TGateway):
class Meta:
name = "tComplexGateway"
activation_condition: Optional[TExpression] = field(
default=None,
metadata={
"name": "activationCondition",
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
}
)
default: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
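# A minimal usage sketch, assuming (as is typical for xsdata-generated models)
# that TExpression and TComplexGateway accept no required constructor
# arguments; the sequence-flow id below is hypothetical.
if __name__ == "__main__":
    gateway = TComplexGateway(
        activation_condition=TExpression(),
        default="flow_1",
    )
    print(gateway)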
| 24.428571
| 71
| 0.622807
| 66
| 684
| 6.348485
| 0.530303
| 0.023866
| 0.076372
| 0.090692
| 0.205251
| 0.205251
| 0.205251
| 0.205251
| 0.205251
| 0
| 0
| 0.031746
| 0.263158
| 684
| 27
| 72
| 25.333333
| 0.799603
| 0
| 0
| 0.173913
| 0
| 0
| 0.229532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
deedff750596df4bfdfcd2656752ec59911b5e80
| 2,713
|
py
|
Python
|
crawler/page_fetcher.py
|
AssisRaphael/PageColector
|
6753376996f12ee1cced96b89a3e34d6fdf66529
|
[
"MIT"
] | null | null | null |
crawler/page_fetcher.py
|
AssisRaphael/PageColector
|
6753376996f12ee1cced96b89a3e34d6fdf66529
|
[
"MIT"
] | null | null | null |
crawler/page_fetcher.py
|
AssisRaphael/PageColector
|
6753376996f12ee1cced96b89a3e34d6fdf66529
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from threading import Thread
import requests
from urllib.parse import urlparse,urljoin
from urllib import parse
class PageFetcher(Thread):
    def __init__(self, obj_scheduler):
        super().__init__()
        self.obj_scheduler = obj_scheduler
def request_url(self,obj_url):
"""
        Makes the request and returns the binary content of the URL passed as a parameter.
        obj_url: instance of the ParseResult class with the URL to be requested.
"""
url = parse.urlunparse(obj_url)
if "http" not in url:
url = "http:" + url
        response = requests.get(url, headers={'User-Agent': self.obj_scheduler.str_usr_agent})
        if response.headers.get('content-type', '').find('text/html') == -1:
return None
return response.content
def discover_links(self,obj_url,int_depth,bin_str_content):
"""
        Returns the links found in the content bin_str_content of the already requested page obj_url
"""
soup = BeautifulSoup(bin_str_content,features="lxml")
for link in soup.select('a'):
try:
obj_new_url = urlparse(link['href'])
            except (KeyError, ValueError):
continue
if obj_new_url.netloc == '':
if "http" in obj_new_url.path:
obj_new_url = urlparse(obj_new_url.path)
else:
obj_new_url = urlparse(urljoin(parse.urlunparse(obj_url), parse.urlunparse(obj_new_url)))
# print('rrr: ', obj_new_url.netloc+obj_new_url.path)
if obj_new_url.netloc != obj_url.netloc:
int_new_depth = 0
else:
int_new_depth = int_depth + 1
yield obj_new_url,int_new_depth
def crawl_new_url(self):
"""
        Collects a new URL, obtaining it from the scheduler
"""
obj_url, int_depth = self.obj_scheduler.get_next_url()
bin_str_content = self.request_url(obj_url)
if bin_str_content is not None:
#print(obj_url)
multi_obj = self.discover_links(obj_url, int_depth, bin_str_content)
while True:
try:
url, depth = next(multi_obj)
#print(url)
print(parse.urlunparse(url))
self.obj_scheduler.add_new_page(url, depth)
except StopIteration:
break
def run(self):
"""
        Runs the crawl while there are pages left to be collected
"""
while not self.obj_scheduler.has_finished_crawl():
self.crawl_new_url()
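# A minimal smoke-test sketch. _OneShotScheduler is hypothetical and only
# implements the attributes and methods PageFetcher actually uses above
# (str_usr_agent, get_next_url, add_new_page, has_finished_crawl).
if __name__ == "__main__":
    class _OneShotScheduler:
        str_usr_agent = "PageColector/0.1"
        def __init__(self, url):
            self._queue = [(urlparse(url), 0)]
        def get_next_url(self):
            return self._queue.pop(0)
        def add_new_page(self, url, depth):
            pass  # this sketch discards discovered links
        def has_finished_crawl(self):
            return len(self._queue) == 0

    fetcher = PageFetcher(_OneShotScheduler("http://example.com/"))
    fetcher.run()  # run synchronously; fetcher.start() would use the thread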
| 33.085366
| 109
| 0.570586
| 325
| 2,713
| 4.504615
| 0.338462
| 0.053279
| 0.067623
| 0.028689
| 0.074454
| 0.036885
| 0.036885
| 0
| 0
| 0
| 0
| 0.00227
| 0.350534
| 2,713
| 81
| 110
| 33.493827
| 0.828604
| 0.150018
| 0
| 0.081633
| 0
| 0
| 0.024234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.102041
| 0
| 0.265306
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
def0d455f3332a2d6ded90d585855fcbfa88a92a
| 2,098
|
py
|
Python
|
simublocks/dialog/importCodeDialog.py
|
bentoavb/simublocks
|
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
|
[
"MIT"
] | 2
|
2020-05-14T12:34:43.000Z
|
2020-06-11T23:48:09.000Z
|
simublocks/dialog/importCodeDialog.py
|
bentoavb/simublocks
|
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
|
[
"MIT"
] | null | null | null |
simublocks/dialog/importCodeDialog.py
|
bentoavb/simublocks
|
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
|
[
"MIT"
] | 1
|
2020-05-12T07:01:28.000Z
|
2020-05-12T07:01:28.000Z
|
# MIT License
#
# Copyright (c) 2020 Anderson Vitor Bento
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from simublocks.dialog.dialogTools import dialogTools
class importCodeDialog(object):
def __init__(self, code):
root = self.root = tk.Tk()
root.resizable(0,0)
root.title("Import Code and Packages")
self.inputCode = ScrolledText(root, height=5,width=50)
self.inputCode.insert(tk.END, code)
self.inputCode.grid(row=0, column=0,columnspan=2)
tk.Button(root, width=11, text="Save", command=self.save_button).grid(row=1, column=0)
tk.Button(root, width=11, text="Cancel", command=self.cancel_button).grid(row=1, column=1)
dialogTools.center(root)
def save_button(self):
self.returning = {
'code': self.inputCode.get(1.0, tk.END),
'status': 'ok'
}
self.root.quit()
def cancel_button(self):
self.returning = {
'status': 'cancel'
}
self.root.quit()
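# A minimal usage sketch: open the dialog, block until Save or Cancel, then
# read the result dictionary set by the button callbacks above.
if __name__ == "__main__":
    dialog = importCodeDialog("import numpy as np\n")
    dialog.root.mainloop()
    print(getattr(dialog, "returning", {"status": "closed"}))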
| 38.851852
| 98
| 0.704957
| 296
| 2,098
| 4.969595
| 0.47973
| 0.059823
| 0.017675
| 0.023114
| 0.058464
| 0.031271
| 0
| 0
| 0
| 0
| 0
| 0.013213
| 0.206387
| 2,098
| 54
| 99
| 38.851852
| 0.87027
| 0.510963
| 0
| 0.16
| 0
| 0
| 0.057711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.2
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
def2f40bc3a8f54d1a406e95811076ed0688d708
| 658
|
py
|
Python
|
delete_unuse_callkit.py
|
eyolo2021/ios-ui-sdk-set
|
a8897320c356ddd6dbfe964ef68eb76701759f03
|
[
"MIT"
] | 14
|
2021-03-06T08:47:30.000Z
|
2022-02-11T09:42:24.000Z
|
delete_unuse_callkit.py
|
eyolo2021/ios-ui-sdk-set
|
a8897320c356ddd6dbfe964ef68eb76701759f03
|
[
"MIT"
] | 3
|
2021-03-19T11:12:42.000Z
|
2021-11-29T14:56:33.000Z
|
delete_unuse_callkit.py
|
Zuzi007/ios-ui-sdk-set
|
2e51added5d697b4d1ab1ba2887ad297b408e7b0
|
[
"MIT"
] | 12
|
2021-07-02T02:44:52.000Z
|
2022-03-01T05:15:22.000Z
|
#coding=utf-8
import os
delete_files=["RCCall.mm","RCCXCall.m"]
start_key = "RCCallKit_Delete_Start"
end_key = "RCCallKit_Delete_end"
def delete_used(file_path):
print(file_path)
f = open(file_path,"r")
lines = f.readlines()
f.close()
# print(lines)
result = []
flag = False
for l in lines:
if start_key in l:
flag = True
elif end_key in l:
flag = False
if flag is True:
continue
result.append(l)
f = open(file_path,"w")
f.writelines(result)
f.close()
for root,dirs,files in os.walk("./CallKit"):
for file in files:
if file in delete_files:
print("will delete %s" % file)
delete_used(os.path.join(root,file))
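# Illustrative sketch of the markers this script strips (hypothetical
# Objective-C source). The start-marker line and everything up to, but not
# including, the end-marker line are dropped; the end-marker line is kept:
#
#   // RCCallKit_Delete_Start
#   [self reportIncomingCall];   // CallKit-only code, removed
#   // RCCallKit_Delete_end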
| 15.666667
| 44
| 0.674772
| 109
| 658
| 3.926606
| 0.431193
| 0.074766
| 0.084112
| 0.060748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001873
| 0.18845
| 658
| 41
| 45
| 16.04878
| 0.799625
| 0.037994
| 0
| 0.148148
| 0
| 0
| 0.136508
| 0.034921
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.074074
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
def7ae196a0259e7e64d4dfd6522b1ee72138646
| 16,178
|
py
|
Python
|
api/yolo_minimal/utils.py
|
simonsmh/www
|
1741545e636540b9eb250840347f091082fe301a
|
[
"MIT"
] | 5
|
2015-12-19T11:18:54.000Z
|
2016-08-27T02:21:59.000Z
|
api/yolo_minimal/utils.py
|
simonsmh/www
|
1741545e636540b9eb250840347f091082fe301a
|
[
"MIT"
] | null | null | null |
api/yolo_minimal/utils.py
|
simonsmh/www
|
1741545e636540b9eb250840347f091082fe301a
|
[
"MIT"
] | 1
|
2020-10-30T13:25:33.000Z
|
2020-10-30T13:25:33.000Z
|
import math
import os
import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
def xyxy2xywh(x):
# Transform box coordinates from [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right) to [x, y, w, h]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Transform box coordinates from [x, y, w, h] to [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right)
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
gain = max(img1_shape) / max(img0_shape) # gain = old / new
pad = (
(img1_shape[1] - img0_shape[1] * gain) / 2,
(img1_shape[0] - img0_shape[0] * gain) / 2,
) # wh padding
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
coords[:, 2] -= coords[:, 0] # xyxy2xywh
coords[:, 3] -= coords[:, 1]
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.t()
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (
torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)
).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
union = (w1 * h1 + 1e-16) + w2 * h2 - inter
iou = inter / union # iou
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(
b1_x1, b2_x1
) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + (
(b2_y1 + b2_y2) - (b1_y1 + b1_y2)
) ** 2 / 4
if DIoU:
return iou - rho2 / c2 # DIoU
elif (
CIoU
): # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(
torch.atan(w2 / h2) - torch.atan(w1 / h1), 2
)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.t())
area2 = box_area(box2.t())
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (
(
torch.min(box1[:, None, 2:], box2[:, 2:])
- torch.max(box1[:, None, :2], box2[:, :2])
)
.clamp(0)
.prod(2)
)
return inter / (
area1[:, None] + area2 - inter
) # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (
wh1.prod(2) + wh2.prod(2) - inter
) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(
prediction,
conf_thres=0.1,
iou_thres=0.6,
multi_label=True,
classes=None,
agnostic=False,
):
"""
Performs Non-Maximum Suppression on inference results
Returns detections with shape:
nx6 (x1, y1, x2, y2, conf, cls)
"""
# Box constraints
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
method = "merge"
nc = prediction[0].shape[1] - 5 # number of classes
multi_label &= nc > 1 # multiple labels per box
output = [None] * len(prediction)
for xi, x in enumerate(prediction): # image index, image inference
# Apply conf constraint
x = x[x[:, 4] > conf_thres]
# Apply width-height constraint
x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[..., 5:] *= x[..., 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero().t()
x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
else: # best class only
conf, j = x[:, 5:].max(1)
x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)
# Filter by class
if classes:
x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]
# Apply finite constraint
if not torch.isfinite(x).all():
x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# if method == 'fast_batch':
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5] * 0 if agnostic else x[:, 5] # classes
boxes, scores = (
x[:, :4].clone() + c.view(-1, 1) * max_wh,
x[:, 4],
) # boxes (offset by class), scores
if method == "merge": # Merge NMS (boxes merged using weighted mean)
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if n < 1e4: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
# weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1) # box weights
# weights /= weights.sum(0) # normalize
# x[:, :4] = torch.mm(weights.T, x[:, :4])
weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[
None
] # box weights
x[i, :4] = torch.mm(
weights / weights.sum(1, keepdim=True), x[:, :4]
).float() # merged boxes
elif method == "vision":
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
elif method == "fast": # FastNMS from https://github.com/dbolya/yolact
iou = box_iou(boxes, boxes).triu_(diagonal=1) # upper triangular iou matrix
i = iou.max(0)[0] < iou_thres
output[xi] = x[i]
return output
def model_info(model, verbose=False):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(
x.numel() for x in model.parameters() if x.requires_grad
) # number gradients
if verbose:
print(
"%5s %40s %9s %12s %20s %10s %10s"
% ("layer", "name", "gradient", "parameters", "shape", "mu", "sigma")
)
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace("module_list.", "")
print(
"%5g %40s %9s %12g %20s %10.3g %10.3g"
% (
i,
name,
p.requires_grad,
p.numel(),
list(p.shape),
p.mean(),
p.std(),
)
)
try: # FLOPS
from thop import profile
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640),))
fs = ", %.1f GFLOPS" % (macs / 1e9 * 2)
    except Exception:
fs = ""
if verbose:
print(
"Model Summary: %g layers, %g parameters, %g gradients%s"
% (len(list(model.parameters())), n_p, n_g, fs)
)
def fuse_conv_and_bn(conv, bn):
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
with torch.no_grad():
# init
fusedconv = torch.nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
bias=True,
)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
if conv.bias is not None:
b_conv = conv.bias
else:
b_conv = torch.zeros(conv.weight.size(0))
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(
torch.sqrt(bn.running_var + bn.eps)
)
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
def scale_img(img, ratio=1.0, same_shape=True): # img(16,3,256,416), r=ratio
# scales img(bs,3,y,x) by ratio
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize
if not same_shape: # pad/crop img
gs = 64 # (pixels) grid size
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def parse_model_cfg(path):
    # Parse the yolo *.cfg file and return module definitions. The path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'.
if not path.endswith(".cfg"): # add .cfg suffix if omitted
path += ".cfg"
if not os.path.exists(path) and os.path.exists(
"cfg" + os.sep + path
): # add cfg/ prefix if omitted
path = "cfg" + os.sep + path
with open(path, "r") as f:
lines = f.read().split("\n")
lines = [x for x in lines if x and not x.startswith("#")]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
mdefs = [] # module definitions
for line in lines:
if line.startswith("["): # This marks the start of a new block
mdefs.append({})
mdefs[-1]["type"] = line[1:-1].rstrip()
if mdefs[-1]["type"] == "convolutional":
mdefs[-1][
"batch_normalize"
] = 0 # pre-populate with zeros (may be overwritten later)
else:
key, val = line.split("=")
key = key.rstrip()
if key == "anchors": # return nparray
mdefs[-1][key] = np.array([float(x) for x in val.split(",")]).reshape(
(-1, 2)
) # np anchors
elif (key in ["from", "layers", "mask"]) or (
key == "size" and "," in val
): # return array
mdefs[-1][key] = [int(x) for x in val.split(",")]
else:
val = val.strip()
if val.isnumeric(): # return int or float
mdefs[-1][key] = (
int(val) if (int(val) - float(val)) == 0 else float(val)
)
else:
mdefs[-1][key] = val # return string
# Check all fields are supported
supported = [
"type",
"batch_normalize",
"filters",
"size",
"stride",
"pad",
"activation",
"layers",
"groups",
"from",
"mask",
"anchors",
"classes",
"num",
"jitter",
"ignore_thresh",
"truth_thresh",
"random",
"stride_x",
"stride_y",
"weights_type",
"weights_normalization",
"scale_x_y",
"beta_nms",
"nms_kind",
"iou_loss",
"iou_normalizer",
"cls_normalizer",
"iou_thresh",
]
f = [] # fields
for x in mdefs[1:]:
[f.append(k) for k in x if k not in f]
u = [x for x in f if x not in supported] # unsupported fields
assert not any(u), (
"Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631"
% (u, path)
)
return mdefs
def letterbox(
img,
new_shape=(416, 416),
color=(114, 114, 114),
auto=True,
scaleFill=False,
scaleup=True,
):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
) # add border
return img, ratio, (dw, dh)
def get_file_location(path):
if not os.path.exists(path) and os.path.exists(
os.path.split(os.path.realpath(__file__))[0] + os.sep + path
): # add $PWD/ prefix if omitted
return os.path.split(os.path.realpath(__file__))[0] + os.sep + path
else:
return
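# A small self-check sketch using random/dummy data (shapes are illustrative):
# round-trips the box transforms and letterboxes a dummy frame.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])   # one box in xyxy format
    assert torch.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)
    frame = np.zeros((480, 640, 3), dtype=np.uint8)    # dummy HWC image
    padded, ratio, (dw, dh) = letterbox(frame, new_shape=416)
    print(padded.shape, ratio, (dw, dh))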
| 34.49467
| 117
| 0.52627
| 2,299
| 16,178
| 3.616355
| 0.207916
| 0.00866
| 0.006495
| 0.005773
| 0.148545
| 0.095261
| 0.082752
| 0.067116
| 0.052201
| 0.042098
| 0
| 0.055911
| 0.323402
| 16,178
| 468
| 118
| 34.568376
| 0.703636
| 0.23532
| 0
| 0.093023
| 0
| 0.002907
| 0.051162
| 0.001725
| 0
| 0
| 0
| 0
| 0.002907
| 1
| 0.043605
| false
| 0
| 0.02907
| 0.002907
| 0.122093
| 0.008721
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
def8727d101b934efb5715bc01f3842eeeee3ee3
| 4,934
|
py
|
Python
|
ec2stack/__init__.py
|
sureshanaparti/cloudstack-ec2stack
|
8e07435d3d04357995f2a5d337adef62ecbfdd8d
|
[
"Apache-2.0"
] | 13
|
2015-05-06T13:38:13.000Z
|
2021-11-09T21:39:01.000Z
|
ec2stack/__init__.py
|
sureshanaparti/cloudstack-ec2stack
|
8e07435d3d04357995f2a5d337adef62ecbfdd8d
|
[
"Apache-2.0"
] | 3
|
2015-08-21T17:31:20.000Z
|
2021-07-07T08:39:11.000Z
|
ec2stack/__init__.py
|
sureshanaparti/cloudstack-ec2stack
|
8e07435d3d04357995f2a5d337adef62ecbfdd8d
|
[
"Apache-2.0"
] | 17
|
2015-07-24T06:00:59.000Z
|
2021-11-09T21:38:52.000Z
|
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module creates the flask application.
"""
import os
import sys
import argparse
from alembic import command
from alembic.config import Config as AlembicConfig
from flask import Flask
from ConfigParser import SafeConfigParser
from ec2stack.controllers import *
from ec2stack.core import DB
from ec2stack.models import User
def create_app(settings=None):
"""
Creates a flask application.
@param settings: Settings override object.
@return: The flask application.
"""
app = Flask(__name__)
if settings:
app.config.from_object(settings)
else:
args = _generate_args()
profile = args.pop('profile')
app.config['DEBUG'] = args.pop('debug')
config_file = _load_config_file()
database_uri = _load_database()
_config_from_config_profile(config_file, profile, app)
app.config['SQLALCHEMY_DATABASE_URI'] = database_uri
DB.init_app(app)
default_controller = __import__(
'ec2stack.controllers.' + 'default', None, None, 'DEFAULT'
)
default_controller = getattr(default_controller, 'DEFAULT')
app.register_blueprint(default_controller)
return app
def _generate_args():
"""
Generate command line arguments for ec2stack-configure.
@return: args.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
'--profile',
required=False,
help='The profile to run ec2stack with, default is initial',
default='initial'
)
parser.add_argument(
'-d',
'--debug',
required=False,
help='Turn debug on for application',
default=False
)
args = parser.parse_args()
return vars(args)
def _load_config_file():
"""
Checks that the user's configuration file exists and returns its path.
@return: The path to the user's configuration file.
"""
config_file = os.path.join(
os.path.expanduser('~'),
'.ec2stack/ec2stack.conf'
)
if not os.path.exists(config_file):
sys.exit('No configuration found, please run ec2stack-configure')
return config_file
def _config_from_config_profile(config_file, profile, app):
"""
Configures ec2stack app based on configuration profile.
@param config_file: current config file configuration.
@param profile: the profile to set the attribute in.
"""
config = SafeConfigParser()
config.read(config_file)
if not config.has_section(profile):
sys.exit('No profile matching ' + profile +
' found in configuration, please run ec2stack-configure -p ' + profile)
for attribute in config.options(profile):
app.config[attribute.upper()] = config.get(profile, attribute)
instance_type_map = {}
instance_section = profile + "instancemap"
if config.has_section(instance_section):
for attribute in config.options(instance_section):
instance_type_map[attribute] = config.get(
instance_section, attribute)
app.config['INSTANCE_TYPE_MAP'] = instance_type_map
resource_type_map = {}
resource_section = profile + "resourcemap"
if config.has_section(resource_section):
for attribute in config.options(resource_section):
resource_type_map[attribute] = config.get(
resource_section, attribute)
app.config['RESOURCE_TYPE_MAP '] = resource_type_map
def _load_database():
"""
Checks that the user's database exists and returns its uri.
@return: The uri to the user's database.
"""
database_file = os.path.join(
os.path.expanduser('~'),
'.ec2stack/ec2stack.sqlite'
)
if not os.path.exists(database_file):
directory = os.path.join(os.path.dirname(__file__), '../migrations')
config = AlembicConfig(os.path.join(
directory,
'alembic.ini'
))
config.set_main_option('script_location', directory)
command.upgrade(config, 'head', sql=False, tag=None)
return 'sqlite:///' + database_file
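# A minimal run sketch (assumes ec2stack-configure has already written
# ~/.ec2stack/ec2stack.conf and the sqlite database checked for above):
#
#   from ec2stack import create_app
#   app = create_app()
#   app.run(host='0.0.0.0', port=5000)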
| 28.356322
| 88
| 0.676125
| 598
| 4,934
| 5.428094
| 0.316054
| 0.033888
| 0.009858
| 0.011091
| 0.152495
| 0.075786
| 0.054837
| 0.054837
| 0.028343
| 0
| 0
| 0.004737
| 0.229834
| 4,934
| 173
| 89
| 28.520231
| 0.849474
| 0.286583
| 0
| 0.065934
| 0
| 0
| 0.141846
| 0.027131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054945
| false
| 0
| 0.120879
| 0
| 0.21978
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
def98cf0f4126cdcda2bee2e5c8d96a01bc4937b
| 1,351
|
py
|
Python
|
solutions/5/guillaume/LookAhead.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 9
|
2020-08-14T23:11:16.000Z
|
2021-08-09T16:23:43.000Z
|
solutions/5/guillaume/LookAhead.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 1
|
2020-11-19T09:29:14.000Z
|
2020-11-19T09:29:14.000Z
|
solutions/5/guillaume/LookAhead.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 2
|
2020-09-09T02:53:57.000Z
|
2020-12-06T08:20:52.000Z
|
import itertools as it
from torch.optim import Optimizer
class LookAhead(Optimizer):
def __init__(self, base_optimizer,alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
self.optimizer = base_optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
for group in self.param_groups:
group["step_counter"] = 0
self.slow_weights = [[p.clone().detach() for p in group['params']]
for group in self.param_groups]
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
loss = self.optimizer.step()
for group,slow_weights in zip(self.param_groups,self.slow_weights):
group['step_counter'] += 1
if group['step_counter'] % self.k != 0:
continue
for p,q in zip(group['params'],slow_weights):
if p.grad is None:
continue
                q.data.add_(p.data - q.data, alpha=self.alpha)
p.data.copy_(q.data)
return loss
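# A minimal usage sketch: wrap a base optimizer and fit a toy least-squares
# problem (hyperparameters and shapes are illustrative).
if __name__ == "__main__":
    import torch
    w = torch.nn.Parameter(torch.zeros(3))
    base = torch.optim.SGD([w], lr=0.1)
    opt = LookAhead(base, alpha=0.5, k=6)
    target = torch.tensor([1.0, -2.0, 0.5])
    for _ in range(30):
        base.zero_grad()
        loss = ((w - target) ** 2).sum()
        loss.backward()
        opt.step()
    print(w.data)  # should approach the target vector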
| 37.527778
| 75
| 0.559585
| 177
| 1,351
| 4.146893
| 0.327684
| 0.074932
| 0.081744
| 0.06267
| 0.06812
| 0.06812
| 0
| 0
| 0
| 0
| 0
| 0.012263
| 0.336047
| 1,351
| 35
| 76
| 38.6
| 0.80602
| 0
| 0
| 0.060606
| 0
| 0
| 0.080681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.060606
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
defcc91baa71d0c94f476ef6cc3d35765b3516a0
| 2,263
|
py
|
Python
|
addexp.py
|
Shajm44n/Expense
|
db3355d4d81d5dd57ceea81b1170724b8893e523
|
[
"MIT"
] | null | null | null |
addexp.py
|
Shajm44n/Expense
|
db3355d4d81d5dd57ceea81b1170724b8893e523
|
[
"MIT"
] | null | null | null |
addexp.py
|
Shajm44n/Expense
|
db3355d4d81d5dd57ceea81b1170724b8893e523
|
[
"MIT"
] | null | null | null |
from tkinter import *
# import expdate
import mysql.connector
db_connect=mysql.connector.connect(host="localhost",user="root",password="maan",database="expense")
db_cursor=db_connect.cursor()
def add_expense(day,month,year):
print("add exp")
window=Tk()
window.title("Expense list")
l_message=Label(window)
l_msg=Label(window)
print(day)
print(month)
print(year)
l_trans=Label(window,text="Transport :")
e_trans=Entry(window)
l_food=Label(window,text="Food :")
e_food=Entry(window)
l_home=Label(window,text="Home :")
e_home=Entry(window)
l_ent=Label(window,text="Entertainment :")
e_ent=Entry(window)
l_utl=Label(window,text="Utilities :")
e_utl=Entry(window)
l_health=Label(window,text="Health :")
e_health=Entry(window)
l_oth=Label(window,text="Others :")
e_oth=Entry(window)
def enter_data():
trans=int(e_trans.get())
food=int(e_food.get())
home=int(e_home.get())
ent=int(e_ent.get())
utl=int(e_utl.get())
health=int(e_health.get())
other=int(e_oth.get())
total=trans+food+home+ent+utl+health+other
print(total)
        db_cursor.execute(
            "insert into daily(day,month,year,Transport,Food,Home,Entertainment,"
            "Utilities,Health,Others,Total)"
            "values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            (day, month, year, trans, food, home, ent, utl, health, other, total))
db_connect.commit()
db_connect.close()
l_msg.config(text=" Data has been Updated!")
add_exp= Button(window, text= "add expense", command= enter_data)
add_exp.pack(pady=30)
    l_trans.place(x=20, y=50)
    e_trans.place(x=120, y=50)
    l_food.place(x=20, y=70)
    e_food.place(x=120, y=70)
    l_home.place(x=20, y=90)
    e_home.place(x=120, y=90)
    l_ent.place(x=20, y=110)
    e_ent.place(x=120, y=110)
    l_utl.place(x=20, y=130)
    e_utl.place(x=120, y=130)
    l_health.place(x=20, y=150)
    e_health.place(x=120, y=150)
    l_oth.place(x=20, y=170)
    e_oth.place(x=120, y=170)
    l_message.place(x=50, y=100)
    l_msg.place(x=120, y=170)
exit_button = Button(window, text="Exit", command=window.destroy)
exit_button.pack(pady=200)
window.geometry("800x800")
window.mainloop()
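# Illustrative call (requires the MySQL server configured above and an
# `expense.daily` table with the listed columns; the values are hypothetical):
#
#   add_expense("14", "May", "2022")
#
# Note that enter_data() closes db_connect after the first save, so a fresh
# connection would be needed before saving a second time.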
| 32.797101
| 236
| 0.643836
| 363
| 2,263
| 3.867769
| 0.242424
| 0.068376
| 0.051282
| 0.05698
| 0.061254
| 0.042735
| 0.042735
| 0
| 0
| 0
| 0
| 0.049973
| 0.17764
| 2,263
| 68
| 237
| 33.279412
| 0.70446
| 0.006186
| 0
| 0
| 0
| 0.015873
| 0.15984
| 0.086376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0.015873
| 0.031746
| 0
| 0.063492
| 0.079365
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
defde4b16a7fe68a1c0b7ba26a303a5bb6a695bc
| 12,389
|
py
|
Python
|
cma-evolve.py
|
simondlevy/CMA-Gym
|
ce0056873d42eae2b6769fe22fcf872459694f30
|
[
"Apache-2.0"
] | null | null | null |
cma-evolve.py
|
simondlevy/CMA-Gym
|
ce0056873d42eae2b6769fe22fcf872459694f30
|
[
"Apache-2.0"
] | null | null | null |
cma-evolve.py
|
simondlevy/CMA-Gym
|
ce0056873d42eae2b6769fe22fcf872459694f30
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import gym
import torch
import numpy as np
import multiprocessing as mp
import os
import pickle
import sys
import time
import logging
import cma
import argparse
from torchmodel import StandardFCNet
def _makedir(name):
if not os.path.exists(name):
os.makedirs(name)
def get_logger():
_makedir('log')
_makedir('data')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
logger = logging.getLogger('MAIN')
logger.setLevel(logging.DEBUG)
return logger
class Task:
def __init__(self, envname, hidden_size, max_steps, target, pop_size, reps, test_reps, weight_decay, noise_std, sigma):
self.task = envname
self.env_fn = lambda: gym.make(self.task)
self.repetitions = reps
self.test_repetitions = test_reps
env = self.env_fn()
self.action_dim = env.action_space.shape[0]
self.state_dim = env.observation_space.shape[0]
self.reward_to_fitness = lambda r: r
self.max_steps = max_steps
self.pop_size = pop_size
self.num_workers = mp.cpu_count()
self.action_clip = lambda a: np.clip(a, -1, 1)
self.target = target
self.hidden_size = hidden_size
self.model_fn = lambda: StandardFCNet(self.state_dim, self.action_dim, self.hidden_size)
model = self.model_fn()
self.initial_weight = model.get_weight()
self.weight_decay = weight_decay
self.action_noise_std = noise_std
self.sigma = sigma
self.tag = 'CMA-%d' % (hidden_size)
class BaseModel:
def get_weight(self):
weight = []
for param in self.parameters():
weight.append(param.data.numpy().flatten())
weight = np.concatenate(weight, 0)
return weight
def set_weight(self, solution):
offset = 0
for param in self.parameters():
param_shape = param.data.numpy().shape
param_size = np.prod(param_shape)
src_param = solution[offset: offset + param_size]
if len(param_shape) > 1:
src_param = src_param.reshape(param_shape)
param.data = torch.FloatTensor(src_param)
offset += param_size
assert offset == len(solution)
class Normalizer:
def __init__(self, filter_mean=True):
self.m = 0
self.v = 0
self.n = 0.
self.filter_mean = filter_mean
def state_dict(self):
return {'m': self.m,
'v': self.v,
'n': self.n}
def load_state_dict(self, saved):
self.m = saved['m']
self.v = saved['v']
self.n = saved['n']
def __call__(self, o):
self.m = self.m * (self.n / (self.n + 1)) + o * 1 / (1 + self.n)
self.v = self.v * (self.n / (self.n + 1)) + (o - self.m) ** 2 * 1 / (1 + self.n)
self.std = (self.v + 1e-6) ** .5 # std
self.n += 1
if self.filter_mean:
o_ = (o - self.m) / self.std
else:
o_ = o / self.std
return o_
class StaticNormalizer:
def __init__(self, o_size):
self.offline_stats = SharedStats(o_size)
self.online_stats = SharedStats(o_size)
def __call__(self, o_):
o = torch.FloatTensor([o_] if np.isscalar(o_) else o_)
self.online_stats.feed(o)
if self.offline_stats.n[0] == 0:
return o_
std = (self.offline_stats.v + 1e-6) ** .5
o = (o - self.offline_stats.m) / std
o = o.numpy()
if np.isscalar(o_):
o = np.asscalar(o)
else:
o = o.reshape(o_.shape)
return o
class SharedStats:
def __init__(self, o_size):
self.m = torch.zeros(o_size)
self.v = torch.zeros(o_size)
self.n = torch.zeros(1)
self.m.share_memory_()
self.v.share_memory_()
self.n.share_memory_()
def feed(self, o):
n = self.n[0]
new_m = self.m * (n / (n + 1)) + o / (n + 1)
self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))
self.m.copy_(new_m)
self.n.add_(1)
def zero(self):
self.m.zero_()
self.v.zero_()
self.n.zero_()
def load(self, stats):
self.m.copy_(stats.m)
self.v.copy_(stats.v)
self.n.copy_(stats.n)
def merge(self, B):
A = self
n_A = self.n[0]
n_B = B.n[0]
n = n_A + n_B
delta = B.m - A.m
m = A.m + delta * n_B / n
v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n
v /= n
self.m.copy_(m)
self.v.copy_(v)
self.n.add_(B.n)
def state_dict(self):
return {'m': self.m.numpy(),
'v': self.v.numpy(),
'n': self.n.numpy()}
def load_state_dict(self, saved):
self.m = torch.FloatTensor(saved['m'])
self.v = torch.FloatTensor(saved['v'])
self.n = torch.FloatTensor(saved['n'])
def fitness_shift(x):
x = np.asarray(x).flatten()
ranks = np.empty(len(x))
ranks[x.argsort()] = np.arange(len(x))
ranks /= (len(x) - 1)
ranks -= .5
return ranks
class Worker(mp.Process):
def __init__(self, id, task_q, result_q, stop):
mp.Process.__init__(self)
self.id = id
self.task_q = task_q
self.result_q = result_q
self.stop = stop
def run(self):
np.random.seed()
while not self.stop.value:
if self.task_q.empty():
continue
id, solution = self.task_q.get()
fitness, steps = self.evalfun(solution)
self.result_q.put([id, fitness, steps])
class Evaluator:
def __init__(self, config, state_normalizer):
self.model = config.model_fn()
self.repetitions = config.repetitions
self.env = config.env_fn()
self.state_normalizer = state_normalizer
self.config = config
def eval(self, solution):
self.model.set_weight(solution)
rewards = []
steps = []
for i in range(self.repetitions):
reward, step = self.single_run()
rewards.append(reward)
steps.append(step)
return -np.mean(rewards), np.sum(steps)
def single_run(self):
state = self.env.reset()
total_reward = 0
steps = 0
while True:
state = self.state_normalizer(state)
action = self.model(np.stack([state])).data.numpy().flatten()
action += np.random.randn(len(action)) * self.config.action_noise_std
action = self.config.action_clip(action)
state, reward, done, info = self.env.step(action)
steps += 1
total_reward += reward
if done:
return total_reward, steps
class CMAWorker(Worker):
def __init__(self, id, state_normalizer, task_q, result_q, stop, config):
Worker.__init__(self, id, task_q, result_q, stop)
self.evalfun = Evaluator(config, state_normalizer).eval
def train(config, logger):
task_queue = mp.SimpleQueue()
result_queue = mp.SimpleQueue()
stop = mp.Value('i', False)
stats = SharedStats(config.state_dim)
normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
workers = [CMAWorker(id, normalizers[id], task_queue, result_queue, stop, config) for id in range(config.num_workers)]
for w in workers: w.start()
opt = cma.CMAOptions()
opt['tolfun'] = -config.target
opt['popsize'] = config.pop_size
opt['verb_disp'] = 0
opt['verb_log'] = 0
opt['maxiter'] = sys.maxsize
es = cma.CMAEvolutionStrategy(config.initial_weight, config.sigma, opt)
total_steps = 0
initial_time = time.time()
training_rewards = []
training_steps = []
training_timestamps = []
test_mean, test_std = test(config, config.initial_weight, stats)
logger.info('total steps %8d, %+4.0f(%+4.0f)' % (total_steps, test_mean, test_std))
training_rewards.append(test_mean)
training_steps.append(0)
training_timestamps.append(0)
while True:
solutions = es.ask()
for id, solution in enumerate(solutions):
task_queue.put((id, solution))
while not task_queue.empty():
continue
result = []
while len(result) < len(solutions):
if result_queue.empty():
continue
result.append(result_queue.get())
result = sorted(result, key=lambda x: x[0])
total_steps += np.sum([r[2] for r in result])
cost = [r[1] for r in result]
best_solution = solutions[np.argmin(cost)]
elapsed_time = time.time() - initial_time
test_mean, test_std = test(config, best_solution, stats)
best = -np.min(cost)
        logger.info('total steps = %8d test = %+4.0f (%4.0f) best = %+4.0f (%+4.0f) elapsed time = %4.0f sec' %
(total_steps, test_mean, test_std, best, config.target, elapsed_time))
training_rewards.append(test_mean)
training_steps.append(total_steps)
training_timestamps.append(elapsed_time)
#with open('data/%s-best_solution_%s.bin' % (TAG, config.task), 'wb') as f: # XXX gets stuck
# pickle.dump(solutions[np.argmin(result)], f)
if best > config.target:
logger.info('Best score of %f exceeds target %f' % (best, config.target))
break
if config.max_steps and total_steps > config.max_steps:
logger.info('Maximum number of steps exceeded')
stop.value = True
break
cost = fitness_shift(cost)
es.tell(solutions, cost)
# es.disp()
for normalizer in normalizers:
stats.merge(normalizer.online_stats)
normalizer.online_stats.zero()
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
stop.value = True
for w in workers: w.join()
return [training_rewards, training_steps, training_timestamps]
def test(config, solution, stats):
normalizer = StaticNormalizer(config.state_dim)
normalizer.offline_stats.load_state_dict(stats.state_dict())
evaluator = Evaluator(config, normalizer)
evaluator.model.set_weight(solution)
rewards = []
for i in range(config.test_repetitions):
reward, _ = evaluator.single_run()
rewards.append(reward)
return np.mean(rewards), np.std(rewards) / config.repetitions
def multi_runs(task, logger, runs=1):
if not os.path.exists('log'):
os.makedirs('log')
fh = logging.FileHandler('log/%s-%s.txt' % (task.tag, task.task))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
stats = []
for run in range(runs):
logger.info('Run %3d/%3d' % (run+1, runs))
stats.append(train(task, logger))
with open('data/%s-stats-%s.bin' % (task.tag, task.task), 'wb') as f:
pickle.dump(stats, f)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', type=str, default='Pendulum-v0')
parser.add_argument('--nhid', help='# of hidden units', type=int, default=64)
parser.add_argument('--target', help='reward goal', type=float, default=-np.inf)
parser.add_argument('--max-steps', help='maximum number of steps', type=int, default=int(2e7))
parser.add_argument('--pop-size', help='population size', type=int, default=64)
parser.add_argument('--reps', help='repetitions', type=int, default=10)
parser.add_argument('--test-reps', help='test repetitions', type=int, default=10)
parser.add_argument('--weight-decay', help='weight decay', type=float, default=0.005)
parser.add_argument('--noise-std', help='noise standard deviation', type=float, default=0)
parser.add_argument('--sigma', help='sigma', type=float, default=1)
args = parser.parse_args()
task = Task(args.env, args.nhid, args.max_steps, args.target, args.pop_size, args.reps, args.test_reps,
args.weight_decay, args.noise_std, args.sigma)
logger = get_logger()
p = mp.Process(target=multi_runs, args=(task,logger))
p.start()
p.join()
if __name__ == '__main__':
main()
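# Illustrative invocations, sketched from the argparse options above (the
# environment id in the second example is hypothetical for this script):
#
#   python cma-evolve.py                                  # Pendulum-v0 defaults
#   python cma-evolve.py --env BipedalWalker-v2 --nhid 64 --pop-size 32 --reps 5 --target 300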
| 33.574526
| 123
| 0.600291
| 1,669
| 12,389
| 4.281606
| 0.158778
| 0.014694
| 0.02379
| 0.008396
| 0.172544
| 0.117128
| 0.074447
| 0.065211
| 0.015953
| 0
| 0
| 0.009484
| 0.26806
| 12,389
| 368
| 124
| 33.665761
| 0.778562
| 0.013964
| 0
| 0.102564
| 0
| 0.003205
| 0.050778
| 0
| 0
| 0
| 0
| 0
| 0.003205
| 1
| 0.092949
| false
| 0
| 0.038462
| 0.00641
| 0.195513
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
defeff29d76d14fa0aceaad7cd54a55164f7136c
| 2,386
|
py
|
Python
|
rastervision/data/label_store/default.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 4
|
2019-03-11T12:38:15.000Z
|
2021-04-06T14:57:52.000Z
|
rastervision/data/label_store/default.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/data/label_store/default.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2019-10-29T09:22:09.000Z
|
2019-10-29T09:22:09.000Z
|
from abc import (ABC, abstractmethod)
import os
import rastervision as rv
class LabelStoreDefaultProvider(ABC):
@staticmethod
@abstractmethod
    def is_default_for(task_type):
        """Returns True if this label store is the default for this task_type"""
        pass
@staticmethod
@abstractmethod
    def handles(task_type, s):
        """Returns True if this provider is a default for this task_type and string"""
        pass
@abstractmethod
    def construct(s=None):
        """Constructs a default LabelStore based on the string.
        """
pass
class ObjectDetectionGeoJSONStoreDefaultProvider(LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.OBJECT_DETECTION
@staticmethod
def handles(task_type, uri):
if task_type == rv.OBJECT_DETECTION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.json', '.geojson']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.OBJECT_DETECTION_GEOJSON)
if uri:
b = b.with_uri(uri)
return b.build()
class ChipClassificationGeoJSONStoreDefaultProvider(LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.CHIP_CLASSIFICATION
@staticmethod
def handles(task_type, uri):
if task_type == rv.CHIP_CLASSIFICATION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.json', '.geojson']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.CHIP_CLASSIFICATION_GEOJSON)
if uri:
b = b.with_uri(uri)
return b.build()
class SemanticSegmentationRasterStoreDefaultProvider(
LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.SEMANTIC_SEGMENTATION
@staticmethod
def handles(task_type, uri):
if task_type == rv.SEMANTIC_SEGMENTATION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.tiff', '.tif']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.SEMANTIC_SEGMENTATION_RASTER)
if uri:
b = b.with_uri(uri)
return b.build()
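# A minimal selection sketch, mirroring how a default provider would be picked
# for a task/URI pair (the URI is hypothetical; requires a full rastervision
# install at runtime):
if __name__ == '__main__':
    uri = '/tmp/labels.geojson'
    provider = ObjectDetectionGeoJSONStoreDefaultProvider
    if provider.handles(rv.OBJECT_DETECTION, uri):
        config = provider.construct(uri)
        print(type(config))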
| 27.425287
| 86
| 0.65088
| 267
| 2,386
| 5.670412
| 0.250936
| 0.07926
| 0.03963
| 0.03963
| 0.624835
| 0.554822
| 0.53963
| 0.53963
| 0.53963
| 0.53963
| 0
| 0.0017
| 0.260268
| 2,386
| 86
| 87
| 27.744186
| 0.856091
| 0.084241
| 0
| 0.6875
| 0
| 0
| 0.016136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0.046875
| 0.046875
| 0.046875
| 0.484375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 7202ced44b536e7785d48d42a3fe09355e98fc12
| 448
| py
| Python
| guestbook/models.py
| Bespolezniy/geek-world
| 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
| ["MIT"]
| null | null | null
| guestbook/models.py
| Bespolezniy/geek-world
| 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
| ["MIT"]
| null | null | null
| guestbook/models.py
| Bespolezniy/geek-world
| 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
| ["MIT"]
| null | null | null
|
from django.db import models
# Create your models here.
class GuestBook(models.Model):
user = models.CharField(max_length=15, verbose_name="User")
date = models.DateTimeField(db_index=True, auto_now_add=True, verbose_name="Published")
content = models.TextField(verbose_name="Content")
class Meta:
ordering = ["-date"]
verbose_name = "Guest book entry"
verbose_name_plural = "Guest book entries"
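# Hedged usage sketch, assuming a configured Django project with migrations
# applied: auto_now_add stamps the date on creation, and Meta.ordering makes a
# plain queryset return the newest entries first.
entry = GuestBook.objects.create(user="guest", content="Hello!")  # date set automatically
latest_ten = GuestBook.objects.all()[:10]  # newest first via ordering = ["-date"]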
| 37.333333
| 92
| 0.694196
| 56
| 448
| 5.375
| 0.625
| 0.182724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005587
| 0.200893
| 448
| 12
| 93
| 37.333333
| 0.835196
| 0.053571
| 0
| 0
| 0
| 0
| 0.143204
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 72043f3633eddba64964dbbdb6f17d84cf1d6267
| 34,859
| py
| Python
| PA1/PA1_Q2/P21CS007_VGG16.py
| aryachiranjeev/Dependable-AI
| 750570572c1baaa2590a89c0982e2f71b15b48b9
| ["MIT"]
| null | null | null
| PA1/PA1_Q2/P21CS007_VGG16.py
| aryachiranjeev/Dependable-AI
| 750570572c1baaa2590a89c0982e2f71b15b48b9
| ["MIT"]
| null | null | null
| PA1/PA1_Q2/P21CS007_VGG16.py
| aryachiranjeev/Dependable-AI
| 750570572c1baaa2590a89c0982e2f71b15b48b9
| ["MIT"]
| null | null | null
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,Flatten,GlobalAveragePooling2D,Input,Lambda
from tensorflow.keras.models import Model,load_model
import tensorflow.keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import preprocess_input
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score,confusion_matrix
from skimage.color import rgb2gray
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# In[110]:
def brute_vgg16():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
brute_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.1
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    brute_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])  # use the configured SGD (lr/decay), not the string 'sgd'
history = brute_model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=epochs, batch_size=16)
brute_model.save("vgg16_cifar10")
y_pred_train = brute_model.predict(x_train)
predictions_train = np.argmax(y_pred_train,axis=1)
print("training accuracy:",accuracy_score(np.argmax(y_train,axis=1),predictions_train))
y_pred_test = brute_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test))
# plot loss during training
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
# plot accuracy during training
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show()
return brute_model
# In[150]:
def test_brute_model_on_gray_scale_test_images(brute_model):
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
def gray_images(x_test):
gray_x_test = []
for i in x_test:
gray_scale = rgb2gray(i)
gray_x_test.append(np.dstack((gray_scale,gray_scale,gray_scale)))
gray_x_test = np.array(gray_x_test)
print(gray_x_test.shape)
return gray_x_test
gray_x_test = gray_images(x_test)
y_pred_test = brute_model.predict(gray_x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
print("gray scale confusion matrix:\n",confusion_matrix(np.argmax(y_test,axis=1),prediction_test))
# In[112]:
def class_wise_accuracy(models):
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
confus_matrix = confusion_matrix(np.argmax(y_test,axis=1),prediction_test)
print("confusion_matrix:\n",confus_matrix)
class_accuracy = []
class_TP = []
for i in range(confus_matrix.shape[0]):
for j in range(confus_matrix.shape[1]):
if i == j:
TP = confus_matrix[i][j]
class_TP.append(TP)
for k in range(confus_matrix.shape[1]):
ca = (class_TP[k] / confus_matrix[:,k].sum())*100
class_accuracy.append(ca)
print("class ",k," accuracy ",labels[k]," :",ca,"%")
class_accuracy = np.array(class_accuracy)
return class_accuracy
# In[113]:
def bias_metrics(class_accuracy,models):
dob = np.std(class_accuracy)
print("Degree of Bias:",dob)
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
from sklearn.metrics import confusion_matrix
confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
print("confusion_matrix:\n",confuse_matrix)
FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
TP = np.diag(confuse_matrix)
TN = confuse_matrix.sum() - (FP+FN+TP)
FP=FP.astype(float)
TP=TP.astype(float)
FN=FN.astype(float)
TN=TN.astype(float)
FNR = FN/(TP+FN)
    FPR = FP/(TN+FP)  # false positive rate; denominator is TN+FP, not TN+FN
print("FPR:",FPR)
print("FNR:",FNR)
AFR = ((FPR.sum()/10)+(FNR.sum()/10))/2
print("AFR:",AFR)
# In[151]:
# (grayscale evaluation runs below, once brute_model has been defined)
# In[115]:
#brute model
print("/nbrute model/n")
brute_model = brute_vgg16()
test_brute_model_on_gray_scale_test_images(brute_model)
class_accuracy_brute_model = class_wise_accuracy(brute_model)
bias_metrics(class_accuracy_brute_model,brute_model)
# In[40]:
def create_results(brute_model):
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
y_pred_test = brute_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
df = pd.DataFrame(np.hstack((y_test_without_one_hot,prediction_test.reshape(len(prediction_test),1))),columns=['y_test','y_test_pred'],index=None)
print(df.head())
df.to_csv("y_test_prediction_test.csv",index=False)
correct_idxes = []
incorrect_idxes = []
for i in range(len(prediction_test)):
if y_test_without_one_hot[i] == prediction_test[i]:
correct_idxes.append(i)
elif y_test_without_one_hot[i] != prediction_test[i]:
incorrect_idxes.append(i)
cv2.imwrite("correct"+str(int(y_test_without_one_hot[correct_idxes[0]][0]))+".jpg",x_test[correct_idxes[0]])
cv2.imwrite("correct"+str(int(y_test_without_one_hot[correct_idxes[1]][0]))+".jpg",x_test[correct_idxes[1]])
cv2.imwrite("incorrect"+str(int(y_test_without_one_hot[incorrect_idxes[0]][0]))+".jpg",x_test[incorrect_idxes[0]])
cv2.imwrite("incorrect"+str(int(y_test_without_one_hot[incorrect_idxes[1]][0]))+".jpg",x_test[incorrect_idxes[1]])
# In[68]:
class GradCAM:
def __init__(self, model, classIdx, layerName=None):
self.model = model
self.classIdx = classIdx
self.layerName = layerName
if self.layerName is None:
self.layerName = self.find_target_layer()
def find_target_layer(self):
for layer in reversed(self.model.layers):
if len(layer.output_shape) == 4:
return layer.name
raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
def compute_heatmap(self, image, eps=1e-8):
gradModel = Model(inputs=[self.model.inputs],outputs=[self.model.get_layer(self.layerName).output, self.model.output])
with tf.GradientTape() as tape:
inputs = tf.cast(image, tf.float32)
(convOutputs, predictions) = gradModel(inputs)
loss = predictions[:, tf.argmax(predictions[0])]
grads = tape.gradient(loss, convOutputs)
castConvOutputs = tf.cast(convOutputs > 0, "float32")
castGrads = tf.cast(grads > 0, "float32")
guidedGrads = castConvOutputs * castGrads * grads
convOutputs = convOutputs[0]
guidedGrads = guidedGrads[0]
weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
(w, h) = (image.shape[2], image.shape[1])
heatmap = cv2.resize(cam.numpy(), (w, h))
numer = heatmap - np.min(heatmap)
denom = (heatmap.max() - heatmap.min()) + eps
heatmap = numer / denom
heatmap = (heatmap * 255).astype("uint8")
return heatmap
def overlay_heatmap(self, heatmap, image, alpha=0.5,colormap=cv2.COLORMAP_VIRIDIS):
heatmap = cv2.applyColorMap(heatmap, colormap)
output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
return (heatmap, output)
def make_gradCAM(img_path,brute_model,classified,layer_name="block5_conv3"):
image = cv2.imread(img_path)
image = cv2.resize(image, (32, 32))
image = np.expand_dims(image, axis=0)
preds = brute_model.predict(image)
i = np.argmax(preds[0])
icam = GradCAM(brute_model, i,layer_name)
heatmap = icam.compute_heatmap(image)
heatmap = cv2.resize(heatmap, (32, 32))
image = cv2.imread(img_path)
image = cv2.resize(image, (32, 32))
(heatmap, output) = icam.overlay_heatmap(heatmap, image, alpha=0.5)
fig, ax = plt.subplots(1, 3)
ax[0].imshow(heatmap)
ax[1].imshow(image)
ax[2].imshow(output)
plt.savefig("GradCAM_"+ str(classified)+str(img_path[-5])+".jpg")
plt.show()
plt.close()
layer_names = ["block5_conv3","block4_conv2"]
for l in layer_names:
print("layer name:",l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg",brute_model,classified="correct",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg",brute_model,classified="correct",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg",brute_model,classified="incorrect",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg",brute_model,classified="incorrect",layer_name =l)
# In[161]:
def grad_cam_pp(model, img,layer_name="block5_conv3", label_name=None,category_id=None):
img_tensor = np.expand_dims(img, axis=0)
conv_layer = model.get_layer(layer_name)
heatmap_model = Model([model.inputs], [conv_layer.output, model.output])
with tf.GradientTape() as gtape1:
with tf.GradientTape() as gtape2:
with tf.GradientTape() as gtape3:
conv_output, predictions = heatmap_model(img_tensor)
if category_id==None:
category_id = np.argmax(predictions[0])
output = predictions[:, category_id]
conv_first_grad = gtape3.gradient(output, conv_output)
conv_second_grad = gtape2.gradient(conv_first_grad, conv_output)
conv_third_grad = gtape1.gradient(conv_second_grad, conv_output)
global_sum = np.sum(conv_output, axis=(0, 1, 2))
alpha_num = conv_second_grad[0]
alpha_denom = conv_second_grad[0]*2.0 + conv_third_grad[0]*global_sum
alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, 1e-10)
alphas = alpha_num/alpha_denom
alpha_normalization_constant = np.sum(alphas, axis=(0,1))
alphas /= alpha_normalization_constant
weights = np.maximum(conv_first_grad[0], 0.0)
deep_linearization_weights = np.sum(weights*alphas, axis=(0,1))
grad_CAM_map = np.sum(deep_linearization_weights*conv_output[0], axis=2)
heatmap = np.maximum(grad_CAM_map, 0)
max_heat = np.max(heatmap)
if max_heat == 0:
max_heat = 1e-10
heatmap /= max_heat
return heatmap
def superimpose(img, cam):
heatmap = cv2.resize(cam, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
heatmap = cv2.cvtColor(heatmap,cv2.COLOR_BGR2RGB)
superimposed_img = heatmap * .5 + img * .5
superimposed_img = np.minimum(superimposed_img, 255.0).astype(np.uint8)
return img, heatmap, superimposed_img
def plot(img,cam):
img = cv2.resize(img, (32, 32))
img, heatmap, superimposed_img = superimpose(img, cam)
fig, axs = plt.subplots(ncols=3, figsize=(9, 4))
axs[0].imshow(img)
axs[0].set_title('original image')
axs[0].axis('off')
axs[1].imshow(heatmap)
axs[1].set_title('heatmap')
axs[1].axis('off')
axs[2].imshow(superimposed_img)
axs[2].set_title('superimposed image')
axs[2].axis('off')
plt.show()
plt.close()
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']  # needed by the grad_cam_pp calls below
layer_names = ["block5_conv3","block4_conv2"]
for l in layer_names:
print("layer name:",l)
img_path1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg"
img_path2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg"
img_path3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg"
img_path4 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg"
img1 = cv2.imread(img_path1)
cam1 = grad_cam_pp(brute_model, img1,layer_name=l, label_name=labels,category_id=int(img_path1[-5]))
plot(img1,cam1)
img2 = cv2.imread(img_path2)
cam2 = grad_cam_pp(brute_model, img2,layer_name=l, label_name=labels,category_id=int(img_path2[-5]))
plot(img2,cam2)
img3 = cv2.imread(img_path3)
cam3 = grad_cam_pp(brute_model, img3,layer_name=l, label_name=labels,category_id=int(img_path3[-5]))
plot(img3,cam3)
img4 = cv2.imread(img_path4)
cam4 = grad_cam_pp(brute_model, img4,layer_name=l, label_name=labels,category_id=int(img_path4[-5]))
plot(img4,cam4)
# In[162]:
def preprocessed_data_model():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
train_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True,horizontal_flip=True, rotation_range=20)
train_datagen.fit(x_train)
valid_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
valid_datagen.fit(x_valid)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
valid_generator = valid_datagen.flow(x_valid, y_valid, batch_size=16)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
preprocessed_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.1
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    preprocessed_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])  # use the configured SGD, not the string 'sgd'
history = preprocessed_model.fit(x=train_generator,steps_per_epoch=len(train_generator),validation_data=valid_generator,validation_steps=len(valid_generator),epochs=epochs)
# model evaluation
_, test_accuracy = preprocessed_model.evaluate_generator(test_generator, steps=len(test_generator),verbose=0)
print("test accuracy:",test_accuracy)
train_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True,horizontal_flip=True, rotation_range=20)
train_datagen.fit(x_train)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
_, train_accuracy = preprocessed_model.evaluate_generator(train_generator, steps=len(train_generator),verbose=0)
print("train accuracy:",train_accuracy)
y_pred_test = preprocessed_model.predict(x=test_generator, steps=len(test_generator))
predictions_test = np.argmax(y_pred_test, axis=1)
preprocessed_model.save("vgg16_cifar10_preprocessed_rot_new")
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.savefig("loss_preprocess_flip_rot.png")
plt.close()
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.savefig("accuracy_preprocess_flip_rot.png")
plt.close()
return preprocessed_model
# In[38]:
def preprocess_helper():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
x_test_preprocessed = []
y_test_preprocessed = []
for i in range(len(test_generator)):
for img in test_generator[i][0]:
x_test_preprocessed.append(img)
for lb in test_generator[i][1]:
y_test_preprocessed.append(lb)
x_test_preprocessed = np.array(x_test_preprocessed)
y_test_preprocessed = np.array(y_test_preprocessed)
return x_test_preprocessed,y_test_preprocessed
def class_wise_accuracy_preprocess(models,x_test,y_test):
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
y_test_without_one_hot = np.argmax(y_test,axis=1)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
confus_matrix = confusion_matrix(np.argmax(y_test,axis=1),prediction_test)
print("confusion_matrix:\n",confus_matrix)
class_accuracy = []
class_TP = []
for i in range(confus_matrix.shape[0]):
for j in range(confus_matrix.shape[1]):
if i == j:
TP = confus_matrix[i][j]
class_TP.append(TP)
for k in range(confus_matrix.shape[1]):
ca = (class_TP[k] / confus_matrix[:,k].sum())*100
class_accuracy.append(ca)
print("class ",k," accuracy ",labels[k]," :",ca,"%")
class_accuracy = np.array(class_accuracy)
return class_accuracy
def bias_metrics_preprocess(class_accuracy,models,x_test,y_test):
dob = np.std(class_accuracy)
print("Degree of Bias:",dob)
y_test_without_one_hot = np.argmax(y_test,axis=1)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
from sklearn.metrics import confusion_matrix
confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
print("confusion_matrix:\n",confuse_matrix)
FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
TP = np.diag(confuse_matrix)
TN = confuse_matrix.sum() - (FP+FN+TP)
FP=FP.astype(float)
TP=TP.astype(float)
FN=FN.astype(float)
TN=TN.astype(float)
FNR = FN/(TP+FN)
    FPR = FP/(TN+FP)  # false positive rate; denominator is TN+FP, not TN+FN
print("FPR:",FPR)
print("FNR:",FNR)
AFR = ((FPR.sum()/10)+(FNR.sum()/10))/2
print("AFR:",AFR)
def create_results_preprocess(models,x_test,y_test):
y_test_without_one_hot = np.argmax(y_test,axis=1)
print(y_test.shape)
print(x_test.shape)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
df = pd.DataFrame(np.hstack((y_test_without_one_hot.reshape(len(y_test_without_one_hot),1),prediction_test.reshape(len(prediction_test),1))),columns=['y_test','y_test_pred'],index=None)
print(df.head())
df.to_csv("y_test_prediction_test.csv",index=False)
correct_idxes = []
incorrect_idxes = []
for i in range(len(prediction_test)):
if y_test_without_one_hot[i] == prediction_test[i]:
correct_idxes.append(i)
elif y_test_without_one_hot[i] != prediction_test[i]:
incorrect_idxes.append(i)
# In[39]:
#preprocess model
print("\npreprocess model\n")
preprocessed_model = preprocessed_data_model()
x_test_preprocessed,y_test_preprocessed = preprocess_helper()
preprocessed_model1 = tf.keras.models.load_model("/home/euclid/Desktop/Chiranjeev/DAI/vgg16_cifar10_preprocessed_rot_new")
class_accuracy_preprocessed_model1 = class_wise_accuracy_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
bias_metrics_preprocess(class_accuracy_preprocessed_model1,preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
create_results_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
# In[118]:
def method_model():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
kl_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.01
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    kl_model.compile(loss='kullback_leibler_divergence', optimizer=sgd, metrics=['accuracy'])  # use the configured SGD, not the string 'sgd'
history = kl_model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=epochs, batch_size=16)
kl_model.save("vgg16_cifar10_method")
y_pred_train = kl_model.predict(x_train)
predictions_train = np.argmax(y_pred_train,axis=1)
print("training accuracy:",accuracy_score(np.argmax(y_train,axis=1),predictions_train))
y_pred_test = kl_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test))
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show()
return kl_model
# In[119]:
#method model
print("/nmethod model/n")
kl_model = method_model()
class_accuracy_kl_model = class_wise_accuracy(kl_model)
bias_metrics(class_accuracy_kl_model,kl_model)
create_results(kl_model)
# In[120]:
print("\npreprocessed model\n")
class_accuracy_preprocessed = class_wise_accuracy(preprocessed_model)
print("each class accuracies preprocessed",class_accuracy_preprocessed)
bias_metrics(class_accuracy_preprocessed,preprocessed_model)
print("\nmethod model\n")
class_accuracy_method = class_wise_accuracy(kl_model)
print("each class accuracies mehtod",class_accuracy_method)
bias_metrics(class_accuracy_method,kl_model)
# In[187]:
def check_bias_by_counting(filename):
df = pd.read_csv(filename)
#gender 0 (g1) male
race1_correct_g1 = 0
race2_correct_g1 = 0
race3_correct_g1 = 0
race4_correct_g1 = 0
age_0_28_correct_g1 = 0
age_29_56_correct_g1 = 0
age_57_84_correct_g1 = 0
age_85_116_correct_g1 = 0
race1_incorrect_g1 = 0
race2_incorrect_g1 = 0
race3_incorrect_g1 = 0
race4_incorrect_g1 = 0
age_0_28_incorrect_g1 = 0
age_29_56_incorrect_g1 = 0
age_57_84_incorrect_g1 = 0
age_85_116_incorrect_g1 = 0
#gender 1 (g2) female
race1_correct_g2 = 0
race2_correct_g2 = 0
race3_correct_g2 = 0
race4_correct_g2 = 0
age_0_28_correct_g2 = 0
age_29_56_correct_g2 = 0
age_57_84_correct_g2 = 0
age_85_116_correct_g2 = 0
race1_incorrect_g2 = 0
race2_incorrect_g2 = 0
race3_incorrect_g2 = 0
race4_incorrect_g2 = 0
age_0_28_incorrect_g2 = 0
age_29_56_incorrect_g2 = 0
age_57_84_incorrect_g2 = 0
age_85_116_incorrect_g2 = 0
df_np = df.iloc[:,:].values
for i in range(len(df_np)):
#correct predictions
if df_np[i][2] == df_np[i][4]:
#male
if df_np[i][2] == 0:
# age groups
if df_np[i][1] == 0:
age_0_28_correct_g1 += 1
elif df_np[i][1] == 1:
age_29_56_correct_g1+=1
elif df_np[i][1] == 2:
age_57_84_correct_g1 += 1
elif df_np[i][1] == 3:
age_85_116_correct_g1 += 1
#race groups
if df_np[i][3] == 0:
race1_correct_g1 += 1
elif df_np[i][3] == 1:
race2_correct_g1+=1
elif df_np[i][3] == 2:
race3_correct_g1 += 1
elif df_np[i][3] == 3:
race4_correct_g1 += 1
#female
elif df_np[i][2] == 1:
# age groups
if df_np[i][1] == 0:
age_0_28_correct_g2 += 1
elif df_np[i][1] == 1:
age_29_56_correct_g2+=1
elif df_np[i][1] == 2:
age_57_84_correct_g2 += 1
elif df_np[i][1] == 3:
age_85_116_correct_g2 += 1
#race groups
if df_np[i][3] == 0:
race1_correct_g2 += 1
elif df_np[i][3] == 1:
race2_correct_g2+=1
elif df_np[i][3] == 2:
race3_correct_g2 += 1
elif df_np[i][3] == 3:
race4_correct_g2 += 1
elif df_np[i][2] != df_np[i][4]:
#male
if df_np[i][2] == 0:
# age groups
if df_np[i][1] == 0:
age_0_28_incorrect_g1 += 1
elif df_np[i][1] == 1:
age_29_56_incorrect_g1+=1
elif df_np[i][1] == 2:
age_57_84_incorrect_g1 += 1
elif df_np[i][1] == 3:
age_85_116_incorrect_g1 += 1
#race groups
if df_np[i][3] == 0:
race1_incorrect_g1 += 1
elif df_np[i][3] == 1:
race2_incorrect_g1+=1
elif df_np[i][3] == 2:
race3_incorrect_g1 += 1
elif df_np[i][3] == 3:
race4_incorrect_g1 += 1
#female
elif df_np[i][2] == 1:
# age groups
if df_np[i][1] == 0:
age_0_28_incorrect_g2 += 1
elif df_np[i][1] == 1:
age_29_56_incorrect_g2+=1
elif df_np[i][1] == 2:
age_57_84_incorrect_g2 += 1
elif df_np[i][1] == 3:
age_85_116_incorrect_g2 += 1
#race groups
if df_np[i][3] == 0:
race1_incorrect_g2 += 1
elif df_np[i][3] == 1:
race2_incorrect_g2+=1
elif df_np[i][3] == 2:
race3_incorrect_g2 += 1
elif df_np[i][3] == 3:
race4_incorrect_g2 += 1
print("DoB")
#gender 1
race1_accuracy_g1 = (race1_correct_g1/(race1_correct_g1+race1_incorrect_g1))*100
race2_accuracy_g1 = (race2_correct_g1/(race2_correct_g1+race2_incorrect_g1))*100
race3_accuracy_g1 = (race3_correct_g1/(race3_correct_g1+race3_incorrect_g1))*100
race4_accuracy_g1 = (race4_correct_g1/(race4_correct_g1+race4_incorrect_g1))*100
print("race1_accuracy_g1:",race1_accuracy_g1)
print("race2_accuracy_g1:",race2_accuracy_g1)
print("race3_accuracy_g1:",race3_accuracy_g1)
print("race4_accuracy_g1:",race4_accuracy_g1)
age_0_28_accuracy_g1 = (age_0_28_correct_g1/(age_0_28_correct_g1+age_0_28_incorrect_g1))*100
age_29_56_accuracy_g1 = (age_29_56_correct_g1/(age_29_56_correct_g1+age_29_56_incorrect_g1))*100
age_57_84_accuracy_g1 = (age_57_84_correct_g1/(age_57_84_correct_g1+age_57_84_incorrect_g1))*100
age_85_116_accuracy_g1 = (age_85_116_correct_g1/(age_85_116_correct_g1+age_85_116_incorrect_g1))*100
print("age_0_28_accuracy_g1:",age_0_28_accuracy_g1)
print("age_29_56_accuracy_g1:",age_29_56_accuracy_g1)
print("age_57_84_accuracy_g1:",age_57_84_accuracy_g1)
print("age_85_116_accuracy_g1:",age_85_116_accuracy_g1)
#gender2
race1_accuracy_g2 = (race1_correct_g2/(race1_correct_g2+race1_incorrect_g2))*100
race2_accuracy_g2 = (race2_correct_g2/(race2_correct_g2+race2_incorrect_g2))*100
race3_accuracy_g2 = (race3_correct_g2/(race3_correct_g2+race3_incorrect_g2))*100
race4_accuracy_g2 = (race4_correct_g2/(race4_correct_g2+race4_incorrect_g2))*100
print("race1_accuracy_g2:",race1_accuracy_g2)
print("race2_accuracy_g2:",race2_accuracy_g2)
print("race3_accuracy_g2:",race3_accuracy_g2)
print("race4_accuracy_g2:",race4_accuracy_g2)
age_0_28_accuracy_g2 = (age_0_28_correct_g2/(age_0_28_correct_g2+age_0_28_incorrect_g2))*100
age_29_56_accuracy_g2 = (age_29_56_correct_g2/(age_29_56_correct_g2+age_29_56_incorrect_g2))*100
age_57_84_accuracy_g2 = (age_57_84_correct_g2/(age_57_84_correct_g2+age_57_84_incorrect_g2))*100
age_85_116_accuracy_g2 = (age_85_116_correct_g2/(age_85_116_correct_g2+age_85_116_incorrect_g2))*100
print("age_0_28_accuracy_g2:",age_0_28_accuracy_g2)
print("age_29_56_accuracy_g2:",age_29_56_accuracy_g2)
print("age_57_84_accuracy_g2:",age_57_84_accuracy_g2)
print("age_85_116_accuracy_g2:",age_85_116_accuracy_g2)
print("DoB across race")
dob_across_race1 = np.std(np.array([race1_accuracy_g1,race1_accuracy_g2]))
dob_across_race2 = np.std(np.array([race2_accuracy_g1,race2_accuracy_g2]))
dob_across_race3 = np.std(np.array([race3_accuracy_g1,race3_accuracy_g2]))
dob_across_race4 = np.std(np.array([race4_accuracy_g1,race4_accuracy_g2]))
dob_across_race_overall = (dob_across_race1+dob_across_race2+dob_across_race3+dob_across_race4)/4
print("dob_across_race_overall:",dob_across_race_overall)
print("DoB across age")
dob_across_age_0_28 = np.std(np.array([age_0_28_accuracy_g1,age_0_28_accuracy_g2]))
dob_across_age_29_56 = np.std(np.array([age_29_56_accuracy_g1,age_29_56_accuracy_g2]))
dob_across_age_57_84 = np.std(np.array([age_57_84_accuracy_g1,age_57_84_accuracy_g2]))
dob_across_age_85_116 = np.std(np.array([age_85_116_accuracy_g1,age_85_116_accuracy_g2]))
dob_across_age_overall = (dob_across_age_0_28+dob_across_age_29_56+dob_across_age_57_84+dob_across_age_85_116)/4
print("dob_across_age_overall:",dob_across_age_overall)
return dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall
# In[200]:
print("cross entropy loss")
filename1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/categorical_cross_entropy/test_gender_across_race_age_y_test_pred2_optimizer2_45.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename1)
print("\nfocal loss")
filename2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/focal_loss/test_gender_across_race_age_y_test_pred2_optimizer2_45_focal_loss.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename2)
print("\nLinearsvm")
filename3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/svm/test_gender_across_race_age_y_test_pred2_optimizer2_svm.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename3)
| 33.16746
| 235
| 0.685304
| 5,083
| 34,859
| 4.336612
| 0.084399
| 0.017466
| 0.02536
| 0.021095
| 0.694642
| 0.612802
| 0.579867
| 0.572744
| 0.55047
| 0.493445
| 0
| 0.053156
| 0.200207
| 34,859
| 1,050
| 236
| 33.199048
| 0.737482
| 0.016495
| 0
| 0.437107
| 0
| 0
| 0.085928
| 0.044191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033019
| false
| 0
| 0.033019
| 0
| 0.08805
| 0.125786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 72045094280bf8b19ef8956f47fe38ea87d738b3
| 1,027
| py
| Python
| notebooks/general.py
| transientlunatic/grasshopper
| 1d3822427970d200341ff9d2823949fb4b27e001
| ["0BSD"]
| 3
| 2020-09-26T01:27:13.000Z
| 2020-09-30T05:47:42.000Z
| notebooks/general.py
| transientlunatic/gravpy
| 1d3822427970d200341ff9d2823949fb4b27e001
| ["0BSD"]
| null | null | null
| notebooks/general.py
| transientlunatic/gravpy
| 1d3822427970d200341ff9d2823949fb4b27e001
| ["0BSD"]
| null | null | null
|
import numpy as np
import astropy.units as u
def snr(signal, detector):
"""
Calculate the SNR of a signal in a given detector,
assuming that it has been detected with an optimal filter.
See e.g. arxiv.org/abs/1408.0740
Parameters
----------
signal : Source
A Source object which describes the source producing the
signal, e.g. a CBC.
detector : Detector
A Detector object describing the instrument making the observation
e.g. aLIGO.
Returns
-------
SNR : float
The signal-to-noise ratio of the signal in the detector.
"""
if signal.ncycles():
ncycles = np.sqrt(2*signal.ncycles(detector.frequencies))
else:
ncycles = 1
noise = detector.psd(detector.frequencies)
ampli = signal.raw_strain(detector.frequencies) * ncycles
fraction = 4*(np.abs(ampli)**2 / noise)
fraction[np.isnan(fraction)]=0
return np.sqrt(np.trapz(fraction, x=detector.frequencies, dx=0.01*u.hertz))
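# A hedged usage sketch: the stubs below stand in for real grasshopper Source
# and Detector objects, exposing only the attributes snr() actually touches.
class StubDetector:
    frequencies = np.linspace(10.0, 1000.0, 100)  # Hz, plain array for simplicity
    def psd(self, f):
        return np.full(f.shape, 1e-46)            # flat noise PSD (illustrative)
class StubSource:
    def ncycles(self, f=None):
        return 0                                  # falsy, so the ncycles factor is 1
    def raw_strain(self, f):
        return np.full(f.shape, 1e-23)            # flat strain (illustrative)
print(snr(StubSource(), StubDetector()))          # about 63 for these toy numbers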
| 30.205882
| 79
| 0.635833
| 138
| 1,027
| 4.724638
| 0.528986
| 0.116564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021108
| 0.261928
| 1,027
| 33
| 80
| 31.121212
| 0.83905
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 72082ffdc0eb8ab81095d7d094328792a40cbcea
| 6,898
| py
| Python
| dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
| lin-tan/fairness-variance
| 7f6aee23160707ffe78f429e5d960022ea1c9fe4
| ["BSD-3-Clause"]
| null | null | null
| dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
| lin-tan/fairness-variance
| 7f6aee23160707ffe78f429e5d960022ea1c9fe4
| ["BSD-3-Clause"]
| null | null | null
| dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
| lin-tan/fairness-variance
| 7f6aee23160707ffe78f429e5d960022ea1c9fe4
| ["BSD-3-Clause"]
| null | null | null
|
'''
Script to plot the accuracy and the fairness measures for different algorithms
from the log files
'''
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import os
print(os.getcwd())
import numpy as np
plt.style.use('ggplot')
def create_acc_lists(filepath):
train_acc = []
train_ddp = []
train_deo = []
valid_acc = []
valid_ddp = []
valid_deo = []
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
#if 'Epoch: 040/100' in line:
# break
if 'Train Acc' in line:
line = line.strip()
linesegs = line.split(' | ')
train_acc.append(float(linesegs[1].split(': ')[1].strip('%')))
train_ddp.append(float(linesegs[2].split(': ')[1].strip('%')))
train_deo.append(float(linesegs[3].split(': ')[1].strip('%')))
elif 'Valid Acc' in line:
line = line.strip()
linesegs = line.split(' | ')
valid_acc.append(float(linesegs[1].split(': ')[1].strip('%')))
valid_ddp.append(float(linesegs[2].split(': ')[1].strip('%')))
valid_deo.append(float(linesegs[3].split(': ')[1].strip('%')))
line = fp.readline()
cnt += 1
return train_acc, train_ddp, train_deo, valid_acc, valid_ddp, valid_deo
def color(R, G, B):
return (float(R)/255, float(G)/255, float(B)/255)
def BLUE():
return color(0, 77, 128)
def RED():
return color(181, 23, 0)
def make_plot_helper(arr, legends, xlabel, ylabel, outname):
epoch_list = np.arange(1, arr.shape[1] + 1)
fig, axs = plt.subplots(1, 1, figsize=(5,4), sharey=False)
fig.patch.set_visible(False)
axs.set_facecolor(color(240, 240, 240))
axs.tick_params(axis='x', colors='black')
axs.tick_params(axis='y', colors='black')
axs.xaxis.label.set_color('black')
axs.yaxis.label.set_color('black')
axs.set_ylim([0, arr.max() + 15])
#plt.gca().set_color_cycle(['red', 'blue', 'green', 'yellow'])
colors=[RED(), BLUE()]
for value, legend, c in zip(arr, legends, colors):
plt.plot(epoch_list, value, label=legend, color=c)
axs.set_xlabel(xlabel, fontweight='bold')
axs.set_ylabel(ylabel, fontweight='bold')
title = ylabel.replace("%", "").upper()
#plt.title(title, fontweight='bold')#, x=0.7, y=0.1)
leg = axs.legend(loc='upper right', frameon=False)
for line in leg.get_lines():
line.set_linewidth(4.0)
fig.tight_layout()
    outname = outname.replace('$', '_')  # keep the return value; str.replace does not mutate in place
fig.savefig(outname, bbox_inches='tight')
print('Plotted ' + outname)
def make_plot(list1, list2, legend1, legend2, plot_type, suffix=None):
arr1 = np.array(list1)
arr2 = np.array(list2)
legend = [legend1, legend2]
arr = np.array([arr1, arr2])
xlabel = 'Epochs'
if plot_type == 'acc':
arr = 100 - arr
ylabel = 'Error %' if plot_type == 'acc' else 'DEO'
legend1 = '_'.join(legend1.split(' '))
legend2 = '_'.join(legend2.split(' '))
#pdb.set_trace()
if 'penalty' in legend2:
legend2 = 'l2_penalty'
if 'penalty' in legend1:
legend1 = 'l2_penalty'
outname = '_'.join([legend1, legend2, plot_type])
if suffix is not None:
outname += '_' + suffix
make_plot_helper(arr, legend, xlabel, ylabel, outname)
def gen_main_plots():
# Used in the main paper for generating plots
file_name = 'no_1p_lr0p01.txt'
_, _, _, no_acc, _, no_deo = create_acc_lists(file_name)
file_name = 'with_1p_fairalm_eta60_inner5_lr0p01.txt'
_, _, _, fair_acc, _, fair_deo = create_acc_lists(file_name)
file_name = 'with_1e_L2_PENALTY_eta0p01_lr0p01.txt'
_, _, _, l2_acc, _, l2_deo = create_acc_lists(file_name)
MEDIUM_SIZE = 12
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
make_plot(no_acc, fair_acc, 'Unconstrained', 'FairALM', 'acc')
make_plot(no_deo, fair_deo, 'Unconstrained', 'FairALM', 'deo')
    make_plot(no_acc, l2_acc, 'Unconstrained', r'$\ell_2$ penalty', 'acc')
    make_plot(no_deo, l2_deo, 'Unconstrained', r'$\ell_2$ penalty', 'deo')
def gen_fair_alm_plots(no_filename, fair_alm_filename, suffix):
_, _, _, no_acc, _, no_deo = create_acc_lists(no_filename)
_, _, _, fair_acc, _, fair_deo = create_acc_lists(fair_alm_filename)
make_plot(no_acc, fair_acc, 'Unconstrained', 'FairALM', 'acc', suffix)
make_plot(no_deo, fair_deo, 'Unconstrained', 'FairALM', 'deo', suffix)
def gen_l2_plots(no_filename, l2_filename, suffix):
_, _, _, no_acc, _, no_deo = create_acc_lists(no_filename)
_, _, _, l2_acc, _, l2_deo = create_acc_lists(l2_filename)
    make_plot(no_acc, l2_acc, 'Unconstrained', r"$\ell_2$ penalty", 'acc', suffix)
    make_plot(no_deo, l2_deo, 'Unconstrained', r"$\ell_2$ penalty", 'deo', suffix)
def gen_l2_fair_alm_plots(l2_filename, fair_alm_filename, suffix):
_, _, _, l2_acc, _, l2_deo = create_acc_lists(l2_filename)
_, _, _, fair_acc, _, fair_deo = create_acc_lists(fair_alm_filename)
    make_plot(l2_acc, fair_acc, r"$\ell_2$ penalty", 'FairALM', 'acc', suffix)
    make_plot(l2_deo, fair_deo, r"$\ell_2$ penalty", 'FairALM', 'deo', suffix)
def gen_all_plots():
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
file_name = 'no_1p_lr0p01.txt'
fair_alm_filenames = {'eta60': 'FAIR_ALM_eta60_inner5_lr0p01.txt',
'eta40': 'FAIR_ALM_eta40_inner5_lr0p01.txt',
'eta45': 'FAIR_ALM_eta45_lr0p01.txt',
'eta50': 'FAIR_ALM_eta50_lr0p01.txt',
'eta80': 'FAIR_ALM_eta80_inner5_lr0p01.txt',
'eta20': 'FAIR_ALM_eta20_inner5_lr0p01.txt'}
l2_filenames = {'eta0p01': 'L2_PENALTY_eta0p01_lr0p01.txt',
'eta0p001': 'L2_PENALTY_eta0p001_lr0p01.txt',
'eta0p1': 'L2_PENALTY_eta0p1_lr0p01.txt',
'eta1': 'L2_PENALTY_eta1_lr0p01.txt'}
for eta, name in fair_alm_filenames.items():
gen_fair_alm_plots(file_name, name, eta)
for eta, name in l2_filenames.items():
gen_l2_plots(file_name, name, eta)
for l2_eta, l2_name in l2_filenames.items():
for alm_eta, alm_name in fair_alm_filenames.items():
gen_l2_fair_alm_plots(l2_name, alm_name, l2_eta+'_'+alm_eta)
if __name__ == "__main__":
gen_all_plots()
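# create_acc_lists assumes ' | '-separated log lines whose segments look like
# 'Label: value%'. A hedged illustration of the parsing (the line text is made
# up, not taken from a real log file):
line = 'Epoch: 001/100 | Train Acc: 85.20% | Train DDP: 3.10% | Train DEO: 4.50%'
segs = line.strip().split(' | ')
print(float(segs[1].split(': ')[1].strip('%')))  # 85.2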
| 36.691489
| 81
| 0.621919
| 938
| 6,898
| 4.259062
| 0.216418
| 0.029787
| 0.035044
| 0.038298
| 0.471089
| 0.39975
| 0.363955
| 0.338423
| 0.287359
| 0.193242
| 0
| 0.044768
| 0.232531
| 6,898
| 187
| 82
| 36.887701
| 0.70986
| 0.065671
| 0
| 0.144928
| 0
| 0
| 0.146216
| 0.057147
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07971
| false
| 0
| 0.028986
| 0.021739
| 0.137681
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 72085eb6f35c638ad1743b5ae7bd6a8de18fc6f3
| 682
| py
| Python
| conqueror/scraper/base_yandex.py
| piotrmaslanka/yandex-conqueror
| cd0b50a43e25551f91150e0bee4f9cd307e4adce
| ["MIT"]
| 12
| 2022-03-01T22:45:05.000Z
| 2022-03-16T05:46:24.000Z
| conqueror/scraper/base_yandex.py
| piotrmaslanka/yandex-conqueror
| cd0b50a43e25551f91150e0bee4f9cd307e4adce
| ["MIT"]
| 1
| 2022-03-02T10:18:05.000Z
| 2022-03-02T11:03:52.000Z
| conqueror/scraper/base_yandex.py
| piotrmaslanka/yandex-conqueror
| cd0b50a43e25551f91150e0bee4f9cd307e4adce
| ["MIT"]
| 1
| 2022-03-02T10:18:35.000Z
| 2022-03-02T10:18:35.000Z
|
import requests
from satella.coding.decorators import retry
@retry(3, exc_classes=requests.RequestException)
def get_yandex_request(url, arguments) -> dict:
"""
Return a JSON object querying Yandex at provided parameters.
Handling CSRF will be done automatically.
:param url: URL to ask
:param arguments: dictionary of arguments to add
:return: object returned via endpoint
"""
resp = requests.get(url, params=arguments)
resp.raise_for_status()
data = resp.json()
if list(data.keys()) == ['csrfToken']:
arguments['csrfToken'] = data['csrfToken']
return get_yandex_request(url, arguments)
else:
return data
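# Hedged usage sketch; the URL and parameters are placeholders, not a documented
# Yandex endpoint. If the first response carries only a csrfToken, the function
# folds it into the arguments and retries transparently:
payload = get_yandex_request('https://example.com/api/search', {'text': 'query'})
print(payload)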
| 28.416667
| 64
| 0.692082
| 84
| 682
| 5.535714
| 0.619048
| 0.03871
| 0.068817
| 0.08172
| 0.12043
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001859
| 0.211144
| 682
| 23
| 65
| 29.652174
| 0.862454
| 0.313783
| 0
| 0
| 0
| 0
| 0.061927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 72097fdf43f5937088d329748fec0dc61447255f
| 6,142
| py
| Python
| engine/azbatchengine.py
| asedighi/azure_realtime_batch
| c2cf4c8edc2bbded8377842fcad6370fd35af44e
| ["MIT"]
| 3
| 2020-05-08T16:20:07.000Z
| 2021-10-06T11:16:10.000Z
| engine/azbatchengine.py
| asedighi/azure_realtime_batch
| c2cf4c8edc2bbded8377842fcad6370fd35af44e
| ["MIT"]
| null | null | null
| engine/azbatchengine.py
| asedighi/azure_realtime_batch
| c2cf4c8edc2bbded8377842fcad6370fd35af44e
| ["MIT"]
| null | null | null
|
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# @author: asedighi
import asyncio
import sys
sys.path.append('.')
sys.path.append('..')
sys.path.append('/mnt/resource/batch/tasks/shared/')
sys.path.append('/mnt/resource/batch/tasks/shared/engine')
sys.path.append('/mnt/resource/batch/tasks/shared/batchwrapper')
sys.path.append('/mnt/resource/batch/tasks/shared/tasks')
from batchwrapper.config import getRandomizer
from batchwrapper.config import AzureCredentials
from batchwrapper.config import ReadConfig
from batchwrapper.config import TaskConfig
from batchwrapper.config import find_file_path
import argparse
import ntpath
from engine.taskengine import task_loop
from subprocess import *
from azure.storage.blob import BlobServiceClient
from azure.servicebus import ServiceBusClient
import os
class AzureBatchEngine():
def __init__(self):
os.chdir('/mnt/resource/batch/tasks/shared/engine')
configuration = AzureCredentials()
self.storage_string = configuration.getStorageConnectionString()
self.servicebus_string = configuration.get_service_bus_connection_string()
self.blob_service_client = BlobServiceClient.from_connection_string(self.storage_string)
self.service_bus_client = ServiceBusClient.from_connection_string(self.servicebus_string)
task = TaskConfig()
self.container_name = task.getOutputContainer()
paged_cont = self.blob_service_client.list_containers(name_starts_with=self.container_name)
counter = 0
for i in paged_cont:
counter += 1
if counter == 0:
self.blob_container_client = self.blob_service_client.create_container(self.container_name)
print("\tCreated {}... ".format(self.container_name))
else:
self.blob_container_client = self.blob_service_client.get_container_client(self.container_name)
print("\tContainer {} exists already... ".format(self.container_name))
print("Output Container to be used is: {}... ".format(self.container_name))
self.file_list_to_upload = list()
self.result_to_upload = ''
def getOutputContainer(self):
return self.container_name
def readJsonConfigFile(self, name=''):
if name == '':
return
return ReadConfig(name)
def java_runner(self, args) -> list:
#print("argumet is of type in java runner", type(args))
#print("argumet is ", args)
os.chdir('/mnt/resource/batch/tasks/shared/tasks')
process = Popen(args, stdout=PIPE, stderr=PIPE)
ret = []
while process.poll() is None:
line = process.stdout.readline()
if line != b'' and len(line) > 0 and line.endswith(b'\n'):
ret.append(line[:-1].decode('utf-8'))
        stdout, stderr = process.communicate()
        # Decode the drained output and drop empty segments, instead of mixing
        # raw bytes with the str lines collected above.
        ret += [line.decode('utf-8') for line in stdout.split(b'\n') if line]
        if stderr != b'':
            ret += [line.decode('utf-8') for line in stderr.split(b'\n') if line]
        return ret
def do(self):
#in_data = ' '.join(args[1:])
#in_data = args[1:]
#print("setting arguments to: ", in_data)
#task_command = (args[0], in_data)
task_loop(self, "../tasks")
#self.uploadResultData()
self.uploadFiles()
def do_action(self, *args):
pass
def addFileToUpload(self, file_name=''):
#/mnt/batch/tasks/workitems/<job id>/job-<#>/<task id>/wd
#/mnt/batch/tasks/shared
name = find_file_path(file_name, "../")
print("Found file to upload: {}".format(name))
if name != '':
self.file_list_to_upload.extend([name])
print("Will upload: {}".format(self.file_list_to_upload))
def dataToUpload(self, data: str =''):
if data != '':
self.result_to_upload = data
self.uploadResultData()
def uploadResultData(self):
##print("the current working directory for uploading results is: {}".format(os.getcwd()))
filen = "result_" + getRandomizer() + ".txt"
if self.result_to_upload != '':
text_file = open(filen, "w")
n = text_file.write(self.result_to_upload)
text_file.close()
self.addFileToUpload(filen)
def uploadFiles(self):
for output_file in self.file_list_to_upload:
print('Uploading file {} to container [{}]...'.format(output_file, self.container_name))
self.blob_client = self.blob_service_client.get_blob_client(container=self.container_name, blob=ntpath.basename(output_file))
# Upload the created file
with open(output_file, "rb") as data:
self.blob_client.upload_blob(data)
self.file_list_to_upload.remove(output_file)
if __name__ == '__main__':
print("Starting engine ...")
#all_input = sys.argv[1:];
#data_input = ' '.join(all_input[1:])
#foo = (all_input[0], data_input)
#print(foo)
#exit(1)
engine = AzureBatchEngine()
engine.do()
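# The stream-then-drain subprocess pattern used by java_runner, shown standalone
# against a trivially available command (echo stands in for the Java invocation):
from subprocess import Popen, PIPE
proc = Popen(['echo', 'hello'], stdout=PIPE, stderr=PIPE)
collected = []
while proc.poll() is None:
    chunk = proc.stdout.readline()
    if chunk.endswith(b'\n'):
        collected.append(chunk[:-1].decode('utf-8'))
out, err = proc.communicate()
collected += [c.decode('utf-8') for c in out.split(b'\n') if c]
print(collected)  # ['hello']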
| 29.38756
| 137
| 0.667209
| 765
| 6,142
| 5.206536
| 0.309804
| 0.032639
| 0.042681
| 0.031634
| 0.139844
| 0.12478
| 0.079337
| 0.062265
| 0
| 0
| 0
| 0.00272
| 0.221915
| 6,142
| 208
| 138
| 29.528846
| 0.830718
| 0.26506
| 0
| 0
| 0
| 0
| 0.103402
| 0.051925
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0.010417
| 0.145833
| 0.010417
| 0.291667
| 0.072917
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 720a41d918f83d5bbf26dfd204b04b9dc1b4ac43
| 1,090
| py
| Python
| j.py
| chirag127/Language-Translator-Using-Tkinter-in-Python
| c790a0672c770cf703559d99c74ad581643f4d2f
| ["MIT"]
| null | null | null
| j.py
| chirag127/Language-Translator-Using-Tkinter-in-Python
| c790a0672c770cf703559d99c74ad581643f4d2f
| ["MIT"]
| null | null | null
| j.py
| chirag127/Language-Translator-Using-Tkinter-in-Python
| c790a0672c770cf703559d99c74ad581643f4d2f
| ["MIT"]
| null | null | null
|
import tkinter as tk
import sys
class PrintLogger(): # create file like object
def __init__(self, textbox): # pass reference to text widget
self.textbox = textbox # keep ref
def write(self, text):
self.textbox.insert(tk.END, text) # write text to textbox
# could also scroll to end of textbox here to make sure always visible
def flush(self): # needed for file like object
pass
if __name__ == '__main__':
while True:
try:
def do_something():
print('i did something')
# root.after(1000, do_something)
print("qiaulfskhdnliukf")
root = tk.Tk()
t = tk.Text()
t.pack()
# create instance of file like object
pl = PrintLogger(t)
# replace sys.stdout with our object
sys.stdout = pl
# now we can print to stdout or file
print('hello world')
print('hello world')
root.mainloop()
        except Exception:
            print("exception")
| 24.772727
| 82
| 0.542202
| 127
| 1,090
| 4.543307
| 0.543307
| 0.041594
| 0.07279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0059
| 0.377982
| 1,090
| 43
| 83
| 25.348837
| 0.845133
| 0.291743
| 0
| 0.08
| 0
| 0
| 0.091984
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0.04
| 0.08
| 0
| 0.28
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 720b01f5be1444386ad583c605e2465546f819c4
| 2,695
| py
| Python
| byteweiser.py
| urbanware-org/byteweiser
| fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
| ["MIT"]
| 3
| 2017-11-27T00:35:04.000Z
| 2017-12-13T22:41:31.000Z
| byteweiser.py
| urbanware-org/byteweiser
| fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
| ["MIT"]
| 1
| 2017-03-08T19:04:49.000Z
| 2017-03-08T19:04:49.000Z
| byteweiser.py
| urbanware-org/byteweiser
| fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
| ["MIT"]
| null | null | null
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================
# ByteWeiser - Byte comparison and replacement tool
# Main script
# Copyright (C) 2021 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# GitHub: https://github.com/urbanware-org/byteweiser
# GitLab: https://gitlab.com/urbanware-org/byteweiser
# ============================================================================
import os
import sys
def main():
from core import clap
from core import common
from core import main
try:
p = clap.Parser()
except Exception as e:
print("%s: error: %s" % (os.path.basename(sys.argv[0]), e))
sys.exit(1)
p.set_description("Compare two files and replace different bytes.")
p.set_epilog("Further information and usage examples can be found "
"inside the documentation file for this script.")
# Required arguments
p.add_avalue("-i", "--input-file", "source file where to read the data "
"from", "input_file", None, True)
p.add_avalue("-o", "--output-file", "destination file where to write "
"data into", "output_file", None, True)
# Optional arguments
p.add_avalue("-b", "--buffer-size", "buffer size in bytes", "buffer_size",
4096, False)
p.add_switch(None, "--no-hashes", "do not use file hash comparison",
"no_hash", True, False)
p.add_switch(None, "--no-progress", "do not display the process "
"percentage", "no_progress", True, False)
p.add_switch("-q", "--quiet", "disable output", "quiet", True, False)
p.add_switch("-s", "--simulate", "do not change the output file",
"simulate", True, False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print(common.get_version())
sys.exit(0)
args = p.parse_args()
try:
hashes = not args.no_hash
progress = not args.no_progress
verbose = not args.quiet
byteweiser = main.ByteWeiser()
byteweiser.compare_and_replace(args.input_file, args.output_file,
args.buffer_size, args.simulate,
verbose, progress, hashes)
except Exception as e:
p.error(e)
if __name__ == "__main__":
main()
# EOF
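# Based on the arguments registered above, a typical invocation would look like
# the following (file names are placeholders):
#     python3 byteweiser.py -i source.bin -o target.bin -b 8192 --no-progress
# i.e. compare source.bin with target.bin using an 8 KiB buffer, replacing only
# the differing bytes and suppressing the progress percentage.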
| 34.551282
| 78
| 0.562152
| 330
| 2,695
| 4.490909
| 0.424242
| 0.021592
| 0.030364
| 0.050607
| 0.0722
| 0.046559
| 0
| 0
| 0
| 0
| 0
| 0.007493
| 0.257143
| 2,695
| 77
| 79
| 35
| 0.732767
| 0.18961
| 0
| 0.12
| 0
| 0
| 0.289862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.1
| 0
| 0.12
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 720b83b3d481df1e875ae4b17eade77f3a7f0679
| 9,798
| py
| Python
| scripts/st_dashboard.py
| rsmith49/simple-budget-pld
| 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
| ["Apache-2.0"]
| 1
| 2022-01-01T14:44:40.000Z
| 2022-01-01T14:44:40.000Z
| scripts/st_dashboard.py
| rsmith49/simple-budget-pld
| 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
| ["Apache-2.0"]
| null | null | null
| scripts/st_dashboard.py
| rsmith49/simple-budget-pld
| 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
| ["Apache-2.0"]
| null | null | null
|
import altair as alt
import os
import pandas as pd
import streamlit as st
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dotenv import load_dotenv
from plaid.api_client import ApiClient
from plaid.exceptions import ApiException
from pathlib import Path
from traceback import format_exc
from urllib.error import URLError
sys.path.append(os.getcwd())
load_dotenv()
from src.budget import Budget
from src.transactions import get_transactions_df
from src.user_modifications import transform_pipeline
from src.views import top_vendors
EXISTING_TRANSACTIONS_FILE = f"{Path.home()}/.ry-n-shres-budget-app/all_transactions.csv"
TRANSACTION_GRACE_BUFFER = relativedelta(days=10) # How far before latest transaction to pull from
@st.cache(
hash_funcs={ApiClient: lambda *args, **kwargs: 0}
)
def get_transaction_data():
try:
existing_df = pd.read_csv(EXISTING_TRANSACTIONS_FILE)
existing_df['date'] = existing_df['date'].astype(str)
except FileNotFoundError:
existing_df = None
# Get Plaid output
now = datetime.now().strftime('%Y-%m-%d')
if existing_df is not None:
latest_date = existing_df['date'].max()
start_date = (datetime.strptime(latest_date, '%Y-%m-%d') - TRANSACTION_GRACE_BUFFER).strftime('%Y-%m-%d')
latest_transactions_df = get_transactions_df(start_date, now)
latest_transactions_df['date'] = latest_transactions_df['date'].astype(str)
all_transactions_df = pd.concat([
existing_df[existing_df['date'] < start_date],
latest_transactions_df
])
else:
all_transactions_df = get_transactions_df(
'2016-01-01',
now
)
os.makedirs(EXISTING_TRANSACTIONS_FILE[:EXISTING_TRANSACTIONS_FILE.rfind("/")], exist_ok=True)
all_transactions_df.to_csv(EXISTING_TRANSACTIONS_FILE, index=False)
# Fix for Streamlit Cache issues
all_transactions_df = all_transactions_df.drop(
['payment_meta', 'location'],
axis=1
)
all_transactions_df['category'] = all_transactions_df['category'].astype(str)
return all_transactions_df
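# A minimal sketch (hypothetical helper, not called by the app) of the
# incremental-pull window used above: a 10-day overlap before the newest cached
# transaction is re-requested so late-posting charges get refreshed, and the
# overlapping cached rows are dropped. The date below is hypothetical.
def _example_pull_window():
    latest_date = "2021-11-20"  # newest date already cached
    start = (datetime.strptime(latest_date, '%Y-%m-%d')
             - TRANSACTION_GRACE_BUFFER).strftime('%Y-%m-%d')
    return start  # "2021-11-10"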
def write_df(df: pd.DataFrame):
"""Helper function to st.write a DF with amount stylized to dollars"""
st.dataframe(
df.style.format({
col_name: "{:,.2f}"
for col_name in ["amount", "Total Spent"]
})
)
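# Worked example of the "{:,.2f}" spec used in write_df above (value
# hypothetical): thousands separators plus two decimal places.
assert "{:,.2f}".format(1234.5) == "1,234.50"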
# TODO: Make non-budgeted columns show up on bar chart, just without ticks
# TODO: Make all-time a budget period option (figure out what to do about this - maybe it only shows up for one month?)
# TODO: Allow you to set custom start date for your budget period (i.e. make your monthly spending start on the 3rd)
# TODO: Fix the duplicate charge issue with pending charges
def single_inc_spending_summary(df: pd.DataFrame, date_inc_key: str, curr_date: str, is_current: bool = False) -> None:
"""Creates display for a single date increment
Parameters
----------
df
Transactions Dataframe
date_inc_key
The key for date increment (one of week, month, year)
curr_date
The selected date increment value
is_current
Whether the date represents the most recent date increment
"""
budget = Budget(df)
curr_df = df[df[date_inc_key] == curr_date]
total_spending_str = f"{curr_df['amount'].sum():,.2f}"
if budget.budget_plan:
show_budget = st.checkbox("Budget View", value=True)
total_budget = budget.total_limit(date_inc_key)
if budget.budget_plan and show_budget:
metric_col1, metric_col2 = st.columns(2)
with metric_col1:
st.metric(f"Total Spending", total_spending_str)
with metric_col2:
st.metric(f"Total Budget", f"{total_budget:,.2f}")
simple_summary = budget.simple_summary(date_inc_key, curr_date)
bar = alt.Chart(simple_summary).mark_bar().encode(
y="category",
x="spent",
tooltip=alt.Tooltip(field="spent", aggregate="sum", type="quantitative"),
).properties(
height=alt.Step(60)
)
ticks = alt.Chart(simple_summary).mark_tick(
color="red",
thickness=3,
size=60 * 0.9,
).encode(
y="category",
x="total_budget",
tooltip=alt.Tooltip(field="total_budget", aggregate="sum", type="quantitative")
)
if is_current:
ticks += alt.Chart(simple_summary).mark_tick(
color="white",
thickness=2,
size=60 * 0.9,
).encode(
y="category",
x="projected_budget",
)
st.altair_chart(bar + ticks, use_container_width=True)
else:
st.metric(f"Total Spending", total_spending_str)
chart = alt.Chart(curr_df).mark_bar().encode(
x=alt.X("sum(amount)", axis=alt.Axis(title='Spent')),
y=alt.Y("category_1", axis=alt.Axis(title="Category")),
tooltip=alt.Tooltip(field="amount", aggregate="sum", type="quantitative"),
).properties(
height=alt.Step(40),
)
st.altair_chart(chart, use_container_width=True)
with st.expander("Largest Transactions"):
write_df(
curr_df[["date", "amount", "name", "category_1", "category_2"]].sort_values(
by="amount",
ascending=False
)
)
def df_for_certain_categories(df: pd.DataFrame) -> pd.DataFrame:
"""Helper function to get a DF filtered by any user selected categories"""
categories = st.multiselect(
f"Select any categories to only see spending for",
options=sorted(df['category_1'].unique()),
default=[],
)
    if len(categories) > 0:
        # Equivalent to OR-ing per-category equality masks
        df = df[df['category_1'].isin(categories)]
return df
def main():
try:
st.set_page_config(initial_sidebar_state="collapsed")
try:
df = get_transaction_data().copy()
except ApiException as e:
# TODO: Check e for if it is item expiration
st.write("Error accessing Plaid - using old transaction data for now")
st.error(f"{e}")
try:
df = pd.read_csv(EXISTING_TRANSACTIONS_FILE)
except FileNotFoundError:
st.write("Could not find existing transactions file - cannot run this app")
raise e
df = transform_pipeline(df)
# Organizing Page
st.write("# Budget Display")
date_inc = st.sidebar.selectbox(
f"Select the timespan (week, month, year) that you would like to use to view your spending by",
["Month", "Week", "Year"],
)
date_inc_key = date_inc.lower()
date_inc_label = date_inc[0].upper() + date_inc[1:]
categories_to_ignore = st.sidebar.multiselect(
"Any categories to ignore in calculations",
options=sorted(df["category_1"].unique()),
default=["Income"]
)
start_date = st.sidebar.select_slider(
f"Enter a Start Date for viewing your spending",
sorted(df["date"].unique())
)
end_date = st.sidebar.select_slider(
f"Enter an End Date to view your spending until",
sorted(df["date"].unique()),
value=df["date"].max()
)
if start_date is not None:
df = df[df['date'] >= start_date]
if end_date is not None:
df = df[df['date'] <= end_date]
# Preprocessing
if len(categories_to_ignore):
for category in categories_to_ignore:
df = df[df['category_1'] != category]
df['week'] = df['date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d').strftime('%Y-%V'))
if 'month' not in df:
df['month'] = df['date'].apply(lambda x: x[:7])
df['year'] = df['date'].apply(lambda x: x[:4])
# Data Viz
st.write(f"## Single {date_inc_label} in Spending")
available_date_incs = sorted(df[date_inc_key].unique(), reverse=True)
curr_date = st.selectbox(
f"Pick a {date_inc_label}",
options=available_date_incs,
format_func=lambda label: f"{label} ({df[df[date_inc_key] == label]['amount'].sum():,.2f})"
)
single_inc_spending_summary(
df,
date_inc_key,
curr_date,
is_current=curr_date == max(available_date_incs)
)
st.write(f"## {date_inc_label}ly Spending History")
history_df = df_for_certain_categories(df)
        # Sum only the 'amount' column; the previous .sum("amount") misused the
        # numeric_only parameter.
        st.bar_chart(history_df.groupby(date_inc_key)["amount"].sum().sort_index(ascending=False))
st.write(f"## Most Expensive Single {date_inc} Categories")
write_df(top_vendors(df, groupby=[date_inc_key, 'category_1']))
st.write("## All Transactions")
write_df(df)
# TODO: Figure out how we want to show the various conflicting budget periods
# - Do we want the triple layered bar chart still? (spending / projected / limit)
# - Do we just want 2 views? How can we give category level info well
return
except URLError as e:
st.error(
"""
**This demo requires internet access.**
Connection error: %s
"""
% e.reason
)
except Exception as e:
st.error(f"""
Something Broke :(
Error: {e}
Traceback: {format_exc()}
""")
if __name__ == "__main__":
main()
| 33.101351
| 119
| 0.610431
| 1,244
| 9,798
| 4.618971
| 0.262862
| 0.024365
| 0.019144
| 0.008354
| 0.17386
| 0.110338
| 0.096763
| 0.060912
| 0
| 0
| 0
| 0.007199
| 0.276995
| 9,798
| 295
| 120
| 33.213559
| 0.803924
| 0.122576
| 0
| 0.091346
| 0
| 0.004808
| 0.168805
| 0.016275
| 0
| 0
| 0
| 0.00678
| 0
| 1
| 0.024038
| false
| 0
| 0.081731
| 0
| 0.120192
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
720ee96617fe84100cbf9c9517c56d368835bd2c
| 16,818
|
py
|
Python
|
scripts/devvnet_manager.py
|
spmckenney/Devv-Core
|
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
|
[
"MIT"
] | null | null | null |
scripts/devvnet_manager.py
|
spmckenney/Devv-Core
|
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
|
[
"MIT"
] | null | null | null |
scripts/devvnet_manager.py
|
spmckenney/Devv-Core
|
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
|
[
"MIT"
] | null | null | null |
import yaml
import argparse
import sys
import os
import subprocess
import time
def get_devvnet(filename):
with open(filename, "r") as f:
buf = ''.join(f.readlines())
conf = yaml.load(buf, Loader=yaml.Loader)
# Set bind_port values
port = conf['devvnet']['base_port']
for a in conf['devvnet']['shards']:
for b in a['process']:
port = port + 1
b['bind_port'] = port
return(conf['devvnet'])
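# A minimal, hypothetical devvnet YAML matching the shape get_devvnet() expects:
# a top-level 'devvnet' mapping with base_port, a host_index_map, and a list of
# shards, each holding a 'process' list whose entries receive sequential
# bind_port values. (get_devvnet() itself takes a filename; yaml.load is shown
# directly here only to illustrate the structure.)
_EXAMPLE_DEVVNET_YAML = """
devvnet:
  base_port: 56550
  host_index_map: {0: localhost}
  shards:
    - shard_index: 0
      t1: ring0
      process:
        - name: validator
          host: localhost
"""
# yaml.load(_EXAMPLE_DEVVNET_YAML, Loader=yaml.Loader)['devvnet']['base_port'] -> 56550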
class Devvnet(object):
_base_port = 0
_password_file = ""
_working_dir = ""
_config_file = ""
_host = ""
_host_index_map = {}
def __init__(self, devvnet):
self._devvnet = devvnet
self._shards = []
self._host_index_map = devvnet['host_index_map']
try:
self._base_port = devvnet['base_port']
self._working_dir = devvnet['working_dir']
self._password_file = devvnet['password_file']
self._config_file = devvnet['config_file']
self._host = devvnet['host']
except KeyError:
pass
current_port = self._base_port
for i in self._devvnet['shards']:
print("Adding shard {}".format(i['shard_index']))
s = Shard(i, self._host_index_map, self._config_file, self._password_file)
current_port = s.initialize_bind_ports(current_port)
s.evaluate_hostname(self._host)
s.connect_shard_nodes()
self._shards.append(s)
for i,shard in enumerate(self._shards):
print("shard: "+ str(shard))
for i2,node in enumerate(shard.get_nodes()):
node.grill_raw_subs(shard.get_index())
for rsub in node.get_raw_subs():
print("Getting for shard/name/node_index {}/{}/{}".format(rsub.get_shard_index(), rsub._name, rsub._node_index))
n = self.get_shard(rsub.get_shard_index()).get_node(rsub._name, rsub._node_index)
node.add_subscriber(n.get_host(), n.get_port())
node.add_working_dir(self._working_dir)
def __str__(self):
s = "Devvnet\n"
s += "base_port : "+str(self._base_port)+"\n"
s += "working_dir : "+str(self._working_dir)+"\n"
for shard in self._shards:
s += str(shard)
return s
def get_shard(self, index):
return self._shards[index]
def get_shards(self):
return self._shards
def get_num_nodes(self):
count = 0
for shard in self._shards:
count += shard.get_num_nodes()
return count
class Shard(object):
    _shard_index = 0
_working_dir = ""
_shard = None
_nodes = []
_host = ""
def __init__(self, shard, host_index_map, config_file, password_file):
self._shard = shard
self._nodes = get_nodes(shard, host_index_map)
self._shard_index = self._shard['shard_index']
        try:
            self._host = self._shard['host']
        except KeyError:
            pass
        try:
            self._config_file = self._shard['config_file']
        except KeyError:
            self._config_file = config_file
        try:
            self._password_file = self._shard['password_file']
        except KeyError:
            self._password_file = password_file
        try:
            self._name = self._shard['t1']
            self._type = "T1"
        except KeyError:
            try:
                self._name = self._shard['t2']
                self._type = 'T2'
            except KeyError:
                print("Error: Shard type is neither Tier1 (t1) nor Tier2 (t2)")
for n in self._nodes:
n.set_config_file(self._config_file)
n.set_password_file(self._password_file)
n.set_type(self._type)
#self._connect_shard_nodes()
def __str__(self):
s = "type: " + self._type + "\n"
s += "index: " + str(self._shard_index) + "\n"
for node in self._nodes:
s += " " + str(node) + "\n"
return s
def initialize_bind_ports(self, port_num):
current_port = port_num
for node in self._nodes:
node.set_port(current_port)
current_port = current_port + 1
return current_port
def connect_shard_nodes(self):
v_index = [i for i,x in enumerate(self._nodes) if x.is_validator()]
a_index = [i for i,x in enumerate(self._nodes) if x.is_announcer()]
r_index = [i for i,x in enumerate(self._nodes) if x.is_repeater()]
for i in v_index:
host = self._nodes[i].get_host()
port = self._nodes[i].get_port()
#print("setting port to {}".format(port))
for j in v_index:
if i == j:
continue
self._nodes[j].add_subscriber(host, port)
for k in a_index:
announcer = self._nodes[k]
if self._nodes[i].get_index() == announcer.get_index():
self._nodes[i].add_subscriber(announcer.get_host(), announcer.get_port())
break
for l in r_index:
#print(type(self._nodes[i].get_index()))
#if self._nodes[i].get_index() == self._nodes[l].get_index():
self._nodes[l].add_subscriber(host, port)
def evaluate_hostname(self, host):
if self._host == "":
self._host = host
for node in self._nodes:
node.set_host(node.get_host().replace("${node_index}", str(node.get_index())))
if node.get_host().find("format") > 0:
#print("formatting")
node.set_host(eval(node.get_host()))
node.evaluate_hostname(self._host)
def get_nodes(self):
return self._nodes
def get_num_nodes(self):
return len(self._nodes)
def get_node(self, name, index):
node = [x for x in self._nodes if (x.get_name() == name and x.get_index() == index)]
if len(node) == 0:
return None
        if len(node) != 1:
            # raising a bare string is a TypeError in Python 3
            raise RuntimeError("WOOP: identical nodes?")
return node[0]
#node = [y for y in nodes if y.get_index() == index
#shard1_validators = [x for x in conf['devvnet']['shards'][1]['process'] if x['name'] == 'validator']
def get_index(self):
return self._shard_index
class RawSub():
def __init__(self, name, shard_index, node_index):
self._name = name
self._shard_index = shard_index
self._node_index = node_index
def __str__(self):
sub = "({}:{}:{})".format(self._name, self._shard_index, self._node_index)
return sub
def get_shard_index(self):
return self._shard_index
def substitute_node_index(self, node_index):
if self._node_index == "${node_index}":
self._node_index = int(node_index)
else:
print("WARNING: not subbing "+str(self._node_index)+" with "+str(node_index))
return
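# A tiny worked example (hypothetical values) of the placeholder substitution
# above: a raw subscription declared with "${node_index}" adopts the owning
# node's index.
def _example_rawsub_substitution():
    rs = RawSub("validator", 0, "${node_index}")
    rs.substitute_node_index(2)
    return str(rs)  # "(validator:0:2)"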
class Sub():
def __init__(self, host, port):
self._host = host
self._port = port
def __str__(self):
sub = "({}:{})".format(self.get_host(), str(self.get_port()))
return sub
def __eq__(self, other):
if self._host != other.get_host():
return False
if self._port != other.get_port():
return False
return True
def get_host(self):
return self._host
def set_host(self, hostname):
self._host = hostname
def get_port(self):
return self._port
def set_port(self, port):
self._port = port
class Node():
def __init__(self, shard_index, index, name, host, port = 0):
self._name = name
self._type = ""
self._shard_index = int(shard_index)
self._index = int(index)
self._host = host
self._bind_port = int(port)
self._subscriber_list = []
self._raw_sub_list = []
self._working_dir = ""
def __str__(self):
subs = "s["
for sub in self._subscriber_list:
subs += str(sub)
subs += "]"
rawsubs = "r["
for rawsub in self._raw_sub_list:
rawsubs += str(rawsub)
rawsubs += "]"
s = "node({}:{}:{}:{}:{}) {} {}".format(self._name, self._index, self._host, self._bind_port, self._working_dir, subs, rawsubs)
return s
def add_working_dir(self, directory):
wd = directory.replace("${name}", self._name)
wd = wd.replace("${shard_index}", str(self._shard_index))
wd = wd.replace("${node_index}", str(self.get_index()))
self._working_dir = wd
def is_validator(self):
return(self._name == "validator")
def is_announcer(self):
return(self._name == "announcer")
def is_repeater(self):
return(self._name == "repeater")
def add_subscriber(self, host, port):
self._subscriber_list.append(Sub(host,port))
def add_raw_sub(self, name, shard_index, node_index):
rs = RawSub(name,shard_index, node_index)
#print("adding rawsub: "+str(rs))
self._raw_sub_list.append(rs)
def evaluate_hostname(self, host):
for sub in self._subscriber_list:
sub.set_host(sub.get_host().replace("${node_index}", str(self.get_index())))
if sub.get_host().find("format") > 0:
print("formatting")
sub.set_host(eval(sub.get_host()))
def grill_raw_subs(self, shard_index):
for sub in self._raw_sub_list:
sub.substitute_node_index(self._index)
#d = subs.replace("${node_index}", str(self._index))
print("up "+str(sub))
def get_raw_subs(self):
return self._raw_sub_list
def get_type(self):
return self._type
def set_type(self, type):
self._type = type
def get_name(self):
return self._name
def get_shard_index(self):
return self._shard_index
def get_index(self):
return self._index
def get_host(self):
return self._host
def set_host(self, host):
self._host = host
def get_port(self):
return self._bind_port
def set_port(self, port):
self._bind_port = port
def get_config_file(self):
return self._config_file
def set_config_file(self, config):
self._config_file = config
def get_password_file(self):
return self._password_file
def set_password_file(self, file):
self._password_file = file
def get_subscriber_list(self):
return self._subscriber_list
def get_working_dir(self):
return self._working_dir
def set_working_dir(self, working_dir):
self._working_dir = working_dir
def get_nodes(yml_dict, host_index_map):
nodes = []
shard_index = yml_dict['shard_index']
try:
host_index_map = yml_dict['host_index_map']
print("Using shard's {} for shard {}".format(host_index_map, shard_index))
except:
print("Using devvnet host_index_map ({}) for shard {}".format(host_index_map, shard_index))
for proc in yml_dict['process']:
try:
print("creating {} {} processes".format(len(host_index_map), proc['name']))
for node_index in host_index_map:
node = Node(shard_index, node_index, proc['name'], host_index_map[node_index], proc['bind_port'])
try:
rawsubs = proc['subscribe']
for sub in proc['subscribe']:
try:
si = sub['shard_index']
except:
si = yml_dict['shard_index']
node.add_raw_sub(sub['name'], si, sub['node_index'])
except:
pass
nodes.append(node)
except:
            # The original referenced an undefined name 'ind' here; node index 0
            # is assumed for a single explicitly-hosted process.
            nodes.append(Node(shard_index, 0, proc['name'], proc['host'], proc['bind_port']))
print("creating a "+proc['name']+" process")
return nodes
def run_validator(node):
# ./devcash --node-index 0 --config ../opt/basic_shard.conf --config ../opt/default_pass.conf --host-list tcp://localhost:56551 --host-list tcp://localhost:56552 --host-list tcp://localhost:57550 --bind-endpoint tcp://*:56550
cmd = []
cmd.append("./devcash")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--num-consensus-threads", "1"])
cmd.extend(["--num-validator-threads", "1"])
cmd.extend(["--config", node.get_config_file()])
cmd.extend(["--config", node.get_password_file()])
cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())])
for sub in node.get_subscriber_list():
cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())])
return cmd
def run_announcer(node):
# ./announcer --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-announcer.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --bind-endpoint 'tcp://*:50020' --working-dir ../../tmp/working/input/laminar4/ --key-pass password --separate-ops true
cmd = []
cmd.append("./pb_announcer")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--config", node.get_config_file()])
cmd.extend(["--config", node.get_password_file()])
cmd.extend(["--mode", node.get_type()])
cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())])
cmd.extend(["--separate-ops", "true"])
cmd.extend(["--start-delay", str(30)])
cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 100)])
return cmd
def run_repeater(node):
# ./repeater --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-repeater.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --working-dir ../../tmp/working/output/repeater --host-list tcp://localhost:56550 --key-pass password
cmd = []
cmd.append("./repeater")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--num-consensus-threads", "1"])
cmd.extend(["--num-validator-threads", "1"])
cmd.extend(["--mode", node.get_type()])
cmd.extend(["--working-dir", node.get_working_dir()])
cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 200)])
for sub in node.get_subscriber_list():
cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())])
return cmd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launch a devvnet.')
    parser.add_argument('--logdir', action="store", dest='logdir', help='Directory to log output')
    parser.add_argument('--start-processes', action="store_true", dest='start', default=True, help='Start the processes')
    parser.add_argument('--hostname', action="store", dest='hostname', default=None, help='Only launch processes for this hostname')
    # dest was 'start' in the original, which clobbered --start-processes
    parser.add_argument('--debug', action="store_true", dest='debug', default=False, help='Debugging output')
parser.add_argument('devvnet', action="store", help='YAML file describing the devvnet')
args = parser.parse_args()
print(args)
print("logdir: " + args.logdir)
print("start: " + str(args.start))
print("hostname: " + str(args.hostname))
print("devvnet: " + args.devvnet)
devvnet = get_devvnet(args.devvnet)
d = Devvnet(devvnet)
num_nodes = d.get_num_nodes()
logfiles = []
cmds = []
for s in d.get_shards():
for n in s.get_nodes():
if args.hostname and (args.hostname != n.get_host()):
continue
if n.get_name() == 'validator':
cmds.append(run_validator(n))
elif n.get_name() == 'repeater':
cmds.append(run_repeater(n))
elif n.get_name() == 'announcer':
cmds.append(run_announcer(n))
logfiles.append(os.path.join(args.logdir,
n.get_name()+"_s"+
str(n.get_shard_index())+"_n"+
str(n.get_index())+"_output.log"))
ps = []
for index,cmd in enumerate(cmds):
print("Node " + str(index) + ":")
print(" Command: ", *cmd)
print(" Logfile: ", logfiles[index])
if args.start:
with open(logfiles[index], "w") as outfile:
ps.append(subprocess.Popen(cmd, stdout=outfile, stderr=outfile))
time.sleep(1.5)
if args.start:
for p in ps:
print("Waiting for nodes ... ctl-c to exit.")
p.wait()
print("Goodbye.")
| 33.171598
| 276
| 0.576347
| 2,131
| 16,818
| 4.278742
| 0.102299
| 0.044966
| 0.030708
| 0.011516
| 0.317833
| 0.232397
| 0.174929
| 0.149704
| 0.13753
| 0.12097
| 0
| 0.006098
| 0.278392
| 16,818
| 506
| 277
| 33.237154
| 0.745221
| 0.070401
| 0
| 0.26943
| 0
| 0
| 0.104802
| 0.007234
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147668
| false
| 0.041451
| 0.015544
| 0.056995
| 0.297927
| 0.054404
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72103568b2899de2bb48ee1f49834b293ab3bb81
| 5,896
|
py
|
Python
|
run_qasm.py
|
t-imamichi/qiskit-utility
|
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
|
[
"Apache-2.0"
] | 6
|
2019-02-27T11:53:18.000Z
|
2022-03-02T21:28:05.000Z
|
run_qasm.py
|
t-imamichi/qiskit-utility
|
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
|
[
"Apache-2.0"
] | null | null | null |
run_qasm.py
|
t-imamichi/qiskit-utility
|
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
|
[
"Apache-2.0"
] | 2
|
2019-05-03T23:52:03.000Z
|
2020-12-22T12:12:38.000Z
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
'''
This tool submits a QASM file to any backend and shows the result.
It requires 'Qconfig.py' to set a token of IBM Quantum Experience.
It supports the following backends:
ibmqx2(5 qubits), ibmqx4(5 qubits), ibmqx5(16 qubits), simulator(32 qubits).
see https://quantumexperience.ng.bluemix.net/qx/devices for more details of the backends.
Examples:
$ python run_qasm.py -b # show backend information
$ python run_qasm.py -c # show remaining credits
$ python run_qasm.py -l 10 # show job list (10 jobs)
$ python run_qasm.py -j (job id) # show the result of a job
$ python run_qasm.py -q (qasm file) # submit a qasm file
$ python run_qasm.py -z -l 10 # show job list (10 jobs) of qconsole
$ python run_qasm.py -z -d ibmq_20_tokyo -q (qasm file) # submit a qasm file to ibmq_20_tokyo
'''
import json
import time
from argparse import ArgumentParser
from IBMQuantumExperience import IBMQuantumExperience
try:
import Qconfig
except ImportError:
raise RuntimeError('You need "Qconfig.py" with a token in the same directory.')
def options():
parser = ArgumentParser()
parser.add_argument('-q', '--qasm', action='store', help='QASM file')
parser.add_argument('-d', '--device', action='store', default='sim',
help='choose a device to run the input (sim [default], qx2, qx4, qx5, hpc)')
parser.add_argument('-s', '--shots', action='store', default=1000, type=int,
help='Number of shots (default: 1000)')
parser.add_argument('-i', '--interval', action='store', default=2, type=int,
help='Interval time to poll a result (default: 2)')
parser.add_argument('-l', '--job-list', action='store', default=10, type=int,
help='Number of jobs to show')
parser.add_argument('-j', '--jobid', action='store', type=str, help='Get job information')
parser.add_argument('-z', '--qconsole', action='store_true', help='Use qconsole instead of QX')
parser.add_argument('-b', '--backends', action='store_true', help='Show backends information')
parser.add_argument('-m', '--disable-multishotopt', action='store_true', help='Disable multi-shot optimization')
parser.add_argument('-c', '--credits', action='store_true', help='Show my credits')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose')
args = parser.parse_args()
if args.verbose:
print('options:', args)
return args
class JobManager:
def __init__(self, qconsole=False):
site = 'qconsole' if qconsole else 'qx'
self._api = IBMQuantumExperience(Qconfig.APItoken[site], Qconfig.config[site])
@staticmethod
def read_asm(infilename):
with open(infilename) as infile:
return ''.join(infile.readlines())
def run_qasm(self, qasm, device='sim', shots=1000, verbose=True, interval=2, multishotopt=True):
qasms = [{'qasm': qasm}]
devices = {'sim': 'ibmq_qasm_simulator',
'qx2': 'ibmqx2', 'qx4': 'ibmqx4', 'qx5': 'ibmqx5'}
if device in devices:
dev = devices[device]
else:
dev = device
hpc = None
if dev == 'ibmq_qasm_simulator':
hpc = {'multishot_optimization': multishotopt, 'omp_num_threads': 1}
out = self._api.run_job(job=qasms, backend=dev, shots=shots, max_credits=5, hpc=hpc)
if 'error' in out:
print(out['error']['message'])
return None
jobid = out['id']
print('job id:', jobid)
results = self._api.get_job(jobid)
if verbose:
print(results['status'])
while results['status'] == 'RUNNING':
time.sleep(interval)
results = self._api.get_job(jobid)
if verbose:
print(results['status'])
return results
def get_job_list(self, n_jobs):
jobs = self._api.get_jobs(limit=n_jobs)
tab = {}
for v in jobs:
job_id = v['id']
status = v['status']
cdate = v['creationDate']
tab[cdate] = (status, job_id)
for cdate, v in sorted(tab.items()):
print('{}\t{}\t{}'.format(cdate, *v))
def get_job(self, job_id):
result = self._api.get_job(job_id)
print(json.dumps(result, sort_keys=True, indent=2))
def get_credits(self):
print('credits :', self._api.get_my_credits())
def available_backends(self, verbose=False):
tab = {}
for e in self._api.available_backends() + self._api.available_backend_simulators():
status = self._api.backend_status(e['name'])
try:
tab[e['name']] = [':', str(e['nQubits']) + ' qubits,', e['description'], status]
except KeyError:
tab[e['name']] = [':', status]
if verbose:
tab[e['name']].append(e)
for k, v in sorted(tab.items()):
print(k, *v)
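# A minimal programmatic usage sketch of JobManager (outside the CLI in main()
# below); the QASM string is hypothetical, and Qconfig.py must exist as
# described in the module docstring.
def _example_submit():
    jm = JobManager(qconsole=False)
    qasm = 'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[1];\ncreg c[1];\nh q[0];\nmeasure q[0] -> c[0];\n'
    return jm.run_qasm(qasm, device='sim', shots=100, verbose=False)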
def main():
args = options()
jm = JobManager(args.qconsole)
if args.backends:
jm.available_backends(args.verbose)
if args.credits:
jm.get_credits()
if args.qasm:
qasm = jm.read_asm(args.qasm)
interval = max(1, args.interval)
results = jm.run_qasm(qasm=qasm, device=args.device, shots=args.shots, interval=interval,
multishotopt=not args.disable_multishotopt)
print(json.dumps(results, indent=2, sort_keys=True))
elif args.jobid:
jm.get_job(args.jobid)
elif args.job_list > 0:
jm.get_job_list(args.job_list)
if __name__ == '__main__':
main()
| 39.046358
| 116
| 0.608887
| 770
| 5,896
| 4.54026
| 0.280519
| 0.028318
| 0.05349
| 0.030034
| 0.100687
| 0.067506
| 0.05492
| 0.029748
| 0.029748
| 0.029748
| 0
| 0.013643
| 0.254071
| 5,896
| 150
| 117
| 39.306667
| 0.781264
| 0.176221
| 0
| 0.100917
| 0
| 0
| 0.172998
| 0.009083
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082569
| false
| 0
| 0.055046
| 0
| 0.183486
| 0.091743
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7211ad9fb739bb9a8cf35bb0752773293df5ab6b
| 2,356
|
py
|
Python
|
api/teams/models.py
|
wepickheroes/wepickheroes.github.io
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | 3
|
2018-02-15T20:04:23.000Z
|
2018-09-29T18:13:55.000Z
|
api/teams/models.py
|
wepickheroes/wepickheroes.github.io
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | 5
|
2018-01-31T02:01:15.000Z
|
2018-05-11T04:07:32.000Z
|
api/teams/models.py
|
prattl/wepickheroes
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from nucleus.models import (
AbstractBaseModel,
EmailRecord,
TeamMember,
)
User = get_user_model()
class Team(AbstractBaseModel):
name = models.CharField(max_length=255)
logo_url = models.CharField(max_length=255, null=True, blank=True)
players = models.ManyToManyField(User, through='nucleus.TeamMember', related_name='teams')
captain = models.ForeignKey(User, null=True, blank=True, related_name='teams_captain_of',
on_delete=models.SET_NULL)
creator = models.ForeignKey(User, null=True, blank=True, related_name='teams_created',
on_delete=models.SET_NULL)
def save(self, *args, **kwargs):
adding = self._state.adding
super().save(*args, **kwargs)
if adding:
if self.captain:
TeamMember.objects.create(team=self, player=self.captain)
elif self.creator:
TeamMember.objects.create(team=self, player=self.creator)
def __str__(self):
return self.name
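# A short usage sketch (assuming a migrated database and an existing user) of
# the save() hook above: on first save the captain is auto-enrolled as a
# TeamMember. Kept commented out because it needs a live database.
#
#   captain = User.objects.create(username="cap")
#   team = Team.objects.create(name="Radiant", captain=captain)
#   TeamMember.objects.filter(team=team, player=captain).exists()  # True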
INVITE_TEMPLATE = """Hello,
You've been invited to join a team on push.gg. Click the link below to sign up:
{signup_link}
- Push League
"""
class TeamInvite(AbstractBaseModel):
team = models.ForeignKey('teams.Team', on_delete=models.CASCADE)
player_email = models.EmailField()
player = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
def save(self, *args, **kwargs):
try:
previous_self = TeamInvite.objects.get(pk=self.pk)
except TeamInvite.DoesNotExist:
previous_self = None
new_instance = not previous_self
super().save(*args, **kwargs)
if new_instance:
self.send_email()
    def send_email(self):
        subject = "You have been invited to a team on push.gg"
        email_body = INVITE_TEMPLATE.format(
            signup_link="",
        )
        if self.player:  # player is nullable until the invitee registers
            self.player.email_user(
                subject,
                email_body,
            )
EmailRecord.objects.create(
to=self.player_email,
from_address=settings.DEFAULT_FROM_EMAIL,
subject=subject,
text_content=email_body
)
| 29.45
| 94
| 0.639219
| 283
| 2,356
| 5.159011
| 0.34629
| 0.021918
| 0.035616
| 0.046575
| 0.358219
| 0.269178
| 0.269178
| 0.187671
| 0.187671
| 0.187671
| 0
| 0.003454
| 0.262733
| 2,356
| 79
| 95
| 29.822785
| 0.837075
| 0
| 0
| 0.1
| 0
| 0.016667
| 0.112054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0.016667
| 0.316667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
721392272e51a8013f6d83d05f9c457dc8ce2f53
| 4,811
|
py
|
Python
|
print_results.py
|
MicImbriani/Keras-PRBX
|
ab9dd8196e6f184336f5b30715635670d3586136
|
[
"CC0-1.0"
] | 1
|
2021-09-18T12:42:28.000Z
|
2021-09-18T12:42:28.000Z
|
print_results.py
|
MicImbriani/SkinLesion-Segm-Classif-UNet-FocusNet-ResNet50
|
ab9dd8196e6f184336f5b30715635670d3586136
|
[
"CC0-1.0"
] | null | null | null |
print_results.py
|
MicImbriani/SkinLesion-Segm-Classif-UNet-FocusNet-ResNet50
|
ab9dd8196e6f184336f5b30715635670d3586136
|
[
"CC0-1.0"
] | null | null | null |
import numpy as np
from keras.optimizers import Adam, SGD
from tensorflow.keras.metrics import AUC
import metrics
from networks.unet_nn import unet
from networks.unet_res_se_nn import unet_res_se
from networks.focus import get_focusnetAlpha
from networks.resnet import get_res
from data_processing.generate_new_dataset import generate_targets
from tensorflow.keras.applications.resnet50 import preprocess_input
########### SEGMENTATION ###########
# U-Net
model = unet(batch_norm=False)
model.load_weights("/var/tmp/mi714/NEW/models/UNET/unet10/unet10_weights.h5")
# U-Net BatchNorm
# model = unet(batch_norm=True)
# model.load_weights("/var/tmp/mi714/NEW/models/UNET_BN/unet_bn10/unet_bn10_weights.h5")
# U-Net Res SE
# model = unet_res_se()
# model.load_weights("/var/tmp/mi714/NEW/models/UNET_RES_SE/unet_res_se10/unet_res_se10_weights.h5")
#Focusnet
# model = get_focusnetAlpha()
# model.load_weights("/var/tmp/mi714/NEW/models/FOCUS/focusnet10/focusnet10_weights.h5")
########### CLASSIFICATION ###########
# model = get_res()
# Original
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_OG/resnet_og10/resnet_og10_weights.h5")
# U-Net
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet10/resnet_unet10_weights.h5")
# U-Net BatchNorm
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet_bn10/resnet_unet_bn10_weights.h5")
# Res SE U-Net
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_RES_SE/resnet_unet_res_se10/resnet_unet_res_se10_weights.h5")
# FocusNet
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_FOCUSNET/resnet_focusnet7/resnet_focusnet7_weights.h5")
# Data, Masks & Classification target labels
# trainData = np.load('/var/tmp/mi714/test_new_npy2/data.npy')
# valData = np.load('/var/tmp/mi714/test_new_npy2/dataval.npy')
testData = np.load('/var/tmp/mi714/NEW/npy_dataset/datatest.npy')
# Segmentation masks
# trainMask = np.load('/var/tmp/mi714/test_new_npy2/dataMask.npy')
# valMask = np.load('/var/tmp/mi714/test_new_npy2/dataMaskval.npy')
testMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMasktest.npy')
########### SEGMENTATION ###########
X = testData
y = testMask
X = X.astype('float32')
y /= 255. # scale masks to [0, 1]
my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
model.compile(optimizer=my_adam,
loss=metrics.focal_loss,
metrics=[metrics.dice_coef_loss,
metrics.jaccard_coef_loss,
metrics.true_positive,
metrics.true_negative,
])
score = model.evaluate(X, y, verbose=1)
dice_coef_loss = score[1]
jac_indx_loss = score[2]
true_positive = score[3]
true_negative = score[4]
print(f"""
RESULTS:
Dice Coefficient Loss: {dice_coef_loss}
Jaccard Index Loss: {jac_indx_loss}
True Positive: {true_positive}
True Negative: {true_negative}
""")
########### CLASSIFICATION ###########
# # Classification data
# # x_train = np.concatenate((trainData,)*3, axis=-1)
# # x_train = preprocess_input(x_train)
# # x_val = np.concatenate((valData,)*3, axis=-1)
# # x_val = preprocess_input(x_val)
# x_test = np.concatenate((testData,)*3, axis=-1)
# x_test = preprocess_input(x_test)
# # Classification target labels
# path = "/var/tmp/mi714/NEW/aug_dataset/"
# # y_train = generate_targets(path + "ISIC-2017_Training_Data",
# # path + "ISIC-2017_Training_Part3_GroundTruth.csv")
# # y_val = generate_targets(path + "ISIC-2017_Validation_Data",
# # path + "ISIC-2017_Validation_Part3_GroundTruth.csv")
# y_test = generate_targets(path + "ISIC-2017_Test_v2_Data",
# path + "ISIC-2017_Test_v2_Part3_GroundTruth.csv")
# X = x_test
# y = y_test
# my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# # Compile model and print summary
# rocauc = AUC(num_thresholds=200,
# curve="ROC",
# summation_method="interpolation",
# name=None,
# dtype=None,
# thresholds=None,
# multi_label=False,
# label_weights=None,
# )
# model.compile(loss='categorical_crossentropy',
# optimizer=my_adam,
# metrics=[metrics.sensitivity,
# metrics.specificity,
# rocauc,
# 'acc'
# ])
# score = model.evaluate(X, y, verbose=1)
# binary_ce = score[0]
# sensitivity = score[1]
# specificity = score[2]
# rocauc = score[3]
# acc = score[4]
# print(f"""
# RESULTS:
# Binary Cross-Entropy Loss: {binary_ce}
# Sensitivity: {sensitivity}
# Specificity: {specificity}
# AUC ROC: {rocauc}
# Accuracy: {acc}
# """)
| 29.335366
| 129
| 0.675327
| 651
| 4,811
| 4.75576
| 0.239631
| 0.031008
| 0.056848
| 0.054264
| 0.329457
| 0.283269
| 0.283269
| 0.225775
| 0.158592
| 0.119832
| 0
| 0.047354
| 0.179173
| 4,811
| 163
| 130
| 29.515337
| 0.736642
| 0.649137
| 0
| 0
| 0
| 0
| 0.199338
| 0.096026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.263158
| 0
| 0.263158
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72140b20f916fb997edbec8a00bb1402df3614ca
| 9,466
|
py
|
Python
|
game.py
|
distortedsignal/bohnanza
|
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
|
[
"Apache-2.0"
] | null | null | null |
game.py
|
distortedsignal/bohnanza
|
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
|
[
"Apache-2.0"
] | null | null | null |
game.py
|
distortedsignal/bohnanza
|
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
|
[
"Apache-2.0"
] | null | null | null |
"""
An implementation of Bohnanza
@author: David Kelley, 2018
"""
import random
from collections import defaultdict
class Card:
"""Card Object
Name and point thresholds are the only properties. The point thresholds
    are organized the way they are on the card - to get 1 point, you need the
number of cards listed first in the point_thresholds, 2 for the 2nd, ...
"""
types = {'garden':[2, 2, 3], 'red': [2, 3, 4, 5],
'black-eyed': [2, 4, 5, 6], 'soy': [2, 4, 6, 7],
'green': [3, 5, 6, 7], 'stink': [3, 5, 7, 8],
'chili': [3, 6, 8, 9], 'blue': [4, 6, 8, 10]}
def __init__(self,name):
self.name = name
self.point_thresholds = self.types[self.name]
def __repr__(self):
return self.name
def __eq__(self, card2):
return self.name == card2.name
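# Worked example of the threshold list above: a 'red' card pays 1 point at 2
# beans, 2 at 3, 3 at 4 and 4 at 5, so harvesting 4 red beans yields
# sum(i <= 4 for i in [2, 3, 4, 5]) == 3 points - the same expression
# Player.harvest_field uses below.
assert sum(i <= 4 for i in Card('red').point_thresholds) == 3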
class Deck:
types = {'garden': 6, 'red': 8, 'black-eyed': 10, 'soy': 12,
'green': 14, 'stink': 16, 'chili': 18, 'blue': 20}
def __init__(self):
cards = [Card(name) for name in
self.types.keys() for i in range(0, self.types[name]) ]
self.draw_order = random.sample(cards, len(cards))
self.discard_order = []
self.completed_rounds = 0
def __repr__(self):
str_out = ("Deck with:\n Draw pile: " +
str(len(self.draw_order)) + " cards\n Discard pile: " +
str(len(self.discard_order)) + " cards\n")
return str_out
def draw(self, nCards):
"""Get nCards from the deck
If there are no cards left, you get only as many as are available
"""
out = []
for iC in range(nCards):
out.append(self.draw_single())
return out
def draw_single(self):
"""Get a single card from the deeck"""
if len(self.draw_order) == 0:
# Shuffle the discard pile if the draw pile is empty
self.draw_order = random.sample(
self.discard_order, len(self.discard_order))
self.discard_order = []
self.completed_rounds += 1
if len(self.draw_order) == 0:
return []
else:
return self.draw_order.pop(0)
def discard(self, c):
if isinstance(c, list):
for iCard in c:
self.discard_order.append(iCard)
else:
self.discard_order.append(c)
class Player:
"""Generic player class. Should be subtyped later for new strategies.
Each player type must implement the following methods:
plant: takes an array of cards and plants them
"""
def __init__(self, seat, strat):
self.hand = []
self.fields = [[], []]
self.points = 0
self.point_discards = []
self.seat = seat
self.strategy = strat
def __repr__(self):
names = []
for iField in range(2):
if len(self.fields[iField]) == 0:
names.append("[Empty]")
else:
names.append(str(self.fields[iField][0]) + \
"(" + str(len(self.fields[iField])) + ")")
return ("Player " + str(self.seat+1) + ".\nHand: " +
str(self.hand)
+ "\nField 1: " + names[0]
+ "\nField 2: " + names[1] + "\n")
def plant_from_hand(self, game_state):
"""Get strategy's choice and execute"""
if len(self.hand) == 0:
return
field_to_plant, cards = self.strategy.plant_from_hand(self)
for (iField, iCard) in zip(field_to_plant, cards):
self.plant_field(iField, iCard, game_state)
self.hand.pop(0)
def plant_from_draw(self, cards, game_state):
"""Get strategy's choice and execute"""
field_to_plant, cards = \
self.strategy.plant_from_trade(self, cards)
for (iField, iCard) in zip(field_to_plant, cards):
self.plant_field(iField, iCard, game_state)
def plant_field(self, field_num, card, game_state):
"""Put card down on field, harvest if neccessary"""
if len(self.fields[field_num]) > 0 and \
card != self.fields[field_num][0]:
self.harvest_field(field_num, game_state)
self.fields[field_num].append(card)
def harvest_field(self, field_num, game_state):
"""Get points, discard cards to correct place"""
nBeans = len(self.fields[field_num])
if nBeans == 0:
return []
nPoints = sum([i <= nBeans for i in self.fields[field_num][0].point_thresholds])
self.points += nPoints
for_discard = self.fields[field_num][0:(nBeans-nPoints)]
for_points = self.fields[field_num][(nBeans-nPoints):]
if len(for_discard) + len(for_points) != nBeans:
            raise AssertionError("Improper harvest.")
# Handle cards from field
game_state._deck.discard(for_discard)
self.point_discards.extend(for_points)
# Empty the field
self.fields[field_num] = []
class Game:
def __init__(self, player_strats):
self._deck = Deck()
self._players = [Player(i, player_strats[i]) for i in range(len(player_strats))]
self.deal_game(len(self._players))
def __repr__(self):
return "Bohnanza game with \n" + str(self._players) + " players."
def run(self):
active_player = 0
round_number = 1
empty_deck = False
while not (self.game_over() or empty_deck):
empty_deck = self.turn(active_player)
active_player += 1
if active_player >= len(self._players):
active_player = 0
round_number += 1
points = [p.points for p in self._players]
return points
# print("GAME OVER\nPlayer points: " + \
# str([p.points for p in self._players]))
def deal_game(self, nPlayers):
"""Initial game setup"""
for iPlayer in range(0,nPlayers):
self._players[iPlayer].hand.extend(self._deck.draw(5))
def game_over(self):
"""The game is over after completing 3 times through the deck"""
return len(self._deck.draw_order) == 0 and \
self._deck.completed_rounds >= 2
def turn(self, player_num):
"""Have a player take a turn"""
self.gamestate_is_valid()
        # Step 1: Plant from hand
self._players[player_num].plant_from_hand(self)
self.gamestate_is_valid()
# Step 2: Draw new cards & trade
faceup_cards = self._deck.draw(2)
if (len(faceup_cards) != 2) or any([not card for card in faceup_cards]):
self._deck.discard(faceup_cards)
return 1
# trade_spec = self._strategy[player_num].trade(faceup_cards)
# self.execute_trade(trade_spec)
self.gamestate_is_valid(faceup_cards)
# Step 3: Plant new cards
self._players[player_num].plant_from_draw(faceup_cards, self)
self.gamestate_is_valid()
# Step 4: Draw new cards
new_cards = self._deck.draw(3)
if (len(new_cards) != 3) or any([not card for card in new_cards]):
self._deck.discard(new_cards)
return 1
self._players[player_num].hand.extend(new_cards)
self.gamestate_is_valid()
def gamestate_is_valid(self, addl_cards=[], throw_exception=False):
"""
If !throw_exception, returns a boolean of if the game state is valid
If throw_exception, throws an exception if the game state is not valid
"""
original_types = Deck.types
current_cards = defaultdict(int)
for card in self._deck.draw_order:
current_cards[card.name] += 1
for card in self._deck.discard_order:
current_cards[card.name] += 1
for card in addl_cards:
current_cards[card.name] += 1
for player in self._players:
for card in player.hand:
current_cards[card.name] += 1
for card in player.point_discards:
current_cards[card.name] += 1
for field in player.fields:
for card in field:
current_cards[card.name] += 1
for key in original_types:
if original_types[key] != current_cards[key]:
if not throw_exception:
return False
raise AssertionError("not all cards are present")
return True
class Strategy:
def __init__(self, seat):
self.name = "Generic"
def __repr__(self):
return self.name + " player."
def plant_from_hand(self, cards, player):
"""Return a list of which field to put cards in for the given player
"""
pass
def plant_from_trade(self, cards, player):
"""Return a list of which field to put cards in for the given player
"""
pass
def trade(self, cards):
"""Trade with other players. Still working out what the mechanics of
this are
"""
pass
| 34.421818
| 88
| 0.555145
| 1,189
| 9,466
| 4.238015
| 0.184188
| 0.023219
| 0.023814
| 0.028577
| 0.261361
| 0.202818
| 0.124628
| 0.106767
| 0.071046
| 0.057154
| 0
| 0.016433
| 0.337841
| 9,466
| 275
| 89
| 34.421818
| 0.787492
| 0.169132
| 0
| 0.198864
| 0
| 0
| 0.036444
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 1
| 0.147727
| false
| 0.017045
| 0.011364
| 0.022727
| 0.295455
| 0.005682
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72155749ca290c85d0fa365110369fcce2862271
| 1,872
|
py
|
Python
|
pytype/tests/test_calls.py
|
JelleZijlstra/pytype
|
962a0ebc05bd24dea172381b2bedcc547ba53dd5
|
[
"Apache-2.0"
] | 11
|
2017-02-12T12:19:50.000Z
|
2022-03-06T08:56:48.000Z
|
pytype/tests/test_calls.py
|
JelleZijlstra/pytype
|
962a0ebc05bd24dea172381b2bedcc547ba53dd5
|
[
"Apache-2.0"
] | null | null | null |
pytype/tests/test_calls.py
|
JelleZijlstra/pytype
|
962a0ebc05bd24dea172381b2bedcc547ba53dd5
|
[
"Apache-2.0"
] | 2
|
2017-06-27T14:41:57.000Z
|
2021-12-05T11:27:33.000Z
|
"""Tests for calling other functions, and the corresponding checks."""
from pytype import utils
from pytype.tests import test_inference
class CallsTest(test_inference.InferenceTest):
"""Tests for checking function calls."""
def testOptional(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x: int, y: int = ..., z: int = ...) -> int
""")
self.assertNoErrors("""\
import mod
mod.foo(1)
mod.foo(1, 2)
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
def testMissing(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter")])
def testExtraneous(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-count")])
def testMissingKwOnly(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y, *, z) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter", r"\bz\b")])
def testExtraKeyword(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, z=3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-keyword-args")])
if __name__ == "__main__":
test_inference.main()
| 26.742857
| 73
| 0.553953
| 228
| 1,872
| 4.460526
| 0.276316
| 0.041298
| 0.048181
| 0.098328
| 0.650934
| 0.635202
| 0.635202
| 0.635202
| 0.611603
| 0.403147
| 0
| 0.013838
| 0.26656
| 1,872
| 69
| 74
| 27.130435
| 0.726875
| 0.052885
| 0
| 0.678571
| 0
| 0
| 0.354711
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 1
| 0.089286
| false
| 0
| 0.125
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7216c0aa91d2cb7e990847e2823233ead4e36ab3
| 724
|
py
|
Python
|
test/test_learning_00.py
|
autodrive/NAIST_DeepLearning
|
ac2c0512c43f71ea7df68567c5e24e689ac18aea
|
[
"Apache-2.0"
] | 1
|
2018-09-26T01:52:35.000Z
|
2018-09-26T01:52:35.000Z
|
test/test_learning_00.py
|
autodrive/NAIST_DeepLearning
|
ac2c0512c43f71ea7df68567c5e24e689ac18aea
|
[
"Apache-2.0"
] | 5
|
2015-12-31T10:56:43.000Z
|
2018-11-16T08:57:12.000Z
|
test/test_learning_00.py
|
autodrive/NAIST_DeepLearning
|
ac2c0512c43f71ea7df68567c5e24e689ac18aea
|
[
"Apache-2.0"
] | 1
|
2018-09-26T01:52:37.000Z
|
2018-09-26T01:52:37.000Z
|
import unittest
import lecture1_code00 as dl
from sklearn.datasets import make_blobs  # samples_generator module was removed in newer scikit-learn
class TestDeepLearning(unittest.TestCase):
def setUp(self):
self.X, self.Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
def tearDown(self):
del self.X
del self.Y
def test_linear_model_00(self):
x = [1.0, -1.0]
w = [1, 1, 0]
result = dl.linear_model(w, x)
self.assertAlmostEqual(x[0]*w[0] + x[1]*w[1] + w[2] * 1, result,)
x3 = [1.0, -1.0, 1.0]
w3 = [1, 2, 1, 0.5]
result3 = dl.linear_model(w3, x3)
self.assertAlmostEqual(x3[0]*w3[0] + x3[1]*w3[1] + x3[2] * w3[2] + w3[3] * 1.0, result3,)
| 27.846154
| 97
| 0.585635
| 122
| 724
| 3.377049
| 0.377049
| 0.038835
| 0.021845
| 0.029126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107407
| 0.254144
| 724
| 25
| 98
| 28.96
| 0.655556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7217f6133fa71477eb286daa69250fadb04142e7
| 2,389
|
py
|
Python
|
edumediaitem/views_manage.py
|
shagun30/djambala-2
|
06f14e3dd237d7ebf535c62172cfe238c3934f4d
|
[
"BSD-3-Clause"
] | null | null | null |
edumediaitem/views_manage.py
|
shagun30/djambala-2
|
06f14e3dd237d7ebf535c62172cfe238c3934f4d
|
[
"BSD-3-Clause"
] | null | null | null |
edumediaitem/views_manage.py
|
shagun30/djambala-2
|
06f14e3dd237d7ebf535c62172cfe238c3934f4d
|
[
"BSD-3-Clause"
] | null | null | null |
#-*-coding: utf-8 -*-
"""
/dms/edumediaitem/views_manage.py
.. contains the view for the management view of the media package
Django content Management System
Hans Rauch
hans.rauch@gmx.net
The programs of the dms system may be used freely and adapted
to specific needs.
0.01 11.09.2007 start of work
"""
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.roles import require_permission
from dms.roles import UserEditPerms
from dms.folder.views_manage import do_manage
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def edumediaitem_manage(request, item_container):
""" Pflegemodus des Medienpakets """
user_perms = UserEditPerms(request.user.username, request.path)
add_ons = {}
add_ons[0] = [ { 'url' : get_site_url(item_container,
'index.html/add/edufileitem/'),
'info': _(u'Datei')},
{ 'url' : get_site_url(item_container, 'index.html/add/edutextitem/'),
'info': _(u'Textdokument')},
{ 'url' : get_site_url(item_container, 'index.html/add/edulinkitem/'),
'info': _(u'Verweis')},
]
add_ons[1] = [
{ 'url' : get_site_url(item_container,
'index.html/add/imagethumb/?' + \
'max_width=120&max_height=80'),
'info': _(u'Minibild für Verweise etc.')},
{ 'url' : get_site_url(item_container,
'index.html/add/image/'),
'info': _(u'Bild, Foto, Grafik')},
]
add_ons[2] = [ { 'url' : get_site_url(item_container, 'index.html/add/userfolder/'),
'info': _(u'Community-Mitglieder eintragen, löschen, Rechte ändern ...')}, ]
add_ons[3] = []
app_name = 'edumediaitem'
my_title = _(u'Medienpaket pflegen')
my_title_own = _(u'Eigene Ressourcen etc. pflegen')
dont = { 'navigation_left_mode': False, }
return do_manage(request, item_container, user_perms, add_ons, app_name,
my_title, my_title_own, dont)
| 38.532258
| 95
| 0.577648
| 260
| 2,389
| 5.073077
| 0.488462
| 0.078848
| 0.053071
| 0.059136
| 0.172858
| 0.172858
| 0.172858
| 0.172858
| 0.172858
| 0
| 0
| 0.012317
| 0.286312
| 2,389
| 61
| 96
| 39.163934
| 0.76129
| 0.194224
| 0
| 0.055556
| 0
| 0
| 0.235726
| 0.095338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.166667
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
721a5ce052e7d21ea063652b0a161c21042f7f06
| 1,089
|
py
|
Python
|
tests/test_muduapiclient.py
|
hanqingliu/mudu-api-python-client
|
92541df27a518dad5312b39749dfbb8bd471a6b8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_muduapiclient.py
|
hanqingliu/mudu-api-python-client
|
92541df27a518dad5312b39749dfbb8bd471a6b8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_muduapiclient.py
|
hanqingliu/mudu-api-python-client
|
92541df27a518dad5312b39749dfbb8bd471a6b8
|
[
"Apache-2.0"
] | null | null | null |
import ddt
import mock
from unittest import TestCase
from muduapiclient.client import MuduApiClient, gen_signed_params
import time
@ddt.ddt
class MuduApiClientTests(TestCase):
@ddt.unpack
@ddt.data(
('ACCESS_KEY', 'SECRET_KEY', {'page':1, 'live_status':2}),
)
def test_gen_signed_params(self, ak, sk, kwargs):
original_time = time.time
time.time = mock.Mock(return_value='1234567890')
signed_params = gen_signed_params(ak, sk, kwargs)
time.time = original_time
self.assertIn('sign', signed_params)
self.assertEqual(signed_params['sign'], 'af7470c6f59d051c633401d1fd0b86fd1aa05352')
self.assertNotIn('secret_key', signed_params)
@ddt.unpack
@ddt.data(
('ACCESS_KEY', 'SECRET_KEY', {'page':1, 'live_status':2}),
('507cfcdfe351e13e6f1c8ba87b80969f', 'SECRET_KEY', {'page':1, 'live_status':4}),
)
def test_call_live(self, ak, sk, kwargs):
api = MuduApiClient(ak, sk)
response = api.call('POST', 'live', 'List', **kwargs)
self.assertIn('code', response)
| 33
| 91
| 0.662994
| 129
| 1,089
| 5.403101
| 0.364341
| 0.120517
| 0.064562
| 0.060258
| 0.177905
| 0.177905
| 0.143472
| 0.143472
| 0.143472
| 0.143472
| 0
| 0.068886
| 0.200184
| 1,089
| 32
| 92
| 34.03125
| 0.731343
| 0
| 0
| 0.214286
| 0
| 0
| 0.193756
| 0.066116
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
721e9bba1e7ea66054b20c27b7571b65855aeaa1
| 5,970
|
py
|
Python
|
ttt.py
|
YukkuriC/PyTicTacToe
|
c38b330faeb956d82b401e5863c4982f725e5dab
|
[
"MIT"
] | null | null | null |
ttt.py
|
YukkuriC/PyTicTacToe
|
c38b330faeb956d82b401e5863c4982f725e5dab
|
[
"MIT"
] | null | null | null |
ttt.py
|
YukkuriC/PyTicTacToe
|
c38b330faeb956d82b401e5863c4982f725e5dab
|
[
"MIT"
] | null | null | null |
__doc__ = '''
Tic-tac-toe infrastructure
Contains the board class and the kernel for running a single game
'''
from threading import Thread
from time import process_time
if 'enums':  # always-true block, used as an editor folding marker
    OK = 0         # game continues
    ENDGAME = 1    # three in a row formed
    DRAW = 2       # board full, draw
    INVALID = -1   # invalid return value (wrong type / out of bounds)
    CONFILCT = -2  # conflicting move (square already occupied)
    ERROR = -3     # player code raised an exception
    TIMEOUT = -4   # player code timed out
class Board:
"""
    Basic board class: evaluates the position and hands out the board views
    used by the two players. Digits 1 and 2 mark the two players' pieces.
"""
def __init__(self):
        self.pool = {}     # dict holding only the values 1/2
        self.history = []  # move history
def get_board(self, plr: int):
"""
        Return the board dict for the given player number.
        Keys are 2-tuples; each entry (0, 1, 2) is the row / column index.
        The result covers all 3*3 board positions; values are strings:
            "S": own piece
            "F": opponent's piece
            "E": empty
"""
res = {}
for x in range(3):
for y in range(3):
if (x, y) in self.pool:
res[x, y] = 'S' if self.pool[x, y] == plr else 'F'
else:
res[x, y] = 'E'
return res
def drop(self, plr, pos):
"""
        Player number plr drops a piece at position pos.
        Returns the move result.
"""
        if self._drop_data_check(pos):  # reject malformed positions
            self.history.append('INVALID')
            return INVALID
        self.history.append(pos)
        if pos in self.pool:  # square already occupied
            return CONFILCT
        self.pool[pos] = plr  # place the piece, then check for game end
        return self._check_endgame()
def _drop_data_check(self, pos):
"""
        Validate the move position object. Requirements:
            * must be a list or tuple
            * length must be 2
            * every entry must be an int with value 0, 1 or 2
"""
if not isinstance(pos, (list, tuple)):
return INVALID
if len(pos) != 2:
return INVALID
for i in pos:
if not (isinstance(i, int) and 0 <= i <= 2):
return INVALID
return OK
def _check_endgame(self):
""" 检查游戏状态是否结束 """
for x in range(3):
if self._3_equal(self.pool.get((x, i))
for i in range(3)): # axis 0
return ENDGAME
if self._3_equal(self.pool.get((i, x))
for i in range(3)): # axis 1
return ENDGAME
        if self._3_equal(self.pool.get((i, i)) for i in range(3)):  # main diagonal
            return ENDGAME
        if self._3_equal(self.pool.get((i, 2 - i)) for i in range(3)):  # anti-diagonal
            return ENDGAME
        return OK  # draw detection is left to the game loop
def _3_equal(self, row):
""" 辅助函数:检查一行3数(非空)相等状态 """
row = iter(row)
n1 = next(row)
if not n1:
return False
for n in row:
if n != n1:
return False
return True
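# A short usage sketch of Board (hypothetical moves): player 1 takes the main
# diagonal, so the final drop reports ENDGAME.
def _example_board():
    b = Board()
    b.drop(1, (0, 0)); b.drop(2, (0, 1))
    b.drop(1, (1, 1)); b.drop(2, (0, 2))
    return b.drop(1, (2, 2))  # ENDGAME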
class Game:
"""
    Tic-tac-toe game object.
    Takes both players' code, runs it and collects the results.
    codes:
        the two players' code modules; each provides a play function that
        accepts a Board.get_board result and returns a move position
    names:
        names of the two code modules
    timeout:
        time limit per player
"""
def __init__(self, codes, names=['code1', 'code2'], timeout=10):
self.codes = codes
self.names = names
self.timeout = timeout
@staticmethod
def _stringfy_error(e):
return '%s: %s' % (
type(e).__name__,
e,
)
@staticmethod
def _thread_wrap(code, board, thr_output: dict):
"""
        Run player code inside a thread and write out the result.
        Inputs:
            code: module to run
            board: current position
            thr_output: dict that receives the return values
                "result": result of the module's play function
                "error": captured runtime exception
                "dt": running time
"""
res = {
"result": None,
"error": None,
}
try:
t1 = process_time()
output = code.play(board)
t2 = process_time()
res['result'] = output
except Exception as e:
t2 = process_time()
res['error'] = Game._stringfy_error(e)
res['dt'] = t2 - t1
thr_output.update(res)
def _get_result(self, winner, reason, extra=None):
"""
        Build the match result dict.
        "orders": move order of this game
        "winner": winner
            0 - first player wins
            1 - second player wins
            None - draw
        "reason": endgame reason code
        "extra": extra description
        "timeouts": history of both players' remaining time
"""
return {
'names': self.names,
'orders': self.board.history,
'winner': winner,
'reason': reason,
'extra': extra,
'timeouts': self.timeout_history,
}
def match(self):
"""
        Run one match.
        Returns: the match result dict
"""
self.board = Board()
timeouts = [self.timeout] * 2
self.timeout_history = []
for nround in range(9):
            # build this turn's player thread
plr_idx = nround % 2
thread_output = {}
frame = self.board.get_board(plr_idx + 1)
thr = Thread(target=self._thread_wrap,
args=(self.codes[plr_idx], frame, thread_output))
            # run with a time limit
thr.start()
thr.join(timeouts[plr_idx])
            # detect a hung / infinite-looping thread
            if thr.is_alive():
                return self._get_result(1 - plr_idx, TIMEOUT, 'infinite loop')
            # update the time budget and check for timeout
timeouts[plr_idx] -= thread_output['dt']
if timeouts[plr_idx] < 0:
return self._get_result(1 - plr_idx, TIMEOUT)
self.timeout_history.append(timeouts.copy())
            # check for a reported error
if thread_output['error']:
return self._get_result(
1 - plr_idx,
ERROR,
thread_output['error'],
)
            # apply the move
res = self.board.drop(plr_idx + 1, thread_output['result'])
            if res == OK:  # keep looping
continue
return self._get_result(
plr_idx if res == ENDGAME else 1 - plr_idx,
res,
)
        return self._get_result(None, DRAW)  # draw
if __name__ == '__main__':
import codes.dumb_ordered as plr1, codes.dumb_random as plr2
game = Game([plr1, plr2])
print(game.match())
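For reference, a player module only needs a play(board) function that takes the Board.get_board dict and returns an (x, y) position. A minimal sketch (the module name dumb_first is hypothetical, not one of the bundled players):

# dumb_first.py -- picks the first empty cell, scanning row by row
def play(board):
    # board maps (x, y) -> 'S' / 'F' / 'E'
    for x in range(3):
        for y in range(3):
            if board[x, y] == 'E':
                return (x, y)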
| 25.512821
| 78
| 0.469514
| 640
| 5,970
| 4.228125
| 0.317188
| 0.026608
| 0.020695
| 0.035107
| 0.114191
| 0.105322
| 0.083518
| 0.06541
| 0.04102
| 0.04102
| 0
| 0.019722
| 0.422446
| 5,970
| 233
| 79
| 25.622318
| 0.765081
| 0.148409
| 0
| 0.158273
| 0
| 0
| 0.031041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079137
| false
| 0
| 0.021583
| 0.007194
| 0.280576
| 0.007194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72207e110b7ba0434449b56ad831fee21813b6dc
| 1,015
|
py
|
Python
|
Minor Project/Weather GUI/pyowm_helper.py
|
ComputerScientist-01/Technocolabs-Internship-Project
|
3675cc6b9a40a885a29b105ec9b29945a1e4620c
|
[
"MIT"
] | 4
|
2020-07-08T11:32:29.000Z
|
2021-08-05T02:54:02.000Z
|
Minor Project/Weather GUI/pyowm_helper.py
|
ComputerScientist-01/Technocolabs-Internship-Project
|
3675cc6b9a40a885a29b105ec9b29945a1e4620c
|
[
"MIT"
] | null | null | null |
Minor Project/Weather GUI/pyowm_helper.py
|
ComputerScientist-01/Technocolabs-Internship-Project
|
3675cc6b9a40a885a29b105ec9b29945a1e4620c
|
[
"MIT"
] | null | null | null |
import os
import pyowm
from datetime import datetime
from timezone_conversion import gmt_to_eastern
API_KEY = os.environ.get('API_KEY', '0833f103dc7c2924da06db624f74565c')  # prefer the env var over the hardcoded sample key
owm = pyowm.OWM(API_KEY)
mgr = owm.weather_manager()
def get_temperature():
days = []
dates = []
temp_min = []
temp_max = []
forecaster = mgr.forecast_at_place('New York, US', '3h')
    forecast = forecaster.forecast
for weather in forecast:
day = gmt_to_eastern(weather.reference_time())
date = day.date()
if date not in dates:
dates.append(date)
temp_min.append(None)
temp_max.append(None)
days.append(date)
temperature = weather.temperature('fahrenheit')['temp']
        if temp_min[-1] is None or temperature < temp_min[-1]:
            temp_min[-1] = temperature
        if temp_max[-1] is None or temperature > temp_max[-1]:
            temp_max[-1] = temperature
    return days, temp_min, temp_max
if __name__ == '__main__':
get_temperature()
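The script above imports gmt_to_eastern from a local timezone_conversion module that is not shown. A minimal sketch of what it presumably does, assuming pytz (pyowm's weather.reference_time() returns a Unix timestamp by default):

# timezone_conversion.py (hypothetical reconstruction)
from datetime import datetime
import pytz

def gmt_to_eastern(utc_timestamp):
    # convert a UTC Unix timestamp to a US/Eastern datetime
    utc_dt = datetime.fromtimestamp(utc_timestamp, tz=pytz.utc)
    return utc_dt.astimezone(pytz.timezone('US/Eastern'))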
| 28.194444
| 63
| 0.639409
| 127
| 1,015
| 4.850394
| 0.393701
| 0.068182
| 0.038961
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038058
| 0.249261
| 1,015
| 35
| 64
| 29
| 0.770341
| 0.030542
| 0
| 0
| 0
| 0
| 0.069176
| 0.032553
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7222707469c1717bc369a16b35dc8703f4ba96c7
| 4,692
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
# Lithium_Ion_LiFePO4_18650.py
#
# Created: Feb 2020, M. Clarke
# Modified: Sep 2021, R. Erhard
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# suave imports
from SUAVE.Core import Units
from .Lithium_Ion import Lithium_Ion
# package imports
import numpy as np
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiFePO4_18650(Lithium_Ion):
""" Specifies discharge/specific energy characteristics specific
18650 lithium-iron-phosphate-oxide battery cells.
Assumptions:
N/A
Source:
# Cell Information
Saw, L. H., Yonghuang Ye, and A. A. O. Tay. "Electrochemical–thermal analysis of
18650 Lithium Iron Phosphate cell." Energy Conversion and Management 75 (2013):
162-174.
# Electrode Area
Muenzel, Valentin, et al. "A comparative testing study of commercial
18650-format lithium-ion battery cells." Journal of The Electrochemical
Society 162.8 (2015): A1592.
# Cell Thermal Conductivities
(radial)
Murashko, Kirill A., Juha Pyrhönen, and Jorma Jokiniemi. "Determination of the
through-plane thermal conductivity and specific heat capacity of a Li-ion cylindrical
cell." International Journal of Heat and Mass Transfer 162 (2020): 120330.
(axial)
Saw, L. H., Yonghuang Ye, and A. A. O. Tay. "Electrochemical–thermal analysis of
18650 Lithium Iron Phosphate cell." Energy Conversion and Management 75 (2013):
162-174.
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
def __defaults__(self):
self.tag = 'Lithium_Ion_LiFePO4_Cell'
self.cell.diameter = 0.0185 # [m]
self.cell.height = 0.0653 # [m]
self.cell.mass = 0.03 * Units.kg # [kg]
self.cell.surface_area = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2) # [m^2]
self.cell.volume = np.pi*(0.5*self.cell.diameter)**2*self.cell.height # [m^3]
self.cell.density = self.cell.mass/self.cell.volume # [kg/m^3]
self.cell.electrode_area = 0.0342 # [m^2] # estimated
self.cell.max_voltage = 3.6 # [V]
self.cell.nominal_capacity = 1.5 # [Amp-Hrs]
self.cell.nominal_voltage = 3.6 # [V]
self.cell.charging_voltage = self.cell.nominal_voltage # [V]
self.watt_hour_rating = self.cell.nominal_capacity * self.cell.nominal_voltage # [Watt-hours]
self.specific_energy = self.watt_hour_rating*Units.Wh/self.cell.mass # [J/kg]
self.specific_power = self.specific_energy/self.cell.nominal_capacity # [W/kg]
self.ragone.const_1 = 88.818 * Units.kW/Units.kg
self.ragone.const_2 = -.01533 / (Units.Wh/Units.kg)
self.ragone.lower_bound = 60. * Units.Wh/Units.kg
self.ragone.upper_bound = 225. * Units.Wh/Units.kg
self.resistance = 0.022 # [Ohms]
self.specific_heat_capacity = 1115 # [J/kgK]
self.cell.specific_heat_capacity = 1115 # [J/kgK]
        self.cell.radial_thermal_conductivity = 0.475 # [W/(m*K)]
        self.cell.axial_thermal_conductivity = 37.6 # [W/(m*K)]
return
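As a sanity check on the derived defaults (worked numbers, not SUAVE output): watt_hour_rating = 1.5 Ah * 3.6 V = 5.4 Wh per cell, and specific_energy = 5.4 Wh * 3600 J/Wh / 0.03 kg = 648000 J/kg, i.e. 180 Wh/kg, inside the Ragone bounds of 60-225 Wh/kg set above. A hypothetical usage sketch, assuming SUAVE's usual Data-style instantiation:

# batt = Lithium_Ion_LiFePO4_18650()   # __defaults__ runs on construction
# print(batt.specific_energy)          # ~648000 J/kg (180 Wh/kg)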
| 53.931034
| 136
| 0.449915
| 441
| 4,692
| 4.684807
| 0.37415
| 0.100678
| 0.043562
| 0.036302
| 0.25847
| 0.249758
| 0.2091
| 0.158761
| 0.123911
| 0.123911
| 0
| 0.061397
| 0.444587
| 4,692
| 87
| 137
| 53.931034
| 0.730622
| 0.339088
| 0
| 0
| 0
| 0
| 0.008661
| 0.008661
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72230a4712ff2722d5fd895c22c3d235aabfdf44
| 3,544
|
py
|
Python
|
del_dupli_in_fasta.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | 1
|
2017-06-19T15:15:26.000Z
|
2017-06-19T15:15:26.000Z
|
del_dupli_in_fasta.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | null | null | null |
del_dupli_in_fasta.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | null | null | null |
'''
Created on Oct 20, 2015
@author: bardya
'''
import os
import argparse
from Bio import SeqIO
def parse_args():
parser = argparse.ArgumentParser(description='Delete all duplicate entries (header+sequence) in fasta. If only sequence identical, add "| duplicate" to header.')
parser.add_argument('-i', dest='infilepath', metavar='<fasta_file_path>', type=argparse.FileType('rt'),
                        help='path to a fasta file')
parser.add_argument('-o', dest='outfilepath', metavar='<fasta_file_path>', type=argparse.FileType('w'),
help='path to desired output fasta file')
parser.add_argument('-m', dest='mode', metavar='<header|sequence>', type=str, choices=["header", "Header", "sequence", "Sequence"],
default="header", help='mode headers checks for "headers and then sequence". Mode sequence searches only for sequence duplicates')
parser.add_argument('-k', dest='keep_flag', action="store_true",
help='with this options nothing gets deleted. Headers get count number attached to end of the line to make them unique.')
parser.add_argument('-rn', dest='rename_flag', action="store_true",
help='with this options nothing gets deleted. Headers get replaced by an integer reflecting the count')
parser.add_argument('--version', action='version', version='0.12')
return parser.parse_args()
def readfasta(seqdbfile, keep_flag=False, rename_flag=False):
from collections import Counter
try:
seqs = SeqIO.parse(seqdbfile, "fasta")
except:
seqs = SeqIO.parse(seqdbfile, "clustal")
seqlst = []
dupcount = 0
modcount = 0
for seq in seqs:
currIDlst = [e.id for e in seqlst]
if rename_flag:
seq.id = ">" + str(Counter(seqlst)[str(seq.id)] + 1)
modcount += 1
continue
if seq.id in currIDlst:
ind = currIDlst.index(seq.id)
if keep_flag:
seq.id = str(seq.id) + "_" + str(Counter(seqlst)[str(seq.id)] + 1)
modcount += 1
continue
if seqlst[ind].seq == seq.seq:
dupcount += 1
continue
else:
seq.id = str(seq.id) + "_" + str(Counter(seqlst)[str(seq.id)] + 1)
modcount += 1
seqlst.append(seq)
stats_dict = {"delentries":dupcount, "numofseqs":len(seqlst), "modentries":modcount}
return seqlst, stats_dict
def printStats(stats_dict):
outp = """
#Entries remaining in output:\t{numofseqs}
#Entries deleted:\t{delentries}
#Headers modified:\t{modentries}
""".format(**stats_dict)
print(outp)
def writefasta(outfile, seqlst):
count = SeqIO.write(seqlst, outfile, "fasta")
outfile.close()
if __name__ == '__main__':
args = parse_args()
try:
inputfile = open(args.infilepath.name, 'r')
outputfile = open(args.outfilepath.name, 'w')
# if not os.path.basename(args.outfilepath.name) == "basename":
# outputfile = open(args.outfilepath.name, 'w')
# else:
# outputfile = open(os.path.join(os.path.dirname(args.outfilepath.name),os.path.basename(args.infilepath.name) + '_consensus.faa'), 'w')
    except IOError:
        print('IOError occurred')
seqlst, stats_dict = readfasta(args.infilepath.name, keep_flag=args.keep_flag, rename_flag=args.rename_flag)
printStats(stats_dict)
writefasta(outputfile, seqlst)
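Hypothetical invocations of the script above (file names are placeholders):

#   python del_dupli_in_fasta.py -i proteins.faa -o dedup.faa        # drop exact duplicates
#   python del_dupli_in_fasta.py -i proteins.faa -o dedup.faa -k     # delete nothing; append counts to repeated headers
#   python del_dupli_in_fasta.py -i proteins.faa -o dedup.faa -rn    # delete nothing; replace headers with counts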
| 35.79798
| 165
| 0.615406
| 425
| 3,544
| 5.032941
| 0.345882
| 0.023375
| 0.047686
| 0.021038
| 0.226741
| 0.202431
| 0.17064
| 0.13324
| 0.13324
| 0.13324
| 0
| 0.00681
| 0.254233
| 3,544
| 99
| 166
| 35.79798
| 0.802497
| 0.093115
| 0
| 0.179104
| 0
| 0.029851
| 0.265293
| 0.014045
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.059701
| 0
| 0.149254
| 0.059701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72279efb6ba56531335b2f093691a4196e8f4923
| 2,531
|
py
|
Python
|
ardupilot/Tools/autotest/param_metadata/wikiemit.py
|
quadrotor-IITKgp/emulate_GPS
|
3c888d5b27b81fb17e74d995370f64bdb110fb65
|
[
"MIT"
] | 1
|
2021-07-17T11:37:16.000Z
|
2021-07-17T11:37:16.000Z
|
ardupilot/Tools/autotest/param_metadata/wikiemit.py
|
arl-kgp/emulate_GPS
|
3c888d5b27b81fb17e74d995370f64bdb110fb65
|
[
"MIT"
] | null | null | null |
ardupilot/Tools/autotest/param_metadata/wikiemit.py
|
arl-kgp/emulate_GPS
|
3c888d5b27b81fb17e74d995370f64bdb110fb65
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import re
from param import *
from emit import Emit
# Emit docs in a form acceptable to the APM wiki site
class WikiEmit(Emit):
def __init__(self):
wiki_fname = 'Parameters.wiki'
self.f = open(wiki_fname, mode='w')
preamble = '''#summary Dynamically generated list of documented parameters
= Table of Contents =
<wiki:toc max_depth="4" />
= Vehicles =
'''
self.f.write(preamble)
    def close(self):
        self.f.close()
def camelcase_escape(self, word):
if re.match(r"([A-Z][a-z]+[A-Z][a-z]*)", word.strip()):
return "!"+word
else:
return word
def wikichars_escape(self, text):
for c in "*,{,},[,],_,=,#,^,~,!,@,$,|,<,>,&,|,\,/".split(','):
text = re.sub("\\"+c, '`'+c+'`', text)
return text
def emit_comment(self, s):
self.f.write("\n\n=" + s + "=\n\n")
def start_libraries(self):
self.emit_comment("Libraries")
def emit(self, g, f):
t = "\n\n== %s Parameters ==\n" % (self.camelcase_escape(g.name))
for param in g.params:
if hasattr(param, 'DisplayName'):
t += "\n\n=== %s (%s) ===" % (self.camelcase_escape(param.DisplayName),self.camelcase_escape(param.name))
else:
t += "\n\n=== %s ===" % self.camelcase_escape(param.name)
if hasattr(param, 'Description'):
t += "\n\n_%s_\n" % self.wikichars_escape(param.Description)
else:
t += "\n\n_TODO: description_\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
t+= " * Values \n"
values = (param.__dict__[field]).split(',')
t+="|| *Value* || *Meaning* ||\n"
for value in values:
v = value.split(':')
t+="|| "+v[0]+" || "+self.camelcase_escape(v[1])+" ||\n"
else:
t += " * %s: %s\n" % (self.camelcase_escape(field), self.wikichars_escape(param.__dict__[field]))
        # print(t)
self.f.write(t)
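A small illustration of the two escape helpers above (values are hypothetical):

# e = WikiEmit()                     # note: opens Parameters.wiki for writing
# e.camelcase_escape('ArduPlane')    # -> '!ArduPlane' (CamelCase would otherwise auto-link on the wiki)
# e.camelcase_escape('THR_MAX')      # -> 'THR_MAX' (unchanged)
# e.wikichars_escape('a=b')          # -> 'a`=`b'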
| 34.671233
| 121
| 0.468589
| 282
| 2,531
| 4.046099
| 0.322695
| 0.092025
| 0.099912
| 0.014023
| 0.078878
| 0.007011
| 0
| 0
| 0
| 0
| 0
| 0.00187
| 0.366258
| 2,531
| 72
| 122
| 35.152778
| 0.709476
| 0.031213
| 0
| 0.078431
| 0
| 0
| 0.201471
| 0.025746
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137255
| false
| 0
| 0.058824
| 0
| 0.27451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
722ad974ef9283199399d93bbd17a334c7d31249
| 1,038
|
py
|
Python
|
master.py
|
iAzurel/thepicturesorter
|
21a3aee26adcfca0838db63be1434f7c49cd9548
|
[
"MIT"
] | null | null | null |
master.py
|
iAzurel/thepicturesorter
|
21a3aee26adcfca0838db63be1434f7c49cd9548
|
[
"MIT"
] | null | null | null |
master.py
|
iAzurel/thepicturesorter
|
21a3aee26adcfca0838db63be1434f7c49cd9548
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from PIL import Image
import os, os.path
import cv2
import sys
# Detect faces, then returns number of faces.
def detect_face(image_path, face_cascade):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Change the values based on needs.
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=7,
minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE  # cv2.cv was removed in OpenCV 3
)
return faces
# Moves pictures based on detection of faces.
def imagesChecker():
imgs_path = '/home/murtaza/Documents/thepicturesorter/Pictures/'
nofacesdir = '/home/murtaza/Documents/thepicturesorter/NoFaces/'
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
imgs = os.listdir(imgs_path)
    for img in imgs:
        faces = detect_face(imgs_path + '/' + img, face_cascade)
        if len(faces) == 0:
            os.rename(os.path.abspath(imgs_path + img), nofacesdir + img)
def main():
imagesChecker()
if __name__ == "__main__":
main()
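A hypothetical standalone check of detect_face, using the cascade file bundled with opencv-python:

# cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# faces = detect_face('photo.jpg', cascade)
# print('%d face(s) found' % len(faces))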
| 23.590909
| 76
| 0.716763
| 144
| 1,038
| 4.986111
| 0.506944
| 0.061281
| 0.027855
| 0.100279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018391
| 0.16185
| 1,038
| 44
| 77
| 23.590909
| 0.806897
| 0.136802
| 0
| 0
| 0
| 0
| 0.160134
| 0.150056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7230fd2e2774f3460096d023d321613a2a314e63
| 2,850
|
py
|
Python
|
webscripts/plotlygraphs.py
|
KathrynDH/DataDashboard
|
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
|
[
"MIT"
] | null | null | null |
webscripts/plotlygraphs.py
|
KathrynDH/DataDashboard
|
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
|
[
"MIT"
] | null | null | null |
webscripts/plotlygraphs.py
|
KathrynDH/DataDashboard
|
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 15:56:55 2021
@author: Kathryn Haske
Create plotly graphs for webpage
"""
import pandas as pd
import plotly.graph_objs as go
def line_graph(x_list, df, name_col, y_cols, chart_title, x_label, y_label):
"""
Function to create plotly line graph
Args:
x_list (list): graph x values
df (Pandas DataFrame): dataframe to use for series and y-values
name_col (string): df column to use for series names
y_cols (int or slice object): df column numbers to use for y-values
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly line graph
"""
graph = []
for index, row in df.iterrows():
graph.append(go.Scatter(
x = x_list,
y = row.tolist()[y_cols],
mode = 'lines',
name = row[name_col]
))
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
def scatter_plot(x_vals, y_vals, names, chart_title, x_label, y_label):
"""
Function to create plotly scatter plot
Args:
x_vals (list): graph x values
y_vals (list): graph y values
names (list of strings): title for each marker
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly scatter plot
"""
graph= [go.Scatter(
x = x_vals,
y = y_vals,
mode = 'markers',
text=names,
marker=dict(
color=y_vals, #set color equal to a variable
colorscale='Viridis' # plotly colorscale
)
)]
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
def bar_chart(x_vals, y_vals, chart_title, x_label, y_label):
"""
Function to create plotly bar graph
Args:
x_vals (list): graph x values
y_vals (list): graph y values
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly bar graph
"""
graph = [go.Bar(
x = x_vals,
y = y_vals
)]
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
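All three helpers return a plain {'data': ..., 'layout': ...} dict, which plotly can render directly. A hypothetical usage sketch:

# fig_dict = bar_chart(['a', 'b', 'c'], [1, 3, 2], 'Counts', 'label', 'count')
# go.Figure(fig_dict).show()   # or serialize fig_dict with plotly.utils.PlotlyJSONEncoder for the webpage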
| 27.403846
| 76
| 0.567018
| 376
| 2,850
| 4.140957
| 0.236702
| 0.057803
| 0.042389
| 0.073218
| 0.574181
| 0.574181
| 0.558767
| 0.558767
| 0.558767
| 0.558767
| 0
| 0.006948
| 0.343509
| 2,850
| 103
| 77
| 27.669903
| 0.825227
| 0.43193
| 0
| 0.380952
| 0
| 0
| 0.013158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.047619
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72314feeba462045a5c4c66db5b70dc7ce89e3a1
| 2,505
|
py
|
Python
|
jsl/experimental/seql/agents/bfgs_agent.py
|
AdrienCorenflos/JSL
|
8a3ba27179a2bd90207214fccb81df884b05c3d0
|
[
"MIT"
] | null | null | null |
jsl/experimental/seql/agents/bfgs_agent.py
|
AdrienCorenflos/JSL
|
8a3ba27179a2bd90207214fccb81df884b05c3d0
|
[
"MIT"
] | null | null | null |
jsl/experimental/seql/agents/bfgs_agent.py
|
AdrienCorenflos/JSL
|
8a3ba27179a2bd90207214fccb81df884b05c3d0
|
[
"MIT"
] | null | null | null |
import jax.numpy as jnp
from jax import vmap
from jax.scipy.optimize import minimize
import chex
import typing_extensions
from typing import Any, NamedTuple
import warnings
from jsl.experimental.seql.agents.agent_utils import Memory
from jsl.experimental.seql.agents.base import Agent
from jsl.experimental.seql.utils import posterior_noise, mse
Params = Any
class ModelFn(typing_extensions.Protocol):
def __call__(self,
params: chex.Array,
inputs: chex.Array):
...
class ObjectiveFn(typing_extensions.Protocol):
def __call__(self,
params: chex.Array,
inputs: chex.Array,
outputs: chex.Array,
model_fn: ModelFn):
...
class BeliefState(NamedTuple):
params: Params
class Info(NamedTuple):
# True if optimization succeeded
success: bool
'''
0 means converged (nominal)
1=max BFGS iters reached
3=zoom failed
4=saddle point reached
5=max line search iters reached
-1=undefined
'''
status: int
# final function value.
loss: float
def bfgs_agent(objective_fn: ObjectiveFn = mse,
model_fn: ModelFn = lambda mu, x: x @ mu,
obs_noise: float = 0.01,
               buffer_size: float = jnp.inf,  # unbounded by default
threshold: int = 1):
assert threshold <= buffer_size
memory = Memory(buffer_size)
def init_state(x: chex.Array):
return BeliefState(jnp.squeeze(x))
def update(belief: BeliefState,
x: chex.Array,
y: chex.Array):
assert buffer_size >= len(x)
x_, y_ = memory.update(x, y)
if len(x_) < threshold:
warnings.warn("There should be more data.", UserWarning)
info = Info(False, -1, jnp.inf)
return belief, info
optimize_results = minimize(objective_fn,
belief.params,
(x_, y_, model_fn),
method="BFGS")
info = Info(optimize_results.success,
optimize_results.status,
optimize_results.fun)
return BeliefState(optimize_results.x), info
def predict(belief: BeliefState,
x: chex.Array):
d, *_ = x.shape
noise = obs_noise * jnp.eye(d)
return model_fn(belief.params, x), noise
return Agent(init_state, update, predict)
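A minimal usage sketch for the agent above (data and shapes are hypothetical; mse is the default objective and x @ mu the default model):

# import jax.numpy as jnp
# agent = bfgs_agent()
# X = jnp.ones((10, 3)) * jnp.arange(3.)     # 10 samples, 3 features
# y = X @ jnp.array([1., 2., 3.])
# belief = agent.init_state(jnp.zeros(3))
# belief, info = agent.update(belief, X, y)  # runs jax.scipy.optimize BFGS
# y_hat, noise = agent.predict(belief, X)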
| 26.09375
| 68
| 0.578842
| 282
| 2,505
| 5.003546
| 0.382979
| 0.057406
| 0.040397
| 0.048901
| 0.17151
| 0.092133
| 0.092133
| 0.092133
| 0.092133
| 0.092133
| 0
| 0.006635
| 0.338124
| 2,505
| 96
| 69
| 26.09375
| 0.844391
| 0.020758
| 0
| 0.098361
| 0
| 0
| 0.013152
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.098361
| false
| 0
| 0.163934
| 0.016393
| 0.47541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72320fd783db7905693b184e50b586992cf4d02b
| 2,379
|
py
|
Python
|
abusech/urlhaus.py
|
threatlead/abusech
|
6c62f51f773cb17ac6943d87fb697ce1e9dae049
|
[
"MIT"
] | null | null | null |
abusech/urlhaus.py
|
threatlead/abusech
|
6c62f51f773cb17ac6943d87fb697ce1e9dae049
|
[
"MIT"
] | null | null | null |
abusech/urlhaus.py
|
threatlead/abusech
|
6c62f51f773cb17ac6943d87fb697ce1e9dae049
|
[
"MIT"
] | null | null | null |
from .abusech import AbuseCh
from collections import namedtuple
from datetime import datetime
class UrlHaus(AbuseCh):
base_url = 'https://urlhaus.abuse.ch'
urls = namedtuple('UrlHaus', ['id', 'date_added', 'url', 'url_status', 'threat', 'tags', 'urlhaus_link', 'reporter'])
payloads = namedtuple('Payload', ['timestamp', 'url', 'type', 'md5', 'sha256', 'signature'])
def parse_url_csv(self, urllist):
data = []
for row in urllist:
data.append(self.urls(
id=int(row[0].strip('"')),
date_added=datetime.strptime(row[1].strip('"'), self.date_format),
url=row[2].strip('"'),
url_status=row[3].strip('"'),
threat=row[4].strip('"'),
tags=row[5].strip('"'),
urlhaus_link=row[6].strip('"'),
reporter=row[7].strip('"')
))
return data
def get_data_dump(self):
response = self.get_url(url='{0}/downloads/csv/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_recent_urls(self):
response = self.get_url(url='{0}/downloads/csv_recent/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_online_urls(self):
response = self.get_url(url='{0}/downloads/csv_online/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_payloads(self):
response = self.get_url(url='{0}/downloads/payloads/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=6)
data = []
for row in urllist:
data.append(self.payloads(
timestamp=datetime.strptime(row[0].strip('"'), self.date_format),
url=row[1].strip('"'),
type=row[2].strip('"').lower(),
md5=row[3].strip('"') if len(row[3].strip('"')) == 32 else None,
sha256=row[4].strip('"') if len(row[4].strip('"')) == 64 else None,
signature=None if row[5].strip('"').lower() == "none" else row[5].strip('"').lower(),
))
return data
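A hypothetical usage sketch (the AbuseCh base class is assumed to supply get_url, parse_validate_csv and date_format):

# client = UrlHaus()
# for entry in client.get_recent_urls():
#     print(entry.date_added, entry.url_status, entry.url)
# for payload in client.get_payloads():
#     print(payload.sha256, payload.signature)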
| 43.254545
| 121
| 0.584279
| 294
| 2,379
| 4.585034
| 0.231293
| 0.046736
| 0.032641
| 0.05638
| 0.494807
| 0.494807
| 0.457715
| 0.457715
| 0.382789
| 0.354599
| 0
| 0.020624
| 0.245902
| 2,379
| 54
| 122
| 44.055556
| 0.730769
| 0
| 0
| 0.297872
| 0
| 0
| 0.100462
| 0.030685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106383
| false
| 0
| 0.06383
| 0
| 0.361702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7233678cd98a3bf61296f7c1aa2006b01024a6ac
| 5,894
|
py
|
Python
|
thorbanks/checks.py
|
Jyrno42/django-thorbanks
|
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
|
[
"BSD-3-Clause"
] | 6
|
2015-06-15T12:47:05.000Z
|
2019-04-24T01:32:12.000Z
|
thorbanks/checks.py
|
Jyrno42/django-thorbanks
|
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
|
[
"BSD-3-Clause"
] | 13
|
2015-12-23T14:29:26.000Z
|
2021-02-18T18:35:56.000Z
|
thorbanks/checks.py
|
Jyrno42/django-thorbanks
|
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
|
[
"BSD-3-Clause"
] | 3
|
2016-08-08T10:35:39.000Z
|
2020-12-29T23:10:55.000Z
|
import os
from django.conf import settings
from django.core.checks import Error, register
from thorbanks.settings import configure, parse_banklinks
@register
def check_model_settings(app_configs, **kwargs):
issues = []
manual_models = getattr(settings, "THORBANKS_MANUAL_MODELS", None)
if manual_models is None: # No manual models
# If no manual models then we need to ensure that `thorbanks_models` is configured correctly
if "thorbanks_models" not in settings.INSTALLED_APPS:
issues.append(
Error(
"thorbanks_models must be added to settings.INSTALLED_APPS when not using THORBANKS_MANUAL_MODELS",
id="thorbanks.E001",
)
)
migration_modules = getattr(settings, "MIGRATION_MODULES", {})
if not migration_modules.get("thorbanks_models", ""):
issues.append(
Error(
"Thorbanks is missing from settings.MIGRATION_MODULES",
hint="Add it to your settings like this - `MIGRATION_MODULES = "
'{ "thorbanks_models": "shop.thorbanks_migrations" }.',
id="thorbanks.E002",
)
)
else:
if manual_models is not None and not isinstance(manual_models, dict):
issues.append(
Error(
"settings.THORBANKS_MANUAL_MODELS must be a dict",
hint="See docstring of thorbanks.settings.get_model.",
id="thorbanks.E003",
)
)
if "thorbanks_models" in settings.INSTALLED_APPS:
issues.append(
Error(
"thorbanks_models should not be added to "
"settings.INSTALLED_APPS when using THORBANKS_MANUAL_MODELS",
id="thorbanks.E011",
)
)
return issues
@register
def check_banklink_settings(app_configs, **kwargs):
issues = []
links = parse_banklinks(getattr(settings, "BANKLINKS", None))
if links and isinstance(links, dict):
# Verify it contains valid data
for bank_name, data in links.items():
if len(bank_name) > 16:
issues.append(
Error(
"settings.BANKLINKS keys are limited to 16 characters ({})".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E005",
)
)
if not isinstance(data, dict):
issues.append(
Error(
"settings.BANKLINKS['{}'] must be a dict with settings for the bank".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E006",
)
)
continue
required_keys = [
"REQUEST_URL",
"PRIVATE_KEY",
"PUBLIC_KEY",
"CLIENT_ID",
"BANK_ID",
"PROTOCOL",
"PRINTABLE_NAME",
"IMAGE_PATH",
"TYPE",
"ORDER",
]
if data["PROTOCOL"] == "ipizza":
for key in required_keys:
if key not in data or data[key] is None:
issues.append(
Error(
"settings.BANKLINKS['{}']: {} is required".format(
bank_name, key
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E007",
)
)
if data["PUBLIC_KEY"] is not None and not os.path.isfile(
data["PUBLIC_KEY"]
):
issues.append(
Error(
"settings.BANKLINKS['{}']: PUBLIC_KEY file `{}` does not exist".format(
bank_name, data["PUBLIC_KEY"]
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E008",
)
)
if data["PRIVATE_KEY"] is not None and not os.path.isfile(
data["PRIVATE_KEY"]
):
issues.append(
Error(
"settings.BANKLINKS['{}']: PRIVATE_KEY file `{}` does not exist".format(
bank_name, data["PRIVATE_KEY"]
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E009",
)
)
else:
issues.append(
Error(
"settings.BANKLINKS['{}']: PROTOCOL must be ipizza".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E010",
)
)
else:
issues.append(
Error(
"settings.BANKLINKS must be a dict",
hint="See docstring of thorbanks.settings.parse_banklinks for reference.",
id="thorbanks.E004",
)
)
configure()
return issues
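For reference, a settings fragment that would satisfy every check above (placeholder values, assuming the key files exist; not taken from the project docs):

# settings.py
# BANKLINKS = {
#     "swedbank": {                                      # keys are limited to 16 characters
#         "REQUEST_URL": "https://example.com/banklink",
#         "PRIVATE_KEY": "/etc/keys/swedbank_private.pem",
#         "PUBLIC_KEY": "/etc/keys/swedbank_public.pem",
#         "CLIENT_ID": "uid100001",
#         "BANK_ID": "HP",
#         "PROTOCOL": "ipizza",                          # the only accepted protocol
#         "PRINTABLE_NAME": "Swedbank",
#         "IMAGE_PATH": "swedbank.png",
#         "TYPE": "banklink",
#         "ORDER": 1,
#     },
# }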
| 35.293413
| 119
| 0.449779
| 490
| 5,894
| 5.267347
| 0.244898
| 0.051143
| 0.072453
| 0.077489
| 0.496707
| 0.428516
| 0.354901
| 0.328555
| 0.323131
| 0.221232
| 0
| 0.011821
| 0.468951
| 5,894
| 166
| 120
| 35.506024
| 0.81278
| 0.023244
| 0
| 0.345324
| 0
| 0
| 0.280028
| 0.103772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014388
| false
| 0
| 0.028777
| 0
| 0.057554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
723547959ebc4a91f17440d870c4a23f152e86d1
| 4,705
|
py
|
Python
|
rm_protection/rm_p.py
|
https-waldoww90-wadewilson-com/rm-protection
|
4dcc678fa687373fb4439c5c4409f7649e653084
|
[
"MIT"
] | 490
|
2017-02-03T14:15:50.000Z
|
2022-03-31T02:57:20.000Z
|
rm_protection/rm_p.py
|
https-waldoww90-wadewilson-com/rm-protection
|
4dcc678fa687373fb4439c5c4409f7649e653084
|
[
"MIT"
] | 8
|
2017-02-03T16:13:53.000Z
|
2017-05-28T05:20:45.000Z
|
rm_protection/rm_p.py
|
alanzchen/rm-protection
|
4dcc678fa687373fb4439c5c4409f7649e653084
|
[
"MIT"
] | 41
|
2017-02-04T15:13:26.000Z
|
2021-12-19T08:58:38.000Z
|
from sys import argv, exit
from os.path import expanduser as expu, expandvars as expv
from os.path import basename, dirname, abspath, isdir, exists
from subprocess import Popen, PIPE
from builtins import input
from rm_protection.config import Config
c = Config()
evaledpaths = []
def pprint(msg):
global c
print(c.rm_prefix + msg)
def ask(evalpath, parent=False):
global evaledpaths
if evalpath in evaledpaths:
return True
else:
with open(evalpath, "r") as f:
question = f.readline().rstrip("\n")
answer = f.readline().rstrip("\n")
try:
flags = f.readline().rstrip("\n")
except:
flags = ''
if parent and 'R' not in flags:
pprint(original_path(evalpath) + ' is protected but flag "R" is missing')
evaledpaths.append(evalpath)
return True
else:
if parent:
pprint('The parent directory ' + original_path(evalpath) + ' is protected')
pprint(original_path(evalpath) + ": " + question)
if input("Answer: ") == answer:
evaledpaths.append(evalpath)
return True
else:
if parent:
return False
else:
pprint("Wrong answer! " + original_path(evalpath) + " will not be removed")
pprint("The answer is stored in " + evalpath)
return False
def original_path(evalpath):
global c
basepath = dirname(evalpath)
filename = basename(evalpath)[1:-len(c.suffix)]
if basepath == '/':
return basepath + filename
else:
return basepath + '/' + filename
def ask_in(q, a):
    answer = input(q)
    return bool(answer and answer in a)  # guard: an empty reply no longer matches any substring
def gen_evalpaths(path):
paths = {}
path = dirname(path)
while path != '/':
evalpath = gen_eval(path)
paths[path] = evalpath
path = dirname(path)
return paths
def gen_eval(path):
global c
basedir = dirname(path)
if basedir == '/':
basedir = ''
return basedir + "/." + basename(path) + c.suffix
def parent_clear(file_evalpaths, path):
for filepath in file_evalpaths:
parent_eval = file_evalpaths[filepath]
if exists(parent_eval):
if not ask(parent_eval, parent=True):
pprint(path + ' will not be removed')
return False
return True
def rm(rm_args=None):
global c
global evaledpaths
args = ''
paths = []
evalpaths = []
option_end = False
if not rm_args:
rm_args = argv[1:]
for arg in rm_args:
if arg == '--':
option_end = True
elif (arg.startswith("-") and not option_end) or arg in c.invalid:
pass
else:
path = abspath(expv(expu(arg)))
file_evalpaths = gen_evalpaths(path)
evalpath = gen_eval(path)
if c.suffix in arg:
pprint(path + " is a protection file")
if ask_in(q="Do you want to remove it? (y/n) ", a="Yesyes"):
args += arg + ' '
else:
pprint(path + " will not be removed")
continue
if exists(evalpath):
if ask(evalpath):
paths.append(path)
evalpaths.append(evalpath)
else:
continue
if not parent_clear(file_evalpaths, path):
continue
if isdir(path):
find_exec = "find " + path + " -name " + "\".*" + c.suffix + "\"" + " -print"
out, err = Popen(find_exec, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
for pfile in iter(out.splitlines()):
pprint("A protected file or directory is found inside " + path)
if not ask(pfile):
pprint("Terminated due to potentially dangerous action")
exit(1)
args += bash_path(arg) + ' '
Popen("rm " + args, shell=True).wait()
remove_protection_files = ''
for evalpath, path in zip(evalpaths, paths):
if exists(evalpath) and not exists(path):
remove_protection_files += bash_path(evalpath) + ' '
if remove_protection_files:
Popen("rm " + remove_protection_files, shell=True).wait()
evaledpaths = []
def bash_path(path):
for sym in "\\#;,\'\"|{}[]() *&?@<>=!":
path = ("\\"+sym).join(path.split(sym))
return path
if __name__ == "__main__":
rm()
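For reference, ask() above reads a protection file of three lines -- question, answer, optional flags ('R' also guards the directory's contents). A hypothetical example, assuming the config's default suffix is .rm-protection:

# .thesis.rm-protection, protecting a file named thesis:
#   What year was this file created?
#   2017
#   R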
| 30.953947
| 120
| 0.530287
| 515
| 4,705
| 4.741748
| 0.264078
| 0.044226
| 0.04095
| 0.019656
| 0.126945
| 0.059787
| 0.038493
| 0.038493
| 0
| 0
| 0
| 0.000998
| 0.361105
| 4,705
| 151
| 121
| 31.15894
| 0.811377
| 0
| 0
| 0.263566
| 0
| 0
| 0.086716
| 0
| 0.007752
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0.007752
| 0.046512
| 0.007752
| 0.217054
| 0.100775
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72381b6de058125b33932e8f4cd988e19b104ff7
| 6,856
|
py
|
Python
|
src/text_normalizer/tokenization/_tokenize.py
|
arkataev/text_normalizer
|
a99326e31012157980d014c9730ac94bd1d18c1d
|
[
"MIT"
] | null | null | null |
src/text_normalizer/tokenization/_tokenize.py
|
arkataev/text_normalizer
|
a99326e31012157980d014c9730ac94bd1d18c1d
|
[
"MIT"
] | null | null | null |
src/text_normalizer/tokenization/_tokenize.py
|
arkataev/text_normalizer
|
a99326e31012157980d014c9730ac94bd1d18c1d
|
[
"MIT"
] | null | null | null |
"""Модуль для создания и работы с токенами"""
import logging
import re
import string
from enum import IntEnum
from functools import lru_cache
from typing import Tuple, Iterator
from nltk.corpus import stopwords
from nltk.tokenize import ToktokTokenizer
from nltk.tokenize.api import TokenizerI
from ..config import RegexConfigType, PipelineConfigType, load_regex_conf, load_conf
__all__ = [
'sent_tokenize',
'TokTok',
'token_type',
'to_token',
'TokenType',
'iTokenTuple',
'russian_stopwords',
'replace_bigrams',
'KILO_POSTFIX',
'init_cache',
'cache_clear',
'get_tokenizer'
]
logger = logging.getLogger('rtn')
# Symbol the tokenizer uses to mark tokens carrying a "thousands" marker (e.g. 5к, 5 к)
KILO_POSTFIX = '%'
russian_stopwords = stopwords.words("russian")
_spaces = string.whitespace
_punct = set(f'{string.punctuation}{"«»…=#-——–``"}{string.whitespace}')
_isolating_punct = {'"', "'", '{', '}', '[', ']', '(', ')', '«', '»'}
_synonyms = load_conf(PipelineConfigType.SYNONIMS)
_regex_time = load_regex_conf(RegexConfigType.TIME)
class TokenType(IntEnum):
"""
    Token types.
    NB! IntEnum allows fast checks that token types match
>>> TokenType.NUM == TokenType.NUM
True
>>> [TokenType.TXT, TokenType.PUNKT] == [TokenType.TXT, TokenType.PUNKT]
True
"""
NONE = 0
TXT = 1
PUNKT = 2
DATE = 3
NUM = 4
TIME = 5
PHONE = 6
EMOJI = 7
URL = 8
EMAIL = 9
    PUNKT_ISO = 10  # isolating punctuation (e.g. "", (), [] etc.)
SPACE = 11
CARDNUM = 12
class iTokenTuple(Tuple):
"""
    Interface for creating and working with tokens.
    NB! This class should NOT be used as a constructor, since that would significantly
    slow down object creation. It is faster to return, from the functions that implement
    this interface, plain tuples with elements of the required type in the required order.
"""
_value: str
_type: TokenType
class RegexTokenType:
"""
    Regex-based token-type detector.
    Checks the token against a fixed list of regular expressions.
    If a match is found, the corresponding token type is returned; otherwise, the special type TokenType.NONE
>>> tok_rextype = RegexTokenType()
>>> tok_rextype('20.10.2020')
TokenType.DATE
>>> tok_rextype('test@gmail.com')
TokenType.EMAIL
>>> tok_rextype('https://pypi.org/')
TokenType.URL
"""
def __init__(self):
self.regex = {
TokenType.DATE: load_regex_conf(RegexConfigType.DATE),
TokenType.EMAIL: load_regex_conf(RegexConfigType.EMAIL),
TokenType.URL: load_regex_conf(RegexConfigType.URL),
TokenType.TIME: load_regex_conf(RegexConfigType.TIME),
}
def __call__(self, token: str) -> TokenType:
r = self.regex
for key in r:
if r[key].match(token):
return key
return TokenType.NONE
class TokTok(TokenizerI):
"""
    Based on the regex set and a simplified string-processing algorithm of the
    `TokTok <https://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.toktok>`_ tokenizer.
"""
def __init__(self):
self._regexes = ToktokTokenizer.TOKTOK_REGEXES[:]
self._regexes[2] = (_regex_time, r"(\1)")
self._regexes.insert(3, (re.compile(r"(?<![а-яА-Я])([а-яА-Я]{1})(\/)([а-яА-Я]{1})"), r"\1\3 "))
self._regexes.insert(4, (re.compile(r"(\d)(-)([а-яА-Я]+)"), r"\1\3 "))
self._regexes.append((re.compile(r"(-«»)"), r" \1 "))
self._regexes.append((re.compile(r"\s+(-)(\w+)"), r" \1 \2 "))
self._regexes.append((re.compile(r"(\w+)(-)\s"), r" \1 \2 "))
self._regexes.append((re.compile(r"(?<=[а-яА-я])([/\\])"), r" \1 "))
self._regexes.append((re.compile(r"([=…№\-——'\s]+)(\d+)([=…№\-——'\s]+)"), r" \1 \2 \3"))
        # mark tokens carrying a "thousands" marker (e.g. 5к, 5 к)
self._regexes.append((re.compile(r"(\d)\s?[кk]"), rf"{KILO_POSTFIX}\1{KILO_POSTFIX}"))
self._regexes.append(ToktokTokenizer.FUNKY_PUNCT_2)
def tokenize(self, text: str) -> [str]:
for regexp, subsitution in self._regexes:
text = regexp.sub(subsitution, text)
text = text.strip()
return text.split()
@lru_cache(maxsize=1)
def get_tokenizer() -> TokenizerI:
return TokTok()
@lru_cache(maxsize=1)
def get_regex_type() -> RegexTokenType:
return RegexTokenType()
def sent_tokenize(sentence: str, tokenizer: TokenizerI) -> Iterator[iTokenTuple]:
"""
    Build an iterator of (token, token type) tuples from a sentence
    :param sentence: the sentence
    :param tokenizer: a tokenizer implementing the NLTK TokenizerI interface
"""
return map(to_token, tokenizer.tokenize(sentence))
def token_type(token_string: str) -> TokenType:
"""Определить тип токена"""
if not token_string:
return TokenType.NONE
    if token_string in _spaces:  # "in" works faster than calling the method ' '.isspace()
return TokenType.SPACE
elif token_string in _isolating_punct:
return TokenType.PUNKT_ISO
elif token_string in _punct:
return TokenType.PUNKT
elif token_string.isnumeric():
return TokenType.NUM
rextype = get_regex_type()
type_ = rextype(token_string)
if type_ is not TokenType.NONE:
return type_
return TokenType.TXT
def to_token(token_string: str) -> iTokenTuple:
"""
    Build a token from a string
>>> to_token('.')
('.', TokenType.PUNKT)
>>> to_token('1ый')
('1', TokenType.NUM)
>>> to_token('hello@gmail.com')
('hello@gmail.com', TokenType.EMAIL)
    :param token_string: a string with no whitespace
"""
return token_string, token_type(token_string)
def replace_bigrams(tokens: Iterator[iTokenTuple]) -> Iterator[iTokenTuple]:
"""
    Replace bigrams with tokens from the dictionary.
    Used to quickly replace tokens like "когда то" with "когда-то", as well as other bigrams.
    >>> from text_normalizer.tokenization import replace_bigrams
    >>> replace_bigrams(iter([('окко', TokenType.TXT), ('тв', TokenType.TXT)]))
    ('окко-тв', TokenType.TXT)
"""
crnt = None
buffer = []
for token, _type in tokens:
crnt, prev = token, crnt
synonym = _synonyms.get(f'{crnt}', crnt)
if prev:
bigram = _synonyms.get(f'{prev} {crnt}')
if bigram:
buffer[-1] = (bigram, _type)
continue
buffer.append((synonym, _type))
yield from buffer
def init_cache():
get_regex_type()
get_tokenizer()
logger.debug('Cache initiated')
def cache_clear():
get_regex_type.cache_clear()
get_tokenizer.cache_clear()
logger.debug('Cache cleared')
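A minimal usage sketch for the pipeline above (the sentence and the expected output are illustrative):

# from text_normalizer.tokenization import sent_tokenize, get_tokenizer, replace_bigrams
# tokens = sent_tokenize('встретимся в 10:30', get_tokenizer())
# print(list(replace_bigrams(tokens)))
# # e.g. [('встретимся', TokenType.TXT), ('в', TokenType.TXT), ('10:30', TokenType.TIME)]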
| 27.534137
| 107
| 0.641774
| 833
| 6,856
| 5.158463
| 0.337335
| 0.030719
| 0.018618
| 0.02653
| 0.099837
| 0.09495
| 0.038632
| 0.038632
| 0.013963
| 0
| 0
| 0.010092
| 0.219516
| 6,856
| 248
| 108
| 27.645161
| 0.788077
| 0.305134
| 0
| 0.046875
| 0
| 0.007813
| 0.107348
| 0.035857
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.078125
| 0.015625
| 0.429688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7239365caa1436583482800c75a7cb1d2a4fbe35
| 18,942
|
py
|
Python
|
pi/los.py
|
Coding-Badly/Little-Oven
|
3d1178f495aea1180e25bddbb4f139d8e37e6a65
|
[
"Apache-2.0"
] | null | null | null |
pi/los.py
|
Coding-Badly/Little-Oven
|
3d1178f495aea1180e25bddbb4f139d8e37e6a65
|
[
"Apache-2.0"
] | null | null | null |
pi/los.py
|
Coding-Badly/Little-Oven
|
3d1178f495aea1180e25bddbb4f139d8e37e6a65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""=============================================================================
los for Little-Oven. los (Little Oven Setup) prepares a Raspberry Pi for
Little-Oven development. This module does the actual work. los (no
extension) is a bash script that creates a service that runs this code.
Running the following puts the whole mess in motion...
curl -s "https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi/los" | bash
journalctl -u los.service
----------------------------------------------------------------------------
Copyright 2019 Brian Cook (aka Coding-Badly)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================================================================="""
import grp
import json
import os
import pathlib
import pwd
import requests
import stat
import subprocess
import time
import uuid
class CurrentStepManager():
def __init__(self):
self._path_step = pathlib.Path('los.step')
self._current_step = None
def get_current_step(self):
if self._current_step is None:
try:
current_step_text = self._path_step.read_text()
self._current_step = int(current_step_text)
except FileNotFoundError:
self._current_step = 1
return self._current_step
def increment_current_step(self):
_ = self.get_current_step()
self._current_step += 1
self._path_step.write_text(str(self._current_step))
class DirectoryMaker():
def __init__(self, default_final_mode=0o700):
self._default_final_mode = default_final_mode
self._uid = pwd.getpwnam("pi").pw_uid
self._gid = grp.getgrnam("pi").gr_gid
def mkdir(self, path, parents=False, final_mode=None):
final_mode = self._default_final_mode if final_mode is None else final_mode
path.mkdir(mode=0o777, parents=parents, exist_ok=True)
os.chown(str(path), self._uid, self._gid)
path.chmod(final_mode)
def chown(self, path):
os.chown(str(path), self._uid, self._gid)
def wall(text):
subprocess.run(['wall',text], check=True)
def wall_and_print(text, step=None):
if step is not None:
text = 'Step #{}: {}'.format(int(step), text)
wall(text)
print(text)
def update_then_upgrade():
time.sleep(5.0)
wall('Update the APT package list.')
subprocess.run(['apt-get','-y','update'], check=True)
wall('Upgrade APT packages.')
subprocess.run(['apt-get','-y','upgrade'], check=True)
def simple_get(source_url, destination_path):
r = requests.get(source_url, stream=True)
r.raise_for_status()
with destination_path.open('wb') as f:
for chunk in r.iter_content(64*1024):
f.write(chunk)
def check_global_config():
global global_config
if path_los_json.exists():
with path_los_json.open() as f:
global_config = json.load(f)
else:
global_config = dict()
csm = CurrentStepManager()
path_los_json = pathlib.Path('los.json')
check_global_config()
MODE_EXECUTABLE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
need_reboot = False
go_again = True
while go_again:
go_again = False
if csm.get_current_step() == 1:
wall_and_print('Ensure the operating system is up-to-date.', csm.get_current_step())
update_then_upgrade()
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 2:
wall_and_print('Install Git.', csm.get_current_step())
subprocess.run(['apt-get','-y','install','git'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 3:
wall_and_print('Install Python development.', csm.get_current_step())
subprocess.run(['apt-get','-y','install','python3-dev'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 4:
wall_and_print('Ensure the operating system is up-to-date again.', csm.get_current_step())
update_then_upgrade()
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 5:
wall_and_print('Install pip.', csm.get_current_step())
path_get_pip = pathlib.Path('get-pip.py')
simple_get('https://bootstrap.pypa.io/get-pip.py', path_get_pip)
subprocess.run(['python3',str(path_get_pip)], check=True)
path_get_pip.unlink()
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 6:
wall_and_print('Install Python modules required by this module.', csm.get_current_step())
subprocess.run(['pip','install', 'xkcdpass'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 7:
wall_and_print('Get the global configuration file.', csm.get_current_step())
base_url = os.environ.get('LOS_BASE_URL', 'https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi')
get_this = base_url + '/' + 'los.json'
try:
simple_get(get_this, path_los_json)
except requests.exceptions.HTTPError:
pass
check_global_config()
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 8:
wall_and_print('Set the password using the https://xkcd.com/936/ technique.', csm.get_current_step())
from xkcdpass import xkcd_password as xp
wordfile = xp.locate_wordfile()
mywords = xp.generate_wordlist(wordfile=wordfile, min_length=5, max_length=8)
new_password = xp.generate_xkcdpassword(mywords, delimiter=',', numwords=3)
wall_and_print(' The new password is...')
wall_and_print(' {}'.format(new_password))
# fix: Send the new password to a repository.
new_password = 'whatever' # rmv
pi_new_password = ('pi:' + new_password).encode('ascii')
subprocess.run("chpasswd", input=pi_new_password, check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 9:
wall_and_print('Change the hostname.', csm.get_current_step())
path_hostname = pathlib.Path('/etc/hostname')
path_hostname.write_text('Little-Oven\n')
subprocess.run(['sed','-i',"s/raspberrypi/Little-Oven/",'/etc/hosts'], check=True)
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 10:
wall_and_print('Change the timezone.', csm.get_current_step())
# Why localtime has to be removed...
# https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1554806
# date "+%Z %z"
pathlib.Path('/etc/timezone').write_text('America/Chicago\n')
pathlib.Path('/etc/localtime').unlink()
subprocess.run(['dpkg-reconfigure','-f','noninteractive','tzdata'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 11:
wall_and_print('Change the keyboard layout.', csm.get_current_step())
# debconf-get-selections | grep keyboard-configuration
# The top entry is suspect. "gb" was the value after changing
# keyboards using dpkg-reconfigure.
keyboard_conf = """
keyboard-configuration\tkeyboard-configuration/xkb-keymap\tselect\tus
keyboard-configuration\tkeyboard-configuration/layoutcode\tstring\tus
keyboard-configuration\tkeyboard-configuration/layout\tselect\tEnglish (US)
keyboard-configuration\tkeyboard-configuration/variant\tselect\tEnglish (US)
""".encode("ascii")
subprocess.run("debconf-set-selections", input=keyboard_conf, check=True)
subprocess.run(['dpkg-reconfigure','-f','noninteractive','keyboard-configuration'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 12:
wall_and_print('Change the locale.', csm.get_current_step())
# locale
locale_conf = """
locales\tlocales/locales_to_be_generated\tmultiselect\ten_US.UTF-8 UTF-8
locales\tlocales/default_environment_locale\tselect\ten_US.UTF-8
""".encode("ascii")
subprocess.run("debconf-set-selections", input=locale_conf, check=True)
subprocess.run(['sed','-i',"s/^# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/",'/etc/locale.gen'], check=True)
subprocess.run(['dpkg-reconfigure','-f','noninteractive','locales'], check=True)
subprocess.run(['update-locale','LANG=en_US.UTF-8'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 13:
wall_and_print('Configure Git.', csm.get_current_step())
this_mac = format(uuid.getnode(), 'X')
config_by_this_mac = global_config.get(this_mac, None)
config_github = config_by_this_mac.get('github', None) if config_by_this_mac else None
if config_github:
# Set basic Git configuration.
git_user_name = config_github.get('user.name', 'Git User Name Goes Here')
git_user_email = config_github.get('user.email', 'whomever@dallasmakerspace.org')
git_core_editor = config_github.get('core.editor', 'nano')
subprocess.run(['git','config','--system','user.name',git_user_name], check=True)
subprocess.run(['git','config','--system','user.email',git_user_email], check=True)
subprocess.run(['git','config','--system','core.editor',git_core_editor], check=True)
# Ensure the .ssh directory exists.
path_dot_ssh = pathlib.Path('/home/pi/.ssh')
# https://superuser.com/questions/215504/permissions-on-private-key-in-ssh-folder
dm = DirectoryMaker()
dm.mkdir(path_dot_ssh)
# Add a Github section to the .ssh/config file.
path_ssh_config = path_dot_ssh / 'config'
with path_ssh_config.open('at') as f:
f.write('Host github.com\n')
f.write(' User git\n')
f.write(' Hostname github.com\n')
f.write(' PreferredAuthentications publickey\n')
f.write(' IdentityFile ~/.ssh/github/id_rsa\n')
dm.chown(path_ssh_config)
# Create a github subdirectory for the Github key pair.
path_github = path_dot_ssh / 'github'
dm.mkdir(path_github)
# Generate the Github key pair.
path_id_rsa = path_github / 'id_rsa'
# ssh-keygen -t rsa -C "arduino.tiny@gmail.com" -b 1024 -N '' -f ~/.ssh/github/id_rsa
subprocess.run(['ssh-keygen','-t','rsa','-C',git_user_email,'-b','4096','-N','','-f',str(path_id_rsa)], check=True)
dm.chown(path_id_rsa)
dm.chown(path_id_rsa.with_suffix('.pub'))
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 14:
# wall_and_print('Install PiFace Digital 2 packages from GitHub.', csm.get_current_step())
# # Common
# subprocess.run(['git','clone','git://github.com/piface/pifacecommon.git','/home/pi/python-things/pifacecommon'], check=True)
# subprocess.run(['python3','/home/pi/python-things/pifacecommon/setup.py','install'], cwd='/home/pi/python-things/pifacecommon/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-things/pifacecommon'], check=True)
# # Digital I/O
# subprocess.run(['git','clone','git://github.com/piface/pifacedigitalio.git','/home/pi/python-things/pifacedigitalio'], check=True)
# subprocess.run(['python3','/home/pi/python-things/pifacedigitalio/setup.py','install'], cwd='/home/pi/python-things/pifacedigitalio/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-things/pifacedigitalio'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 15:
# wall_and_print('Install python-dispatch package from GitHub.', csm.get_current_step())
# subprocess.run(['git','clone','https://github.com/Coding-Badly/python-dispatch.git','/home/pi/python-things/python-dispatch'], check=True)
# subprocess.run(['python3','/home/pi/python-things/python-dispatch/setup.py','install'], cwd='/home/pi/python-things/python-dispatch/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-dispatch'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 16:
wall_and_print('Clone the Little Oven.', csm.get_current_step())
# git clone git@github.com:Coding-Badly/Little-Oven.git /home/pi/Little-Oven
# git clone https://github.com/Coding-Badly/Little-Oven.git /home/pi/Little-Oven
subprocess.run(['git','clone','https://github.com/Coding-Badly/Little-Oven.git','/home/pi/Little-Oven'], check=True)
try:
subprocess.run(['git','checkout','-t','remotes/origin/master'], cwd='/home/pi/Little-Oven', stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError as exc:
if not "already exists" in exc.stderr.decode("utf-8"):
raise
# Change the remote url to use ssh.
# git remote set-url origin git@github.com:Coding-Badly/Little-Oven.git
subprocess.run(['git','remote','set-url','origin','git@github.com:Coding-Badly/Little-Oven.git'], cwd='/home/pi/Little-Oven', check=True)
# Use pip to install dependencies.
path_requirements = pathlib.Path('/home/pi/Little-Oven/requirements.txt')
if path_requirements.exists():
subprocess.run(['pip','install','-U','-r',str(path_requirements)], check=True)
# Fix ownership of the Little-Oven repository.
subprocess.run(['chown','-R','pi:pi','/home/pi/Little-Oven'], check=True)
# Prepare the cache directory.
dm = DirectoryMaker(default_final_mode=0o755)
path_cache = pathlib.Path('/var/cache/Rowdy Dog Software/Little-Oven/pans')
dm.mkdir(path_cache, parents=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 17:
# wall_and_print('Install PiFace Digital 2 initialization service.', csm.get_current_step())
# subprocess.run(['cp','/home/pi/Little-Oven/pi/init_PiFace_Digital_2.service','/etc/systemd/system/init_PiFace_Digital_2.service'], check=True)
# subprocess.run(['systemctl','enable','init_PiFace_Digital_2.service'], check=True)
# need_reboot = True
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 18:
wall_and_print('Configure Rust to be easily installed.', csm.get_current_step())
# Download rustup.sh to a common location and make it Read + Execute
# for everyone. Writable for the owner (root).
path_rustup_sh = pathlib.Path('/usr/local/bin/rustup.sh')
simple_get('https://sh.rustup.rs', path_rustup_sh)
path_rustup_sh.chmod(MODE_EXECUTABLE)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 19:
wall_and_print('Install FUSE (support for VeraCrypt).', csm.get_current_step())
subprocess.run(['apt-get','-y','install','fuse'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 20:
wall_and_print('Configure VeraCrypt to be easily installed.', csm.get_current_step())
# Prepare a directory for the VeraCrypt files.
dm = DirectoryMaker(default_final_mode=0o755)
path_temp = pathlib.Path('./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW')
dm.mkdir(path_temp, parents=True)
# Download the install script
path_tar_bz2 = path_temp / 'veracrypt-setup.tar.bz2'
simple_get('https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2', path_tar_bz2)
# Extract the contents
subprocess.run(['tar','xvfj',str(path_tar_bz2),'-C',str(path_temp)], check=True)
path_src = path_temp / 'veracrypt-1.21-setup-console-armv7'
path_dst = pathlib.Path('/usr/local/bin/veracrypt-setup')
# Copy the console setup to a location on the PATH
subprocess.run(['cp',str(path_src),str(path_dst)], check=True)
# Remove the temporary directory
subprocess.run(['rm','-rf',str(path_temp)], check=True)
# Run the install script
#subprocess.run(['bash',str(path_setup),'--quiet'], check=True)
# mkdir veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
# wget --output-document=./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2
# tar xvfj ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 -C ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
# ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --check
# ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --quiet
# rm -rf veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 21:
wall_and_print('Check for Rust and VeraCrypt after login.', csm.get_current_step())
# Write the following to /etc/profile.d/check_for_rust_and_veracrypt.sh and make it
# executable.
check_for_rust_and_veracrypt = """#!/bin/bash
if [ ! -e "$HOME/.cargo" ]; then
rustup.sh -y
fi
if ! command -v veracrypt > /dev/null; then
veracrypt-setup
fi
"""
path_check_for = pathlib.Path('/etc/profile.d/check_for_rust_and_veracrypt.sh')
path_check_for.write_text(check_for_rust_and_veracrypt)
path_check_for.chmod(MODE_EXECUTABLE)
go_again = True
csm.increment_current_step()
#elif csm.get_current_step() == 20:
# wall_and_print('One last reboot for good measure.', csm.get_current_step())
# need_reboot = True
# csm.increment_current_step()
# fix: Configure Little-Oven to automatically run on boot.
else:
wall_and_print('Little-Oven installed. Disabling the los service.')
subprocess.run(['systemctl','disable','los.service'], check=True)
if need_reboot:
wall_and_print('REBOOT!')
time.sleep(5.0)
subprocess.run(['reboot'], check=True)
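# Note: the steps above lean on helpers defined earlier in this script
# (simple_get, DirectoryMaker, wall_and_print, csm). As a hedged sketch of
# what simple_get is expected to do -- fetch a URL and write the bytes to a
# path -- something like the following would suffice; treat it as an
# illustration, not the actual definition:
#
#   import urllib.request
#
#   def simple_get(url, destination):
#       with urllib.request.urlopen(url) as response:
#           destination.write_bytes(response.read())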
| 50.244032
| 184
| 0.658853
| 2,508
| 18,942
| 4.775917
| 0.192982
| 0.071631
| 0.053765
| 0.062448
| 0.415929
| 0.344882
| 0.309401
| 0.279095
| 0.230172
| 0.197278
| 0
| 0.011435
| 0.196706
| 18,942
| 376
| 185
| 50.37766
| 0.775762
| 0.268821
| 0
| 0.227106
| 0
| 0.003663
| 0.221746
| 0.064963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040293
| false
| 0.03663
| 0.040293
| 0
| 0.091575
| 0.087912
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
723b9095a8d15e2c9c1b3f5d5be4c81a6f6e858e
| 2,304
|
py
|
Python
|
streamlit_app.py
|
fhebal/nlp-medical-notes
|
f1fed9e34ba47da14220b5719f28c1e720302f45
|
[
"MIT"
] | null | null | null |
streamlit_app.py
|
fhebal/nlp-medical-notes
|
f1fed9e34ba47da14220b5719f28c1e720302f45
|
[
"MIT"
] | null | null | null |
streamlit_app.py
|
fhebal/nlp-medical-notes
|
f1fed9e34ba47da14220b5719f28c1e720302f45
|
[
"MIT"
] | null | null | null |
import streamlit as st
import yaml
from load_css import local_css
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
import numpy as np
from random import sample
import os
local_css("style.css")
prediction_key = {
0:'Gastroenterology',
1:'Neurology',
2:'Orthopedic',
3:'Radiology',
4:'Urology'
}
class Highlighter():
def __init__(self):
self.start = "<span class='highlight blue'><span class='bold'>"
self.end = "</span></span>"
def highlight_match(self, text, config):
for value in config:
text = text.replace(" "+value+" ", "{0}"+value+"{1}")
text = "<div>" + text.format(self.start, self.end) + "</div>"
return text
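# Illustrative (hypothetical) use of Highlighter: given the keyword list
# ["knee"], highlight_match wraps each whitespace-delimited occurrence in
# the styled spans defined above:
#
#   highlighter = Highlighter()
#   highlighter.highlight_match("chronic knee pain", ["knee"])
#   # -> "<div>chronic <span class='highlight blue'><span class='bold'>knee</span></span> pain</div>"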
# Load model from file
model = tf.keras.models.load_model('/home/muody/saved_model/my_model', compile=False)
# load data
def load_data():
data_path = '/home/muody/data/medicalnotes/dataset/unlabeled-test-data/'
files = os.listdir(data_path)
sample_file = data_path + sample(files, 1)[0]
with open(sample_file, 'r') as stream:
sample_data = stream.read()
sample_data = sample_data.replace('\n','')
sample_data = sample_data.replace('</B>','')
sample_data = sample_data.replace('<B>','')
return sample_data
def main():
# INPUT DATA
#sample = st.text_input('Input your sentence here:')
sample = load_data()
prediction_arr = tf.sigmoid(model.predict(tf.convert_to_tensor([sample]))).numpy()
prediction_num = np.argmax(prediction_arr)
prediction = prediction_key[prediction_num]
prediction_text = "<div>Prediction: <span class='highlight red'><span class='bold'>" + prediction + '</span></span></div>'
st.markdown(prediction_text, unsafe_allow_html=True)
st.write('\n')
for key, value in prediction_key.items():
st.write(value, prediction_arr[0][key])
label = prediction_num
with open("config/{}.yaml".format(label), 'r') as stream:
try:
# the config file is a plain list of keywords, one per line; read()
# never parses YAML, so the YAMLError handler below is vestigial
config = stream.read().splitlines()
except yaml.YAMLError as exc:
print(exc)
highlighter = Highlighter()
t = highlighter.highlight_match(sample, config)
st.markdown(t, unsafe_allow_html=True)
if st.button("New Text Sample"):
main()
| 29.538462
| 126
| 0.647569
| 300
| 2,304
| 4.816667
| 0.36
| 0.055363
| 0.033218
| 0.041522
| 0.057439
| 0.038754
| 0
| 0
| 0
| 0
| 0
| 0.005501
| 0.210938
| 2,304
| 77
| 127
| 29.922078
| 0.789329
| 0.039931
| 0
| 0
| 0
| 0
| 0.161758
| 0.040779
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.155172
| 0
| 0.275862
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
723e3c60c657572c4703c5d71bdcbccb656fe914
| 18,265
|
py
|
Python
|
src/elora/elora.py
|
morelandjs/elora
|
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
|
[
"MIT"
] | 1
|
2021-07-26T20:36:32.000Z
|
2021-07-26T20:36:32.000Z
|
src/elora/elora.py
|
morelandjs/elora
|
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
|
[
"MIT"
] | null | null | null |
src/elora/elora.py
|
morelandjs/elora
|
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
|
[
"MIT"
] | null | null | null |
from operator import add, sub
import numpy as np
from scipy.stats import norm
class Elora:
def __init__(self, times, labels1, labels2, values, biases=0):
"""
Elo regressor algorithm for paired comparison time series prediction
Author: J. Scott Moreland
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison outcome values
biases (array of float or scalar, optional): comparison bias
corrections
Attributes:
examples (np.recarray): time-sorted numpy record array of
(time, label1, label2, bias, value, value_pred) samples
first_update_time (np.datetime64): time of the first comparison
last_update_time (np.datetime64): time of the last comparison
labels (array of string): unique compared entity labels
median_value (float): median expected comparison value
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
values = np.array(values, dtype='float', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
self.first_update_time = times.min()
self.last_update_time = times.max()
self.labels = np.union1d(labels1, labels2)
self.median_value = np.median(values)
prior = self.median_value * np.ones_like(values, dtype=float)
self.examples = np.sort(
np.rec.fromarrays([
times,
labels1,
labels2,
biases,
values,
prior,
], names=(
'time',
'label1',
'label2',
'bias',
'value',
'value_pred'
)), order=['time', 'label1', 'label2'], axis=0)
@property
def initial_rating(self):
"""
Customize this function for a given subclass.
It computes the initial rating, equal to the rating one would
expect if all labels were interchangeable.
Default behavior is to return one-half the median outcome value
if the labels commute, otherwise 0.
"""
return .5*self.median_value if self.commutes else 0
def regression_coeff(self, elapsed_time):
"""
Customize this function for a given subclass.
It computes the regression coefficient—prefactor multiplying the
rating of each team evaluated at each update—as a function of
elapsed time since the last rating update for that label.
Default behavior is to return 1, i.e. no rating regression.
"""
return 1.0
def evolve_rating(self, rating, elapsed_time):
"""
Applies rating regression to the given rating for the elapsed time
since its last update and returns the evolved rating.
Args:
rating (float): rating at the time of the last update
elapsed_time (np.timedelta64): time elapsed since the last update
Returns:
rating (float): rating regressed toward the initial rating
"""
regress = self.regression_coeff(elapsed_time)
return regress * rating + (1.0 - regress) * self.initial_rating
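# A hedged sketch of the intended customization point: a hypothetical
# subclass whose ratings decay exponentially toward the initial rating
# with an assumed 100-day timescale.
#
#   class DecayingElora(Elora):
#       def regression_coeff(self, elapsed_time):
#           days = elapsed_time / np.timedelta64(1, 'D')
#           return np.exp(-days / 100.0)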
def fit(self, k, commutes, scale=1, burnin=0):
"""
Primary routine that performs model calibration by processing the
time-sorted training examples in a single forward pass.
Args:
k (float): coefficient that multiplies the prediction error to
determine the rating update.
commutes (bool): false if the observed values change sign under
label interchange and true otherwise.
scale (float, optional): scale parameter of the predicted normal
distribution; default is 1.
burnin (int, optional): currently unused; default is 0.
"""
self.commutes = commutes
self.scale = scale
self.commutator = 0. if commutes else self.median_value
self.compare = add if commutes else sub
record = {label: [] for label in self.labels}
prior_state_dict = {}
for idx, example in enumerate(self.examples):
time, label1, label2, bias, value, value_pred = example
default = (time, self.initial_rating)
prior_time1, prior_rating1 = prior_state_dict.get(label1, default)
prior_time2, prior_rating2 = prior_state_dict.get(label2, default)
rating1 = self.evolve_rating(prior_rating1, time - prior_time1)
rating2 = self.evolve_rating(prior_rating2, time - prior_time2)
value_pred = self.compare(rating1, rating2) + self.commutator + bias
self.examples[idx]['value_pred'] = value_pred
rating_change = k * (value - value_pred)
rating1 += rating_change
rating2 += rating_change if self.commutes else -rating_change
record[label1].append((time, rating1))
record[label2].append((time, rating2))
prior_state_dict[label1] = (time, rating1)
prior_state_dict[label2] = (time, rating2)
for label in record.keys():
record[label] = np.rec.array(
record[label], dtype=[
('time', 'datetime64[s]'), ('rating', 'float')])
self.record = record
residuals = np.rec.fromarrays([
self.examples.time,
self.examples.value - self.examples.value_pred
], names=('time', 'residual'))
return residuals
def get_rating(self, times, labels):
"""
Query label state(s) at the specified time accounting
for rating regression.
Args:
times (array of np.datetime64): Comparison datetimes
labels (array of string): Comparison entity labels
Returns:
rating (array): ratings for each time and label pair
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels = np.array(labels, dtype='str', ndmin=1)
ratings = np.empty_like(times, dtype='float')
for idx, (time, label) in enumerate(zip(times, labels)):
try:
label_record = self.record[label]
index = label_record.time.searchsorted(time)
prev_index = max(index - 1, 0)
prior_state = label_record[prev_index]
rating = self.evolve_rating(
prior_state.rating, time - prior_state.time)
except KeyError:
rating = self.initial_rating
ratings[idx] = rating
return ratings
def cdf(self, x, times, labels1, labels2, biases=0):
"""
Computes the cumulative distribution function (CDF) for each
comparison, i.e. prob(value < x).
Args:
x (array of float): threshold of comparison for each value
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
y (array of float): cumulative distribution function value
for each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return norm.cdf(x, loc=loc, scale=self.scale)
def sf(self, x, times, labels1, labels2, biases=0):
"""
Computes the survival function (SF) for each
comparison, i.e. prob(value > x).
Args:
x (array of float): threshold of comparison for each value
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
y (array of float): survival function value for each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(norm.sf(x, loc=loc, scale=self.scale))
def pdf(self, x, times, labels1, labels2, biases=0):
"""
Computes the probability density function (PDF) for each
comparison, i.e. p(x).
Args:
x (array of float): input values
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
y (array of float): probability density at each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(norm.pdf(x, loc=loc, scale=self.scale))
def percentile(self, p, times, labels1, labels2, biases=0):
"""
Computes percentiles p of the probability distribution.
Args:
p (array of float): percentiles to evaluate (in range [0, 100])
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
x (array of float): values of the distribution corresponding to
each percentile
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
p = np.true_divide(p, 100.0)
if np.count_nonzero(p < 0.0) or np.count_nonzero(p > 1.0):
raise ValueError("percentiles must be in the range [0, 100]")
return np.squeeze(norm.ppf(p, loc=loc, scale=self.scale))
def quantile(self, q, times, labels1, labels2, biases=0):
"""
Computes quantiles q of the probability distribution.
Same as percentiles but accepts values [0, 1].
Args:
q (array of float): quantiles to evaluate (in range [0, 1])
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
x (array of float): values of the distribution corresponding to
each quantile
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(
norm.ppf(q, loc=loc[:, np.newaxis], scale=self.scale))
def mean(self, times, labels1, labels2, biases=0):
"""
Computes the mean of the probability distribution.
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
biases (array of float): comparison bias correction factors,
default value is 0
Returns:
y (array of float): mean of the probability distribution
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(loc)
def residuals(self, y_true=None, standardize=False):
"""
Computes residuals of the model predictions for each training example
Args:
y_true (array of float, optional): observed outcome values;
defaults to the training example values
standardize (bool): if True, the residuals are standardized to unit
variance
Returns:
residuals (array of float): residuals for each example
"""
y_pred = self.mean(
self.examples.time,
self.examples.label1,
self.examples.label2,
self.examples.bias)
if y_true is None:
y_true = self.examples.value
residuals = y_true - y_pred
if standardize is True:
quantiles = [.159, .841]
qlo, qhi = self.quantile(
quantiles,
self.examples.time,
self.examples.label1,
self.examples.label2,
self.examples.bias
).T
residuals /= .5*abs(qhi - qlo)
return residuals
def rank(self, time):
"""
Ranks labels by comparing mean of each label to the average label.
Args:
time (np.datetime64): time at which the ranking should be computed.
Returns:
label rankings (list of tuples): returns a rank sorted list of
(label, rank) pairs, where rank is the comparison value of
the specified summary statistic.
"""
ranked_list = [
(label, self.get_rating(time, label).item())
for label in self.labels]
return sorted(ranked_list, key=lambda v: v[1], reverse=True)
def sample(self, times, labels1, labels2, biases=0, size=1):
"""
Draw random samples from the predicted comparison probability
distribution.
Args:
times (array_like of np.datetime64): list of datetimes.
labels1 (array_like of string): list of first entity labels.
labels2 (array_like of string): list of second entity labels.
biases (array_like of float, optional): single bias number or
list of bias numbers which match the comparison inputs.
Default is 0, in which case no bias is used.
size (int, optional): number of samples to be drawn.
default is 1, in which case a single value is returned.
Returns:
x (array of float): random samples for the comparison outcome
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
if size < 1 or not isinstance(size, int):
raise ValueError("sample size must be a positive integer")
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return norm.rvs(loc=loc, scale=self.scale, size=size)
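# Minimal usage sketch with made-up labels, dates, and outcome values:
# calibrate on three comparisons, then query a mean prediction for a
# later matchup.
#
#   times = ['2021-01-01', '2021-01-08', '2021-01-15']
#   model = Elora(times, ['alpha', 'beta', 'alpha'],
#                 ['beta', 'gamma', 'gamma'], [3., -1., 2.])
#   model.fit(k=0.1, commutes=False, scale=1.)
#   model.mean('2021-02-01', 'alpha', 'beta')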
| 36.750503
| 80
| 0.594854
| 2,170
| 18,265
| 4.957604
| 0.136866
| 0.033835
| 0.030117
| 0.022123
| 0.556051
| 0.52045
| 0.496375
| 0.470998
| 0.466629
| 0.451292
| 0
| 0.02295
| 0.312948
| 18,265
| 496
| 81
| 36.824597
| 0.83417
| 0.373446
| 0
| 0.42233
| 0
| 0
| 0.043626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072816
| false
| 0
| 0.014563
| 0
| 0.160194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
723fcadfa719088f86b59d8093c6f9655d115794
| 48,147
|
py
|
Python
|
steady_cell_phenotype/poly.py
|
knappa/steadycellphenotype
|
b033f01ebc1fa062d310296f19f2f11b484cb557
|
[
"MIT"
] | 1
|
2021-12-13T22:20:19.000Z
|
2021-12-13T22:20:19.000Z
|
steady_cell_phenotype/poly.py
|
knappa/steadycellphenotype
|
b033f01ebc1fa062d310296f19f2f11b484cb557
|
[
"MIT"
] | 5
|
2021-04-07T01:47:19.000Z
|
2021-11-17T01:46:19.000Z
|
steady_cell_phenotype/poly.py
|
knappa/steadycellphenotype
|
b033f01ebc1fa062d310296f19f2f11b484cb557
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import operator
from enum import Enum
from itertools import product
from typing import Dict, Union
import numpy as np
class Operation(Enum):
PLUS = 'PLUS'
MINUS = 'MINUS'
TIMES = 'TIMES'
EXP = 'EXP'
MAX = 'MAX'
MIN = 'MIN'
CONT = 'CONT'
NOT = 'NOT'
####################################################################################################
def h(x, fx):
"""helper function as in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24"""
fx = fx % 3
x = x % 3
if fx > x:
return x + 1
elif fx < x:
return x - 1
else:
return x
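# For example, h(0, 2) == 1 and h(2, 0) == 1: each application moves the
# state toward the target value fx by at most one step.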
####################################################################################################
# monomial and sparse polynomial classes. These should be faster than the sympy versions due to
# their reduced scope.
####################################################################################################
class Expression(object):
def __add__(self, other):
return BinaryOperation('PLUS', self, other)
__radd__ = __add__
def __sub__(self, other):
return BinaryOperation('MINUS', self, other)
def __mul__(self, other):
return BinaryOperation('TIMES', self, other)
__rmul__ = __mul__
def __neg__(self):
return UnaryRelation('MINUS', self)
def __pow__(self, power, modulo=None):
return BinaryOperation('EXP', self, power)
# def __divmod__(self, other):
# raise NotImplementedError("division, modulus not implemented")
# def __truediv__(self, other):
# raise NotImplementedError("truediv not implemented")
# def __floordiv__(self, other):
# raise NotImplementedError("floordiv not implemented")
def eval(self, variable_dict):
"""
evaluates the expression. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type.
:param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
:return: evaluated expression
"""
raise NotImplementedError("eval() unimplemented in " + str(type(self)))
def is_constant(self):
raise NotImplementedError("is_constant() unimplemented in " + str(type(self)))
def as_c_expression(self):
raise NotImplementedError("as_c_expression() unimplemented in " + str(type(self)))
def as_polynomial(self) -> Union[int, Expression]:
raise NotImplementedError("as_polynomial() unimplemented in " + str(type(self)))
# def as_sympy(self):
# """
# converts to sympy expression
#
# Returns
# -------
# sympy expression
# """
# raise NotImplementedError("as_sympy() unimplemented in " + str(type(self)))
def as_numpy_str(self, variables) -> str:
"""
returns a numpy-evaluable expression string in terms of state[i],
where i is the index of each variable in the variables parameter
Parameters
----------
variables
Returns
-------
str: numpy expression referencing len(variables) state entries
"""
raise NotImplementedError("as_numpy_str() unimplemented in " + str(type(self)))
def get_variable_set(self):
""" returns a set containing all variable which occur in this expression """
raise NotImplementedError("get_var_set() unimplemented in " + str(type(self)))
def num_variables(self):
""" returns the number of variables which occur in this expression """
return len(self.get_variable_set())
def rename_variables(self, name_dict: Dict[str, str]):
""" rename variables """
raise NotImplementedError("rename_variables() unimplemented in " + str(type(self)))
def continuous_function_version(self, control_variable):
"""
Wrap this equation with the 'continuity controller' i.e. return CONT(control_variable,self)
:param control_variable: variable or string
:return: functional continuous version
"""
if self.is_constant():
return self
if isinstance(control_variable, str):
control_variable = Monomial.as_var(control_variable)
return Function('CONT', [control_variable, self])
####################################################################################################
#
# the following method converts a system of equations into one which is "continuous" in the sense
# that application of the system does not change the per-coordinate values by more than 1. This is
# accomplished by a type of curve fitting. Fortunately, the formula for this
#
# g(x) = sum_{c\in \F_3^n} h(c) prod_{j=0}^n (1-(x_j-c_j)**2)
#
# (as seen in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24) admits a recursive
# formulation. That is, for a polynomial x_k = f_k(x_0,x_1,...,x_l) we can select one of the
# variables, say x_0 and reduce the polynomial each of 3-ways x_0=0, x_0=1, and x_0=2. This
# correspondingly divides the sum into those which have each of the 3 types of terms
# (1-(x_0-c_0)**2) for c_0=0, c_0=1, and c_0=2
#
# fortunately, (1-(x_j-0)**2)+(1-(x_j-1)**2)+(1-(x_j-2)**2) = 1 so if the evaluations of f become
# constant or even simply eliminate a variable, we need no longer consider that variable.
#
# recursion proceeds by eliminating variables in this manner, multiplying by the appropriate fitting
# term (1-(x_j-c_j)**2) (c_j being the evaluated value of x_j) on the way up.
#
# this comment is not really the place for a full proof of this method, but the proof is easily
# obtained from the above.
#
####################################################################################################
def continuous_polynomial_version(self, control_variable):
if self.is_constant():
return self
if isinstance(control_variable, str):
control_variable = Monomial.as_var(control_variable)
# as the control variable is special (due to use in the 'h' function),
# we will need to go through the procedure for it separately, first
accumulator = Mod3Poly.zero()
for control_variable_value in range(3):
evaluated_poly = self.eval({control_variable: control_variable_value})
if is_integer(evaluated_poly) or evaluated_poly.is_constant():
computed_value = int(evaluated_poly)
continuous_value = h(control_variable_value, computed_value)
accumulator += continuous_value * (1 - (control_variable - control_variable_value) ** 2)
else:
accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
(1 - (control_variable - control_variable_value) ** 2)
return accumulator
def continuous_version_helper(self, control_variable_value):
# find some free variable
free_variable = tuple(self.get_variable_set())[0]
if isinstance(free_variable, str):
free_variable = Monomial.as_var(free_variable)
# iterate over the ways of setting that variable: 0, 1, 2
accumulator = Mod3Poly.zero()
for free_variable_value in range(3):
evaluated_poly = self.eval({free_variable: free_variable_value})
if is_integer(evaluated_poly) or evaluated_poly.is_constant():
computed_value = int(evaluated_poly)
continuous_value = h(control_variable_value, computed_value)
accumulator += \
continuous_value * (1 - (free_variable - free_variable_value) ** 2)
else:
accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
(1 - (free_variable - free_variable_value) ** 2)
return accumulator
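# Illustrative (hypothetical) use of the construction above: wrap a toy
# one-variable update rule so that each application changes x by at most
# one step.
#
#   x = Monomial.as_var('x')
#   g = (2 * x).continuous_polynomial_version('x')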
####################################################################################################
def rename_helper(expression: Union[Expression, int], name_dict: Dict[str, str]):
if is_integer(expression):
return expression
else:
return expression.rename_variables(name_dict=name_dict)
####################################################################################################
# actions on expressions, suitable for conversion to polynomial form. Not best for simulator.
def mod_3(n):
return n % 3
def not3(n):
value = 2 + 2 * n
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def max3(a, b):
value = a + b + 2 * a * b + (a ** 2) * b + a * (b ** 2) + (a ** 2) * (b ** 2)
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def min3(a, b):
value = a * b + 2 * (a ** 2) * b + 2 * a * (b ** 2) + 2 * (a ** 2) * (b ** 2)
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def is_integer(x):
return isinstance(x, int) or isinstance(x, np.integer)
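# A quick commented sanity check that the polynomial encodings agree with
# the ordinary max/min on the field with three elements:
#
#   for a in range(3):
#       for b in range(3):
#           assert max3(a, b) == max(a, b)
#           assert min3(a, b) == min(a, b)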
####################################################################################################
class Function(Expression):
def __init__(self, function_name, expression_list):
self._function_name = function_name
self._expression_list = expression_list
def rename_variables(self, name_dict: Dict[str, str]):
renamed_parameters = [rename_helper(expr, name_dict) for expr in self._expression_list]
return Function(self._function_name, renamed_parameters)
def eval(self, variable_dict):
# evaluate function parameters
evaluated_expressions = [expr if is_integer(expr)
else expr.eval(variable_dict)
for expr in self._expression_list]
# simplify constants to ints, if possible
evaluated_expressions = [int(expr) if is_integer(expr) or expr.is_constant()
else expr
for expr in evaluated_expressions]
if self._function_name == 'MAX':
assert len(evaluated_expressions) == 2, "wrong number of arguments for MAX"
expr_one, expr_two = evaluated_expressions
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr_one) and is_integer(expr_two):
expr_one = mod_3(expr_one)
expr_two = mod_3(expr_two)
return max(expr_one, expr_two)
elif is_integer(expr_one) and expr_one == 2:
return 2
elif is_integer(expr_one) and expr_one == 0:
return expr_two
elif is_integer(expr_two) and expr_two == 2:
return 2
elif is_integer(expr_two) and expr_two == 0:
return expr_one
else:
return Function('MAX', [expr_one, expr_two])
elif self._function_name == 'MIN':
assert len(evaluated_expressions) == 2, "wrong number of arguments for MIN"
expr_one, expr_two = evaluated_expressions
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr_one) and is_integer(expr_two):
expr_one = mod_3(expr_one)
expr_two = mod_3(expr_two)
return min(expr_one, expr_two)
elif is_integer(expr_one) and expr_one == 2:
return expr_two
elif is_integer(expr_one) and expr_one == 0:
return 0
elif is_integer(expr_two) and expr_two == 2:
return expr_one
elif is_integer(expr_two) and expr_two == 0:
return 0
else:
return Function('MIN', [expr_one, expr_two])
elif self._function_name == 'CONT':
assert len(evaluated_expressions) == 2, "wrong number of arguments for CONT"
ctrl_var, expr = evaluated_expressions
if is_integer(ctrl_var):
raise Exception("Unsupported; nonsense")
return Function('CONT', [ctrl_var, expr])
elif self._function_name == 'NOT':
assert len(evaluated_expressions) == 1, "wrong number of arguments for NOT"
expr = evaluated_expressions[0]
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr):
return not3(int(expr))
else:
return Function('NOT', [expr])
else:
raise Exception("cannot evaluate unknown function " + self._function_name)
def is_constant(self):
return all(is_integer(expr) or expr.is_constant()
for expr in self._expression_list)
def __str__(self):
return self._function_name + "(" + ",".join([str(exp) for exp in self._expression_list]) + ")"
__repr__ = __str__
def as_c_expression(self):
c_exprs = [str(expr) if is_integer(expr) else expr.as_c_expression() for expr in self._expression_list]
if self._function_name == 'MAX':
func_name = 'mod3max'
elif self._function_name == 'MIN':
func_name = 'mod3min'
elif self._function_name == 'CONT':
func_name = 'mod3continuity'
elif self._function_name == 'NOT':
func_name = 'mod3not'
else:
raise Exception("Unknown binary relation: " + self._function_name)
return func_name + '(' + ",".join(c_exprs) + ')'
def as_polynomial(self):
expressions_as_polynomials = [mod_3(expr) if is_integer(expr)
else expr.as_polynomial()
for expr in self._expression_list]
if self._function_name == 'MAX':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MAX"
return max3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'MIN':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MIN"
return min3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'CONT':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for CONT"
return expressions_as_polynomials[1].continuous_polynomial_version(expressions_as_polynomials[0])
elif self._function_name == 'NOT':
assert len(expressions_as_polynomials) == 1, "wrong number of arguments for NOT"
return not3(expressions_as_polynomials[0])
else:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a polynomial")
# def as_sympy(self):
#
# def cont_sympy(control, expr):
# return expr if is_integer(expr) \
# else expr.continuous_polynomial_version(control)
#
# def not_sympy(expr):
# return 1 - expr
#
# # tuples are param-count, function
# functions = {'MAX': (2, sympy.Max),
# 'MIN': (2, sympy.Min),
# 'CONT': (2, cont_sympy),
# 'NOT': (1, not_sympy)}
#
# if self._function_name not in functions:
# raise Exception("cannot evaluate unknown function " + self._function_name + " as a sympy expression")
#
# if len(self._expression_list) != functions[self._function_name][0]:
# raise Exception(f"Wrong number of arguments for {self._function_name}")
#
# function = functions[self._function_name][1]
#
# sympy_expressions = [sympy.Mod(expr, 3) if is_integer(expr)
# else sympy.Mod(expr.as_sympy(), 3)
# for expr in self._expression_list]
# return function(*sympy_expressions)
def as_numpy_str(self, variables) -> str:
np_parameter_strings = [str(expr) if is_integer(expr)
else expr.as_numpy_str(variables)
for expr in self._expression_list]
# this one is slow
# continuous_str = "( (({1})>({0})) * (({0})+1) + (({1})<({0})) * (({0})-1) + (({1})==({0}))*({0}) )"
continuous_str = "( {0}+np.sign(np.mod({1},3)-np.mod({0},3)) )"
max_str = "np.maximum(np.mod({0},3),np.mod({1},3))"
min_str = "np.minimum(np.mod({0},3),np.mod({1},3))"
not_str = "(2-({0}))"
# tuples are param-count, function
function_strings = {'MAX': (2, max_str),
'MIN': (2, min_str),
'CONT': (2, continuous_str),
'NOT': (1, not_str)}
if self._function_name not in function_strings:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a numpy function")
if len(self._expression_list) != function_strings[self._function_name][0]:
raise Exception(f"Wrong number of arguments for {self._function_name}")
function = function_strings[self._function_name][1]
return function.format(*np_parameter_strings)
def get_variable_set(self):
var_set = set()
for expr in self._expression_list:
if not is_integer(expr):
var_set = var_set.union(expr.get_variable_set())
return var_set
class BinaryOperation(Expression):
def __init__(self, relation_name, left_expression: Union[Expression, int],
right_expression: Union[Expression, int]):
self.relation_name = relation_name
self._left_expression: Union[Expression, int] = left_expression
self._right_expression: Union[Expression, int] = right_expression
def rename_variables(self, name_dict: Dict[str, str]):
renamed_left_expression = rename_helper(self._left_expression, name_dict)
renamed_right_expression = rename_helper(self._right_expression, name_dict)
return BinaryOperation(self.relation_name,
left_expression=renamed_left_expression,
right_expression=renamed_right_expression)
def is_constant(self):
return (is_integer(self._left_expression) or self._left_expression.is_constant()) and \
(is_integer(self._right_expression) or self._right_expression.is_constant())
def eval(self, variable_dict):
"""
evaluate parameters, making them ints if possible
:param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
:return: evaluated expression
"""
evaled_left_expr = self._left_expression if is_integer(self._left_expression) \
else self._left_expression.eval(variable_dict)
evaled_left_expr = int(evaled_left_expr) \
if is_integer(evaled_left_expr) or evaled_left_expr.is_constant() \
else evaled_left_expr
evaled_right_expr = self._right_expression if is_integer(self._right_expression) \
else self._right_expression.eval(variable_dict)
evaled_right_expr = int(evaled_right_expr) \
if is_integer(evaled_right_expr) or evaled_right_expr.is_constant() \
else evaled_right_expr
if self.relation_name == 'PLUS':
return evaled_left_expr + evaled_right_expr
elif self.relation_name == 'MINUS':
return evaled_left_expr - evaled_right_expr
elif self.relation_name == 'TIMES':
return evaled_left_expr * evaled_right_expr
elif self.relation_name == 'EXP':
return evaled_left_expr ** evaled_right_expr
else:
raise Exception("cannot evaluate unknown binary op: " + self.relation_name)
def __str__(self):
short_relation_name = "?"
if self.relation_name == 'PLUS':
short_relation_name = '+'
elif self.relation_name == 'MINUS':
short_relation_name = '-'
elif self.relation_name == 'TIMES':
short_relation_name = '*'
elif self.relation_name == 'EXP':
short_relation_name = '^'
left_side = str(self._left_expression)
if isinstance(self._left_expression, BinaryOperation):
left_side = "(" + left_side + ")"
right_side = str(self._right_expression)
if isinstance(self._right_expression, BinaryOperation):
right_side = "(" + right_side + ")"
return left_side + short_relation_name + right_side
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._left_expression):
left_c_expr = str(self._left_expression)
else:
left_c_expr = self._left_expression.as_c_expression()
if is_integer(self._right_expression):
right_c_expr = str(self._right_expression)
else:
right_c_expr = self._right_expression.as_c_expression()
if self.relation_name == 'PLUS':
return '(' + left_c_expr + ')+(' + right_c_expr + ')'
elif self.relation_name == 'MINUS':
return '(' + left_c_expr + ')-(' + right_c_expr + ')'
elif self.relation_name == 'TIMES':
return '(' + left_c_expr + ')*(' + right_c_expr + ')'
elif self.relation_name == 'EXP':
return 'mod3pow(' + left_c_expr + ',' + right_c_expr + ')'
else:
raise Exception("Unknown binary relation: " + self.relation_name)
def as_polynomial(self):
if is_integer(self._left_expression):
left_poly = self._left_expression
else:
left_poly = self._left_expression.as_polynomial()
if is_integer(self._right_expression):
right_poly = self._right_expression
else:
right_poly = self._right_expression.as_polynomial()
if self.relation_name == 'PLUS':
return left_poly + right_poly
elif self.relation_name == 'MINUS':
return left_poly - right_poly
elif self.relation_name == 'TIMES':
return left_poly * right_poly
elif self.relation_name == 'EXP':
# simplify the exponent = 0, 1 cases
if is_integer(right_poly):
if right_poly == 0:
return 1
elif right_poly == 1:
return left_poly
else:
return left_poly ** right_poly
else:
return left_poly ** right_poly
else:
raise Exception("Unknown binary relation: " + self.relation_name)
# def as_sympy(self):
# """
# Convert to sympy expression
# Returns
# -------
# sympy expression
# """
#
# def simple_pow(left_exp, right_exp):
# # simplify the exponent = 0, 1 cases
# if is_integer(right_exp):
# if right_exp == 0:
# return 1
# elif right_exp == 1:
# return left_exp
# else:
# return left_exp ** right_exp
# else:
# return left_exp ** right_exp
#
# relations = {'PLUS': operator.add,
# 'MINUS': operator.sub,
# 'TIMES': operator.mul,
# 'EXP': simple_pow}
#
# if self.relation_name not in relations:
# raise Exception("Unknown binary relation: " + self.relation_name)
#
# lhs = self._left_expression if is_integer(self._left_expression) else self._left_expression.as_sympy()
# rhs = self._right_expression if is_integer(self._right_expression) else self._right_expression.as_sympy()
#
# return relations[self.relation_name](lhs, rhs)
def as_numpy_str(self, variables) -> str:
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str version of numpy function
"""
relations = {'PLUS': "(({0})+({1}))",
'MINUS': "(({0})-({1}))",
'TIMES': "(({0})*({1}))",
'EXP': "(({0})**({1}))"}
if self.relation_name not in relations:
raise Exception("Unknown binary relation: " + self.relation_name)
lhs = str(self._left_expression) if is_integer(self._left_expression) \
else self._left_expression.as_numpy_str(variables)
rhs = str(self._right_expression) if is_integer(self._right_expression) \
else self._right_expression.as_numpy_str(variables)
return relations[self.relation_name].format(lhs, rhs)
def get_variable_set(self):
var_set = set()
if not is_integer(self._left_expression):
var_set = var_set.union(self._left_expression.get_variable_set())
if not is_integer(self._right_expression):
var_set = var_set.union(self._right_expression.get_variable_set())
return var_set
class UnaryRelation(Expression):
def __init__(self, relation_name, expr):
self._relation_name = relation_name
self._expr = expr
def rename_variables(self, name_dict: Dict[str, str]):
return UnaryRelation(relation_name=self._relation_name,
expr=rename_helper(self._expr, name_dict))
def is_constant(self):
return self._expr.is_constant()
def eval(self, variable_dict):
if self._relation_name == 'MINUS':
if is_integer(self._expr):
return (-1) * self._expr
elif isinstance(self._expr, Expression):  # isinstance, not type equality: subclasses of Expression must match
evaluated_subexpression = self._expr.eval(variable_dict)
if is_integer(evaluated_subexpression) or evaluated_subexpression.is_constant():
return (-1) * int(evaluated_subexpression)
else:
return (-1) * evaluated_subexpression
else:
raise Exception("UnaryRelation in bad state with unknown unary relation name")
def __str__(self) -> str:
short_rel_name = str(self._relation_name)
if self._relation_name == 'MINUS':
short_rel_name = '-'
return short_rel_name + (
"(" + str(self._expr) + ")" if type(self._expr) == BinaryOperation else str(self._expr))
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._expr):
c_exp = str(mod_3(self._expr))
else:
c_exp = self._expr.as_c_expression()
if self._relation_name == 'MINUS':
return '-(' + c_exp + ')'
else:
raise Exception("Unknown binary relation: " + self._relation_name)
def as_polynomial(self):
if is_integer(self._expr) or self._expr.is_constant():
poly = mod_3(int(self._expr))
else:
poly = self._expr.as_polynomial()
if self._relation_name == 'MINUS':
return (-1) * poly
else:
raise Exception("Unknown unary relation: " + self._relation_name)
def as_sympy(self):
"""
Convert to sympy expression
Returns
-------
sympy expression
"""
relations = {'MINUS': operator.neg}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr = self._expr if is_integer(self._expr) else self._expr.as_sympy()
return relations[self._relation_name](expr)
def as_numpy_str(self, variables):
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str numpy-representation
"""
relations = {'MINUS': "(-({0}))"}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr_str = str(self._expr) if is_integer(self._expr) \
else self._expr.as_numpy_str(variables)
return relations[self._relation_name].format(expr_str)
def get_variable_set(self):
if is_integer(self._expr):
return set()
else:
return self._expr.get_variable_set()
####################################################################################################
class Monomial(Expression):
"""A class to encapsulate monomials reduced by x^3-x==0 for all variables x"""
def __init__(self, power_dict: dict):
# copy over only those terms which actually appear
self._power_dict = {str(var): power_dict[var] for var in power_dict if power_dict[var] != 0}
for var in self._power_dict.keys():
# while self._power_dict[var] < 0:
# self._power_dict[var] += 2 <--- replace with below
assert self._power_dict[var] > 0 # b/c x^-1 isn't exactly x (i.e. when x=0)
# while self._power_dict[var] >= 3:
# self._power_dict[var] -= 2 <--- replace with below
self._power_dict[var] = 1 + ((-1 + self._power_dict[var]) % 2)
def rename_variables(self, name_dict: Dict[str, str]):
# this ends up a little more complicated than I was originally thinking, b/c
# I would like to allow two variables to be updated to the same new name
renamed_dict = dict()
for variable, exponent in self._power_dict.items():
name = variable
if variable in name_dict:
name = name_dict[variable]
if name in renamed_dict:
renamed_dict[name] += self._power_dict[variable]
renamed_dict[name] = 1 + ((-1 + renamed_dict[name]) % 2)
else:
renamed_dict[name] = self._power_dict[variable]
return Monomial(power_dict=renamed_dict)
def as_polynomial(self):
return self
def is_constant(self):
return len(self._power_dict) == 0
def num_variables(self):
return len(self._power_dict)
def variable_list(self):
return self._power_dict.keys()
def eval(self, variable_dict: Dict):
"""evaluates the monomial. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type.
"""
if type(variable_dict) != dict:
raise Exception("eval is not defined on this input")
# sanitize inputs
sanitized_variable_dict = dict()
for variable, quantity in variable_dict.items():
if type(variable) == str:
sanitized_variable_dict.update({variable: variable_dict[variable]})
elif type(variable) == Monomial:
if variable.num_variables() != 1:
raise Exception(
"We do not know how to evaluate monomials of zero or several variables to a single number")
else:
variable_as_str = list(variable.variable_list())[0]
sanitized_variable_dict.update({variable_as_str: variable_dict[variable]})
variable_dict = sanitized_variable_dict
accumulator = Mod3Poly.one()
for variable, quantity in self._power_dict.items():
if variable in variable_dict.keys():
accumulator *= variable_dict[variable] ** self._power_dict[variable]
else:
accumulator *= Monomial.as_var(variable) ** self._power_dict[variable]
return accumulator
def get_variable_set(self):
""" returns a set containing all variable which occur in this monomial """
return {var for var in self._power_dict if self._power_dict[var] != 0}
@staticmethod
def unit():
"""produces the unit, 1, as a monomial"""
return Monomial(dict())
@staticmethod
def as_var(var_name: str):
return Monomial({var_name: 1})
def __mul__(self, other) -> Expression:
if isinstance(other, Monomial):
result_power_dict = self._power_dict.copy()
for key in other._power_dict.keys():
if key in result_power_dict.keys():
result_power_dict[key] += other._power_dict[key]
while result_power_dict[key] >= 3:
result_power_dict[key] -= 2
else:
result_power_dict[key] = other._power_dict[key]
return Monomial(result_power_dict)
elif isinstance(other, Mod3Poly) or is_integer(other):
return self.as_poly() * other
else:
return BinaryOperation('TIMES', self, other)
# raise TypeError("unsupported operand type(s) for *: '{}' and '{}'".format(self.__class__, type(other)))
__rmul__ = __mul__
def __neg__(self):
return (-1) * self
def __pow__(self, power, **kwargs):
if type(power) == Mod3Poly and power.is_constant():
power = power[Monomial.unit()]
assert is_integer(power)
if power == 0:
return Monomial.unit()
elif power == 1:
return self
elif power == 2:
return self * self
# Now handle higher powers; probably not going to happen too much for this application
# (int) half power root
int_root = self ** (power // 2)
if power % 2 == 0:
return int_root * int_root
else:
return int_root * int_root * self
def as_poly(self):
"""converts this monomial to a polynomial with only one term"""
return Mod3Poly({self: 1})
def __add__(self, other):
if isinstance(other, Mod3Poly):
return other + self.as_poly()
elif isinstance(other, Monomial):
return self.as_poly() + other.as_poly()
elif is_integer(other):
return self.as_poly() + other
elif isinstance(other, Expression):
return BinaryOperation("PLUS", self, other)
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + ((-1) * other)
def __rsub__(self, other):
return ((-1) * self) + other
def __eq__(self, other):
if type(other) == str:
other = Monomial.as_var(other)
if type(other) == Monomial:
return self._power_dict == other._power_dict
elif type(other) == Mod3Poly:
if len(other.coeff_dict) == 1:
monomial, coeff = list(other.coeff_dict.items())[0]  # iterate items, not just keys
return coeff == 1 and monomial == self
else:
return False
elif is_integer(other) and self == Monomial.unit():
return other == 1
else:
return False
def __ne__(self, other):
if type(other) == str:
other = Monomial.as_var(other)
return not (self == other)
def __lt__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if we have a var that they don't we cannot be "smaller"
if len(self_vars - other_vars) > 0:
return False
# check that we do not exceed and are smaller at least once
at_least_once_less = False
for var in self_vars:
if self._power_dict[var] > other._power_dict[var]:
return False
elif self._power_dict[var] < other._power_dict[var]:
at_least_once_less = True
return at_least_once_less or len(other_vars - self_vars) > 0
def __le__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if we have a var that they don't we cannot be "smaller"
if len(self_vars - other_vars) > 0:
return False
# check that we do not exceed
for var in self_vars:
if self._power_dict[var] > other._power_dict[var]:
return False
return True
def __gt__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if they have a var that we don't, we cannot be "greater"
if len(other_vars - self_vars) > 0:
return False
# check that we are not smaller and are greater at least once
at_least_once_greater = False
for var in other_vars:
if self._power_dict[var] < other._power_dict[var]:
return False
elif self._power_dict[var] > other._power_dict[var]:
at_least_once_greater = True
return at_least_once_greater or len(self_vars - other_vars) > 0
def __ge__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if they have a var that we don't, we cannot be "greater"
if len(other_vars - self_vars) > 0:
return False
# check that we are not smaller
for var in other_vars:
if self._power_dict[var] < other._power_dict[var]:
return False
return True
def __hash__(self):
return sum(hash(k) for k in self._power_dict.keys()) + \
sum(hash(v) for v in self._power_dict.values())
def __str__(self):
if self._power_dict == {}:
return "1"
else:
variables = sorted(self._power_dict.keys())
return "*".join([str(var) + "^" + str(self._power_dict[var])
if self._power_dict[var] > 1 else str(var) for var in variables])
__repr__ = __str__
def as_c_expression(self):
if self._power_dict == {}:
return "1"
else:
variables = sorted(self._power_dict.keys())
return "*".join(["mod3pow(" + str(var) + "," + str(self._power_dict[var]) + ")"
if self._power_dict[var] > 1 else str(var) for var in variables
if self._power_dict[var] != 0])
# def as_sympy(self):
# # sympy empty product is 1, consistent with power_dict
# return sympy.prod([sympy.Symbol(var, integer=True) ** pow
# for var, pow in self._power_dict.items()])
# # Fun fact: sympy doesn't recognize Symbol(var) and Symbol(var, integer=True) to be the same
def as_numpy_str(self, variables) -> str:
if len(self._power_dict) == 0:
return "1"
return '(' + \
'*'.join(["1".format(variables.index(var), self._power_dict[var])
if self._power_dict[var] == 0 else
"state[{0}]".format(variables.index(var))
if self._power_dict[var] == 1 else
"(state[{0}]**{1})".format(variables.index(var), self._power_dict[var])
for var in self._power_dict]) + \
')'
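# Example of the x^3 == x reduction (variable name hypothetical):
#
#   x = Monomial.as_var('x')
#   str(x * x)    # -> 'x^2'
#   str(x ** 3)   # -> 'x', exponents reduce via 1 + ((e - 1) % 2)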
####################################################################################################
class Mod3Poly(Expression):
"""a sparse polynomial class"""
def __init__(self, coeffs: Union[Dict, int]):
if type(coeffs) == dict:
self.coeff_dict = {monomial: coeffs[monomial] for monomial in coeffs if coeffs[monomial] != 0}
elif is_integer(coeffs):
self.coeff_dict = {Monomial.unit(): (coeffs % 3)}
else:
raise TypeError("unsupported initialization type for '{}': '{}'".format(self.__class__, type(coeffs)))
def rename_variables(self, name_dict: Dict[str, str]):
return Mod3Poly(coeffs={monomial.rename_variables(name_dict): coeff
for monomial, coeff in self.coeff_dict.items()})
@staticmethod
def zero():
return Mod3Poly({Monomial.unit(): 0})
@staticmethod
def one():
return Mod3Poly({Monomial.unit(): 1})
def as_polynomial(self):
return self
def __int__(self):
self.__clear_zero_monomials()
if len(self.coeff_dict) > 1 or (len(self.coeff_dict) == 1 and Monomial.unit() not in self.coeff_dict):
raise Exception("cannot cast non-constant polynomial to int")
if Monomial.unit() in self.coeff_dict:
return self.coeff_dict[Monomial.unit()]
else:
return 0
def eval(self, variable_dict):
"""evaluates the polynomial. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type. """
if type(variable_dict) != dict:
raise Exception("Mod3Poly.eval is not defined on this input")
accumulator = Mod3Poly.zero()
for monomial, coeff in self.coeff_dict.items():
accumulator += coeff * monomial.eval(variable_dict)
return accumulator
def get_variable_set(self):
"""return a set containing all variables which occur in this polynomial"""
var_set = set()
for monomial in self.coeff_dict:
var_set = var_set.union(monomial.get_variable_set())
return var_set
def __clear_zero_monomials(self):
"""purge unneeded data"""
self.coeff_dict = {monomial: self.coeff_dict[monomial]
for monomial in self.coeff_dict
if self.coeff_dict[monomial] != 0}
# assure at least one entry
if len(self.coeff_dict) == 0:
self.coeff_dict = {Monomial.unit(): 0}
def is_constant(self):
# possibly unnecessary
self.__clear_zero_monomials()
num_nonzero_monomial = len(self.coeff_dict)
if num_nonzero_monomial > 1:
return False
elif num_nonzero_monomial == 0:
return True
else:
# only one entry
return Monomial.unit() in self.coeff_dict
def __getitem__(self, index):
if index in self.coeff_dict:
return self.coeff_dict[index]
else:
return 0
def __setitem__(self, index, value):
self.coeff_dict[index] = value
def __add__(self, other):
if is_integer(other):
self_copy = Mod3Poly(self.coeff_dict)
self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] + other) % 3
return self_copy
elif isinstance(other, Monomial):
self_copy = Mod3Poly(self.coeff_dict)
self_copy[other] += 1
return self_copy
elif isinstance(other, Mod3Poly):
self_copy = Mod3Poly(self.coeff_dict)
for key in other.coeff_dict.keys():
if key in self_copy.coeff_dict.keys():
self_copy[key] = (self_copy[key] + other[key]) % 3
else:
self_copy[key] = other[key]
return self_copy
elif isinstance(other, Expression):
return BinaryOperation('PLUS', self, other)
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
__radd__ = __add__
def __sub__(self, other):
if is_integer(other):
self_copy = Mod3Poly(self.coeff_dict)
self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] - other) % 3
return self_copy
elif isinstance(other, Mod3Poly) or isinstance(other, Monomial):
self_copy = Mod3Poly(self.coeff_dict)
if isinstance(other, Monomial):
other = other.as_poly()
for key in other.coeff_dict.keys():
if key in self_copy.coeff_dict.keys():
self_copy[key] = (self_copy[key] - other[key]) % 3
else:
self_copy[key] = other[key]
return self_copy
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
def __rsub__(self, other):
return other + ((-1) * self)
def __mul__(self, other):
if is_integer(other):
return Mod3Poly({key: (self.coeff_dict[key] * other) % 3 for key in self.coeff_dict})
elif isinstance(other, Monomial):
return Mod3Poly({(other * monomial): self.coeff_dict[monomial] for monomial in self.coeff_dict})
elif isinstance(other, Mod3Poly):
accumulator = Mod3Poly.zero()
for self_mono, other_mono in product(self.coeff_dict.keys(), other.coeff_dict.keys()):
monomial_prod = self_mono * other_mono
accumulator[monomial_prod] = (accumulator[monomial_prod] + self[self_mono] * other[other_mono]) % 3
return accumulator
else:
return BinaryOperation('TIMES', self, other)
__rmul__ = __mul__
def __pow__(self, power, **kwargs):
if type(power) == Mod3Poly and power.is_constant():
power = power[Monomial.unit()]
assert is_integer(power)
if power == 0:
return Monomial.unit().as_poly()
elif power == 1:
return self
elif power == 2:
return self * self
        # Handle higher powers by repeated squaring; unlikely to be hit often for this application.
        # int_root is self raised to the integer half of the power.
int_root = self ** (power // 2)
if power % 2 == 0:
return int_root * int_root
else:
return int_root * int_root * self
def __str__(self):
accumulator = ""
for monomial in sorted(self.coeff_dict.keys()):
if monomial == Monomial.unit():
if self[monomial] != 0:
accumulator += str(self[monomial])
else:
if len(accumulator) > 0 and self[monomial] != 0:
accumulator += "+"
if self[monomial] == 1:
accumulator += str(monomial)
elif self[monomial] == 2:
accumulator += "2*"
accumulator += str(monomial)
if len(accumulator) > 0:
return accumulator
else:
return "0"
__repr__ = __str__
def as_c_expression(self):
accumulator = ""
for monomial in sorted(self.coeff_dict.keys()):
if monomial == Monomial.unit():
if self[monomial] != 0:
accumulator += str(self[monomial])
else:
if len(accumulator) > 0 and self[monomial] != 0:
accumulator += "+"
if self[monomial] == 1:
accumulator += monomial.as_c_expression()
elif self[monomial] == 2:
accumulator += "2*"
accumulator += monomial.as_c_expression()
if len(accumulator) > 0:
return accumulator
else:
return "0"
# def as_sympy(self):
# return sum([coeff * expr.as_sympy() for expr, coeff in self.coeff_dict.items()])
def as_numpy_str(self, variables) -> str:
return '(' + \
"+".join(["({0}*({1}))".format(coeff, expr.as_numpy_str(variables))
for expr, coeff in self.coeff_dict.items()]) + \
')'
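# Appended usage sketch (not part of the original module). Only APIs defined
# above are used: Mod3Poly(dict), Monomial.unit(), and the overloaded operators.
if __name__ == "__main__":
    one = Mod3Poly({Monomial.unit(): 1})   # the constant polynomial 1
    print(one + 2)                         # 1 + 2 == 0 (mod 3) -> prints "0"
    print((one + one) * (one + one))       # 2 * 2 == 4 == 1 (mod 3) -> prints "1"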
| avg_line_length 38.985425 | max_line_length 117 | alphanum_fraction 0.577897 | [remaining per-file quality-signal columns elided]
72404d3d39210b175e825c5b94b9e21a7e2698f1 | 421 | py | Python | src/combine_npy.py | hongli-ma/RNANetMotif @ 34b4de443ec7edb59f4e4e06b17686543c438366 | ["MIT"] | stars: null | issues: null | forks: null
import numpy as np
import sys
import glob
rbp=sys.argv[1]
kmer=sys.argv[2]
pfile_list=glob.glob("result_VDM3_"+rbp+"_positive_"+kmer+"_*.npy")
pfile1=np.load(pfile_list[0])
psha=np.shape(pfile1)
pmatrix=np.zeros(psha)
for pfile in pfile_list:
    arr = np.load(pfile)
    # arr = np.fromfile(pfile, dtype=np.float32)
    pmatrix += arr
np.save("positive_"+rbp+"_vdm3_nopaircontrol_distance_matrix_"+kmer+"mer.npy",pmatrix)
| avg_line_length 23.388889 | max_line_length 86 | alphanum_fraction 0.750594 | [remaining per-file quality-signal columns elided]
7242536c3707c16822eadee50c71c7b05cdd3796 | 7,768 | py | Python | concourse/steps/scan_container_images.py | jia-jerry/cc-utils @ 01322d2acb7343c92138dcf0b6ac913b276525bc | ["Apache-2.0"] | stars: null | issues: null | forks: null
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import textwrap
import typing
import tabulate
import clamav.util
import mailutil
from concourse.model.traits.image_scan import Notify
from product.model import ComponentName, UploadResult
class MailRecipients(object):
def __init__(
self,
root_component_name: str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
result_filter=None,
            recipients: typing.Iterable[str] = (),  # empty tuple avoids a mutable default argument
recipients_component: ComponentName=None,
):
self._root_component_name = root_component_name
self._result_filter = result_filter
self._protecode_results = []
self._clamav_results = []
self._cfg_set = cfg_set
if not bool(recipients) ^ bool(recipients_component):
raise ValueError('exactly one of recipients, component_name must be given')
self._recipients = recipients
        self._recipients_component = recipients_component
self._protecode_cfg = protecode_cfg
self._protecode_group_id = protecode_group_id
self._protecode_group_url = protecode_group_url
@functools.lru_cache()
def resolve_recipients(self):
if not self._recipients_component:
return self._recipients
# XXX it should not be necessary to pass github_cfg
return mailutil.determine_mail_recipients(
github_cfg_name=self._cfg_set.github().name(),
component_names=(self._recipients_component.name(),),
)
def add_protecode_results(self, results: typing.Iterable[typing.Tuple[UploadResult, int]]):
print(f'adding protecode results for {self}')
for result in results:
if self._result_filter:
if not self._result_filter(component=result[0].component):
print(f'did not match: {result[0].component.name()}')
continue
self._protecode_results.append(result)
def add_clamav_results(self, results):
for result in results:
self._clamav_results.append(result)
    def has_results(self):
        if self._protecode_results:
            return True
        if self._clamav_results:
            return True
        return False
def mail_body(self):
parts = []
parts.append(self._mail_disclaimer())
parts.append(protecode_results_table(
protecode_cfg=self._protecode_cfg,
upload_results=self._protecode_results,
)
)
parts.append(self._clamav_report())
return ''.join(parts)
def _mail_disclaimer(self):
return textwrap.dedent(f'''
<div>
<p>
            Note: you received this e-mail because you were configured as a mail recipient
in repository "{self._root_component_name}" (see .ci/pipeline_definitions)
To remove yourself, search for your e-mail address in said file and remove it.
</p>
<p>
The following components in Protecode-group
<a href="{self._protecode_group_url}">{self._protecode_group_id}</a>
were found to contain critical vulnerabilities:
</p>
</div>
''')
def _clamav_report(self):
if not self._clamav_results:
return textwrap.dedent(f'''
<p>Scanned all container image(s) for matching virus signatures
without any matches (id est: all container images seem to be free of known malware)
''')
result = '<p><div>Virus Scanning Results</div>'
return result + tabulate.tabulate(
self._clamav_results,
headers=('Image-Reference', 'Scanning Result'),
tablefmt='html',
)
def __repr__(self):
if self._recipients_component:
descr = f'component {self._recipients_component.name()}'
else:
descr = 'for all results'
return 'MailRecipients: ' + descr
def mail_recipients(
        notification_policy: Notify,
        root_component_name: str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
email_recipients: typing.Iterable[str]=(),
components: typing.Iterable[ComponentName]=(),
):
mail_recps_ctor = functools.partial(
MailRecipients,
root_component_name=root_component_name,
protecode_cfg=protecode_cfg,
protecode_group_id=protecode_group_id,
protecode_group_url=protecode_group_url,
cfg_set=cfg_set,
)
notification_policy = Notify(notification_policy)
if notification_policy == Notify.EMAIL_RECIPIENTS:
if not email_recipients:
raise ValueError('at least one email_recipient must be specified')
# exactly one MailRecipients, catching all (hence no filter)
yield mail_recps_ctor(
recipients=email_recipients,
)
elif notification_policy == Notify.NOBODY:
return
elif notification_policy == Notify.COMPONENT_OWNERS:
def make_comp_filter(own_component):
def comp_filter(component):
print(f'filter: component: {own_component.name()} - other: {component.name()}')
return own_component.name() == component.name() # only care about matching results
return comp_filter
for comp in components:
yield mail_recps_ctor(
recipients_component=comp,
result_filter=make_comp_filter(own_component=comp)
)
else:
raise NotImplementedError()
def virus_scan_images(image_references: typing.Iterable[str]):
for image_reference in image_references:
status, signature = clamav.util.scan_container_image(image_reference=image_reference)
if clamav.util.result_ok(status=status, signature=signature):
continue
yield (image_reference, f'{status}: {signature}')
def protecode_results_table(protecode_cfg, upload_results: typing.Iterable[UploadResult]):
def result_to_tuple(upload_result: UploadResult):
# upload_result tuple of product.model.UploadResult and CVE Score
upload_result, greatest_cve = upload_result
# protecode.model.AnalysisResult
analysis_result = upload_result.result
name = analysis_result.display_name()
analysis_url = \
f'{protecode_cfg.api_url()}/products/{analysis_result.product_id()}/#/analysis'
link_to_analysis_url = f'<a href="{analysis_url}">{name}</a>'
custom_data = analysis_result.custom_data()
if custom_data is not None:
image_reference = custom_data.get('IMAGE_REFERENCE')
else:
image_reference = None
return [link_to_analysis_url, greatest_cve, image_reference]
table = tabulate.tabulate(
map(result_to_tuple, upload_results),
headers=('Component Name', 'Greatest CVE', 'Container Image Reference'),
tablefmt='html',
)
return table
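# Appended illustration (not part of cc-utils): make_comp_filter above is a
# factory so each yielded filter captures its own component. Binding the loop
# variable directly would hit Python's late-binding closure pitfall:
def _make_filters_late_bound(names):
    # every lambda closes over the loop variable itself; once the loop has
    # finished, all of them compare against the *last* value
    return [lambda other: other == name for name in names]
def _make_filters_bound_per_call(names):
    def make_one(own):
        return lambda other: other == own  # 'own' is fixed per factory call
    return [make_one(name) for name in names]
if __name__ == '__main__':
    print([f('a') for f in _make_filters_late_bound(['a', 'b'])])      # [False, False]
    print([f('a') for f in _make_filters_bound_per_call(['a', 'b'])])  # [True, False]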
| avg_line_length 36.299065 | max_line_length 99 | alphanum_fraction 0.660788 | [remaining per-file quality-signal columns elided]
72430bcb51d12558e07e88c7e1a6d221c05d6f85 | 647 | py | Python | py/cv/video.py | YodaEmbedding/experiments @ 567c6a1c18fac2d951fe2af54aaa4917b7d529d2 | ["MIT"] | stars: null | issues: null | forks: null
import cv2
import numpy as np
height = 500
width = 700
gray = np.zeros((height, width), dtype=np.uint8)
# fourcc = cv2.VideoWriter_fourcc(*"MJPG")
# filename = "output.avi"
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
filename = "output.mp4"
writer = cv2.VideoWriter(
filename, fourcc, fps=30, frameSize=(width, height), isColor=False
)
# NOTE isColor doesn't seem to influence resulting file size
xs = np.arange(width // 10)
ys = np.arange(height // 10)
locations = np.dstack(np.meshgrid(ys, xs)).reshape(-1, 2)
for y, x in locations:
gray[y, x] = 255
# gray_3c = cv2.merge([gray, gray, gray])
writer.write(gray)
writer.release()
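# Appended sanity check (standard OpenCV API): re-open the file just written
# and confirm the frame count and frame size.
cap = cv2.VideoCapture(filename)
print("frames:", int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))
ok, frame = cap.read()
print("first frame shape:", frame.shape if ok else None)
cap.release()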
| avg_line_length 24.884615 | max_line_length 70 | alphanum_fraction 0.689335 | [remaining per-file quality-signal columns elided]
724b92184d8f2e9819e55008805cce856be796bd | 4,012 | py | Python | learnware/algorithm/anomaly_detect/iforest.py | marvinren/aiops_gaussian_learnware @ 47683546d6648a38bb71988c33f959cf7308376f | ["Apache-2.0"] | stars: null | issues: null | forks: null
import numpy as np
from scipy.stats import binom
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from scipy.special import erf
from learnware.algorithm.anomaly_detect.base import BaseAnomalyDetect
class iForest(BaseAnomalyDetect):
def __init__(self, n_estimators=100,
max_samples="auto",
contamination=0.1,
max_features=1.,
bootstrap=False,
n_jobs=1,
behaviour='old',
random_state=None,
verbose=0):
super(iForest, self).__init__()
self.contamination = contamination
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.n_jobs = n_jobs
self.behaviour = behaviour
self.random_state = random_state
self.verbose = verbose
        # detector used by the underlying algorithm
self.detector_ = None
self.decision_scores_ = None
self.threshold_ = None
self.labels_ = None
def fit(self, X, y=None):
self.detector_ = IsolationForest(n_estimators=self.n_estimators,
max_samples=self.max_samples,
contamination=self.contamination,
max_features=self.max_features,
bootstrap=self.bootstrap,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
X = self._data_type_transform(X)
self.detector_.fit(X, y=None, sample_weight=None)
self.decision_function(X)
self._decision_threshold_process()
return self
def predict(self, X, return_confidence=False):
X = self._data_type_transform(X)
        if self.detector_ is None:
            raise RuntimeError("detector not found, please fit the train data.")
pred_score = self.decision_function(X)
prediction = np.ones_like(pred_score, dtype=int)
prediction[pred_score < self.threshold_] = -1
if return_confidence:
confidence = self.predict_confidence(X)
return prediction, confidence
return prediction
def decision_function(self, X):
        if self.detector_ is None:
            raise RuntimeError("detector not found, please fit the train data.")
self.decision_scores_ = self.detector_.decision_function(X)
return self.decision_scores_
def _decision_threshold_process(self):
self.threshold_ = np.percentile(self.decision_scores_,
100 * self.contamination)
self.labels_ = (self.decision_scores_ > self.threshold_).astype(
'int').ravel()
self._mu = np.mean(self.decision_scores_)
self._sigma = np.std(self.decision_scores_)
return self
def predict_confidence(self, X):
n = len(self.decision_scores_)
test_scores = self.decision_function(X)
count_instances = np.vectorize(
lambda x: np.count_nonzero(self.decision_scores_ <= x))
n_instances = count_instances(test_scores)
# Derive the outlier probability using Bayesian approach
posterior_prob = np.vectorize(lambda x: (1 + x) / (2 + n))(n_instances)
# Transform the outlier probability into a confidence value
confidence = np.vectorize(
            lambda p: 1 - binom.cdf(n - int(n * self.contamination), n, p))(
posterior_prob)
prediction = (test_scores > self.threshold_).astype('int').ravel()
np.place(confidence, prediction == 0, 1 - confidence[prediction == 0])
return confidence
def _data_type_transform(self, X):
if type(X) is list:
return np.array(X).reshape(-1, 1)
return X
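# Appended usage sketch (synthetic data; parameters are illustrative):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # 200 inliers plus 20 uniform outliers in 2-D
    X = np.r_[rng.randn(200, 2), rng.uniform(low=-6, high=6, size=(20, 2))]
    det = iForest(n_estimators=50, contamination=0.1, random_state=0)
    det.fit(X)
    pred, conf = det.predict(X, return_confidence=True)
    print("flagged as anomalous:", int((pred == -1).sum()))
    print("first confidences:", np.round(conf[:5], 3))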
| avg_line_length 37.148148 | max_line_length 79 | alphanum_fraction 0.598704 | [remaining per-file quality-signal columns elided]
7252008c26b1662083a1400694c806c34e33ed67 | 910 | py | Python | graviteeio_cli/lint/functions/length.py | ["Apache-2.0"]
| stars:  gravitee-io/gravitee-cli @ 8e3bf9f2c0c2873e0f6e67f8fcaf0d3b6c44b3ca | 12 | 2019-05-29T20:06:01.000Z .. 2020-10-07T07:40:27.000Z
| issues: gravitee-io/graviteeio-cli @ 0e0069b00ce40813efc7d40142a6dc4b4ec7a261 | 41 | 2019-11-04T18:18:18.000Z .. 2021-04-22T16:12:51.000Z
| forks:  gravitee-io/gravitee-cli @ 8e3bf9f2c0c2873e0f6e67f8fcaf0d3b6c44b3ca | 6 | 2019-06-18T04:27:49.000Z .. 2021-06-02T17:52:24.000Z
from graviteeio_cli.lint.types.function_result import FunctionResult
def length(value, **kwargs):
"""Count the length of a string an or array, the number of properties in an object, or a numeric value, and define minimum and/or maximum values."""
min = None
max = None
if "min" in kwargs and type(kwargs["min"]) is int:
min = kwargs["min"]
if "max" in kwargs and type(kwargs["max"]) is int:
max = kwargs["max"]
value_length = 0
    if value:
        if isinstance(value, (int, float)):
            value_length = value
        else:
            value_length = len(value)
results = []
    if min is not None and value_length < min:
results.append(
FunctionResult("min length is {}".format(min))
)
    if max is not None and value_length > max:
results.append(
FunctionResult("max length is {}".format(max))
)
return results
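# Appended behaviour sketch (FunctionResult as imported above):
# length("abc", min=5)        -> [FunctionResult("min length is 5")]
# length("abcdef", max=3)     -> [FunctionResult("max length is 3")]
# length(42, min=1, max=100)  -> []   (numeric values are compared directly)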
| avg_line_length 26 | max_line_length 152 | alphanum_fraction 0.597802 | [remaining per-file quality-signal columns elided]
a0c69fd6e11617fc5f9eb586f7c2029856d0877b | 2,399 | py | Python | Technical_Indicators/rainbow_charts.py | ["MIT"]
| stars:  vhn0912/Finance @ 39cf49d4d778d322537531cee4ce3981cc9951f9 | 441 | 2020-04-22T02:21:19.000Z .. 2022-03-29T15:00:24.000Z
| issues: happydasch/Finance @ 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | 5 | 2020-07-06T15:19:58.000Z .. 2021-07-23T18:32:29.000Z
| forks:  happydasch/Finance @ 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | 111 | 2020-04-21T11:40:39.000Z .. 2022-03-20T07:26:17.000Z
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
# input
symbol = 'AAPL'
start = dt.date.today() - dt.timedelta(days = 365*2)
end = dt.date.today()
# Read data
df = yf.download(symbol,start,end)
# R=red, O=orange, Y=yellow, G=green, B=blue, I = indigo, and V=violet
df['Red'] = df['Adj Close'].rolling(2).mean()
df['Orange'] = df['Red'].rolling(2).mean()
df['Yellow'] = df['Orange'].rolling(2).mean()
df['Green'] = df['Yellow'].rolling(2).mean()
df['Blue'] = df['Green'].rolling(2).mean()
df['Indigo'] = df['Blue'].rolling(2).mean()
df['Violet'] = df['Indigo'].rolling(2).mean()
df = df.dropna()
colors = ['k','r', 'orange', 'yellow', 'g', 'b', 'indigo', 'violet']
df[['Adj Close','Red','Orange','Yellow','Green','Blue','Indigo','Violet']].plot(color=colors, figsize=(18,12))
plt.fill_between(df.index, df['Low'], df['High'], color='grey', alpha=0.4)
plt.plot(df['Low'], c='darkred', linestyle='--', drawstyle="steps")
plt.plot(df['High'], c='forestgreen', linestyle='--', drawstyle="steps")
plt.title('Rainbow Charts')
plt.legend(loc='best')
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
# ## Candlestick with Rainbow
from matplotlib import dates as mdates
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].tolist())
from mplfinance.original_flavor import candlestick_ohlc
fig, ax1 = plt.subplots(figsize=(20,12))
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
#colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet']
#labels = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']
for i in dfc[['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']]:
ax1.plot(dfc['Date'], dfc[i], color=i, label=i)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.set_xlabel('Date')
ax1.legend(loc='best')
plt.show()
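# Appended sketch: the seven rolling columns above can equivalently be built
# in a loop (same recursive 2-period SMA; shown on a copy so df is untouched):
df2 = df.copy()
prev = 'Adj Close'
for band in ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']:
    df2[band] = df2[prev].rolling(2).mean()
    prev = band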
| avg_line_length 36.348485 | max_line_length 111 | alphanum_fraction 0.667361 | [remaining per-file quality-signal columns elided]
a0c8d55fb37c691da19d42d22717e7769ad0fbbf | 1,670 | py | Python | UpWork_Projects/pdf_downloader.py | SurendraTamang/Web-Scrapping @ 2bb60cce9010b4b68f5c11bf295940832bb5df50 | ["MIT"] | stars: null | issues: null
| forks: 1 | 2022-01-18T17:15:51.000Z .. 2022-01-18T17:15:51.000Z
import requests
from urllib.request import urlopen
from urllib.request import urlretrieve
import cgi
import os.path
def retrive_file_name(url):
#url = 'https://material.ibear.pt/BTHorarios2019/FileGet.aspx?FileId=5601'
remotefile = urlopen(url)
    content_disposition = remotefile.info()['Content-Disposition']
    _, params = cgi.parse_header(content_disposition)
filename = params["filename"]
#urlretrieve(url, filename)
return filename
def pdf_downloader():
    for i in range(0, 10000):
        cntr = str(i).zfill(4)  # zero-pad the counter to four digits
try:
url = f"https://material.ibear.pt/BTHorarios2019/FileGet.aspx?FileId={cntr}"
response = requests.get(url)
if response.status_code == 200:
file_name = retrive_file_name(url)
file_path1 = f'D:/upworkWorkspace/25032020_pdf_downloader/downloads/{file_name}'
file_path2 = f'D:/upworkWorkspace/25032020_pdf_downloader/downloads/copy_{cntr}_{file_name}'
if not os.path.isfile(file_path1) and not os.path.isfile(file_path2):
print(file_name)
with open(file_path1, 'wb') as f:
f.write(response.content)
else:
print(f'copy_{cntr}_{file_name}')
with open(file_path2, 'wb') as f:
f.write(response.content)
else:
print("Counter: ", cntr)
        except Exception:
            # skip ids whose download or header parsing fails
            pass
pdf_downloader()
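# Appended sketch: the cgi module used above is deprecated (removed in
# Python 3.13); the same filename can be parsed with the stdlib email API.
from email.message import Message
def filename_from_content_disposition(header_value):
    msg = Message()
    msg["Content-Disposition"] = header_value
    return msg.get_filename()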
| avg_line_length 33.4 | max_line_length 108 | alphanum_fraction 0.552096 | [remaining per-file quality-signal columns elided]
a0cab7a3ae269edaac7fa1a7d902a54bd96a752d | 13,282 | py | Python | backend/app/vta/texdf/tex_df.py | megagonlabs/leam @ f19830d4d6935bece7d163abbc533cfb4bc2e729 | ["Apache-2.0"]
| stars: 7 | 2020-09-14T07:03:51.000Z .. 2022-01-13T10:11:53.000Z | issues: null | forks: 1 | 2020-09-07T22:26:27.000Z .. 2020-09-07T22:26:27.000Z
import spacy
import json, os
import dill as pickle
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sqlalchemy import create_engine, select, MetaData, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from typing import List, Dict, Any
from flask import current_app
from app.models import Dataset
# from vta.operators import featurize
# from vta.operators import clean
# from vta.operators import select
# from vta import spacy_nlp
from .tex_column import TexColumn
from .tex_metadata import MetadataItem
from .tex_vis import TexVis
from ..types import VTAColumnType, VisType
class TexDF:
dataset_name: str
data_view: pd.DataFrame
    table_view: List[Any]
    table_links: List[Any]
columns: Dict[str, TexColumn]
visualizations: List[TexVis]
coordination_indexes: Dict[str, Dict]
udf: Dict[str, Any] # TODO: specify typing of function expected
def __init__(self, df, name):
self.dataset_name = name
self.data_view = df
self.table_view = []
self.table_links = []
self.columns = {i: TexColumn(i, VTAColumnType.TEXT) for i in df.columns}
self.visualizations = []
self.coordination_indexes = {}
self.udf = {}
# self.cached_visual_encodings = {i: {} for i in self.df.columns}
# self.view_indexes = {}
self.update_table_view()
if os.path.exists("/app/UI_QUEUE.pkl"):
self.UI_QUEUE = pickle.load(open("UI_QUEUE.pkl", "rb"))
else:
self.UI_QUEUE = []
pickle.dump(self.UI_QUEUE, open("/app/UI_QUEUE.pkl", "wb"))
def get_dataview_column(self, col_name: str) -> pd.Series:
return self.data_view[col_name]
def get_column_type(self, col_name: str) -> VTAColumnType:
return self.columns[col_name].col_type
def get_column_types(self, col_names: List[str]) -> List[VTAColumnType]:
return [self.columns[col].col_type for col in col_names]
def get_all_column_types(self) -> List[str]:
return [self.columns[col].col_type.value for col in self.columns.keys()]
def get_table_view(self):
return self.table_view
def get_table_view_columns(self):
return [i for i in self.data_view.columns]
def get_vis(self, i):
return self.visualizations[i]
def get_visualizations(self):
vis_list = [i.to_dict() for i in self.visualizations]
return vis_list
def get_column_metadata(self, col_name: str) -> TexColumn:
return self.columns[col_name]
def get_all_metadata(self):
# metadata will be a table with 3 columns: tag | data_type | data
all_metadata = []
for _, col in self.columns.items():
for _, md in col.metadata.items():
all_metadata.append(
{"tag": md.tag, "type": md.md_type.value, "value": md.value}
)
return all_metadata
def print_metadata(self):
# metadata will be a table with 3 columns: tag | data_type | data
pretty_print = ""
for _, col in self.columns.items():
for _, md in col.metadata.items():
col_metadata_item = {}
col_metadata_item["column"] = col.col_name
col_metadata_item["tag_name"] = md.tag
col_metadata_item["metadata_type"] = md.md_type.value
col_metadata_item["value"] = str(md.value)[:80] + "..."
pretty_print += str(col_metadata_item) + "\n\n"
print(pretty_print)
def get_vis_lookup_table(self, vis_idx):
return self.visualizations[vis_idx].row_lookup_table
def get_coordination_idx(self, metadata_name):
return self.coordination_indexes[metadata_name]
def get_vis_links(self, vis_idx):
if vis_idx == "table":
return self.table_links
return self.visualizations[vis_idx].links
def get_columns_vega_format(self, columns, data_type, md_tag=None):
# Take in list of columns, output data from those columns formatted
# in vega-lite format: [{"id": 1, "x": 0.3}, {"id": 2, "x": 0.7}, ...]
vega_rows = []
if data_type == "dataview":
for _, row in self.data_view[columns].iterrows():
vega_row = {c: row[c] for c in columns}
vega_rows.append(vega_row)
elif data_type == "metadata":
col_name = columns[0]
data = self.get_column_metadata(col_name).get_metadata_by_tag(md_tag)
# add some way to handle different types of metadata
if md_tag == "top_scores":
tw_list = [(k, v) for k, v in data.value.items()]
tw_list = sorted(tw_list, key=lambda word: word[1], reverse=True)
tw_list = [(v[0], v[1], i + 1) for i, v in enumerate(tw_list)]
# log.info("top words list:")
# log.info(tw_list)
for v in tw_list:
vega_rows.append({"topword": v[0], "score": v[1], "order": v[2]})
else:
print("data is: ")
print(data.value)
assert isinstance(data.value, dict)
for label, count in data.value.items():
vega_rows.append({"label": label, "count": count})
return vega_rows
def get_udf(self, func_name):
return self.udf[func_name]
# TODO: specify a certain function params/return values
def add_udf(self, func):
self.udf[func.__name__] = func
self.checkpoint_texdf()
def print_udfs(self):
print(self.udf)
def rename_column(self, old_col, new_col):
self.data_view = self.data_view.rename(columns={old_col: new_col})
self.columns[new_col] = self.columns[old_col]
del self.columns[old_col]
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
# TODO: add regex to this
def replace_column_value(self, col_name, old_value, new_value):
# data_view["category"].replace("ham", 0, inplace=True)
self.data_view[col_name].replace(old_value, new_value, inplace=True)
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
def select_vis_element(self, vis_idx, item_idx):
# TODO: add support for words in select like in topwords tf-idf barchart
# TODO: add support for linking, where we might generate many new select ui tasks
if vis_idx == "table":
task = {"view": "table", "type": "select", "rows": item_idx}
else:
task = {
"view": "datavis",
"type": "select",
"vis_idx": vis_idx,
"rows": item_idx,
}
self.add_to_uiq(task)
self.checkpoint_texdf()
def add_coord_idx(self, metadata, coord_idx):
self.coordination_indexes[metadata] = coord_idx
self.checkpoint_texdf()
def remove_vis(self, vis_idx):
if vis_idx < 0 or vis_idx >= len(self.visualizations):
return
# remove vis in place and save
del self.visualizations[vis_idx]
for v in self.visualizations:
if vis_idx in v.links:
v.links.remove(vis_idx)
# update the rest of the links to correspond to their new positions
for v in self.visualizations:
for link_idx, link in enumerate(v.links):
if vis_idx > link:
pass
elif vis_idx < link:
v.links[link_idx] = link - 1
else:
                    raise Exception(
                        "there should be no link with vis idx %d; it was deleted" % vis_idx
                    )
task = {
"view": "table",
"type": "update_vis",
} # change this to be related to vis
self.add_to_uiq(task)
self.checkpoint_texdf()
def remove_link(self, src, target):
if src == "table":
vis_obj = self.table_links
else:
vis_obj = self.visualizations[src].links
if target in vis_obj:
vis_obj.remove(target)
self.checkpoint_texdf()
def add_uni_link(self, src, target):
if src == "table":
vis_obj = self.table_links
else:
vis_obj = self.visualizations[src].links
if target not in vis_obj:
vis_obj.append(target)
self.checkpoint_texdf()
def add_bi_link(self, src, target):
if src == "table":
vis_obj_src = self.table_links
else:
vis_obj_src = self.visualizations[src].links
if target == "table":
vis_obj_target = self.table_links
else:
vis_obj_target = self.visualizations[target].links
if target not in vis_obj_src:
vis_obj_src.append(target)
if src not in vis_obj_target:
vis_obj_target.append(src)
self.checkpoint_texdf()
def add_visualization(self, columns, vis_type, selection=None, md_tag=None):
# if aggregate type vis, using metadata, if not using column(s)
if vis_type == VisType.tw_barchart or vis_type == VisType.barchart:
data_type = "metadata"
vis_data = self.get_columns_vega_format(columns, data_type, md_tag=md_tag)
else:
data_type = "dataview"
vis_data = self.get_columns_vega_format(columns, data_type)
col_types = self.get_column_types(columns)
new_vis = TexVis(
vis_type,
columns,
col_types,
vis_data,
selection_type=selection,
md_tag=md_tag,
)
self.visualizations.append(new_vis)
vis_index = len(self.visualizations) - 1
task = {
"view": "datavis",
"type": "add_vis",
"idx": vis_index,
"vis_type": new_vis.vis_type.value,
"selection_type": new_vis.selection_type,
}
self.add_to_uiq(task)
self.checkpoint_texdf()
def update_table_view(self):
readable_df = self.data_view.copy()
for k, v in self.columns.items():
col_type = v.col_type
if col_type == VTAColumnType.VECTOR:
is_column_list = type(readable_df[k][0]) == list
row_vectors = (
readable_df[k].map(lambda r: np.array(r).tolist())
if is_column_list
else readable_df[k].map(lambda r: r.toarray().tolist())
)
row_vectors = (
row_vectors if is_column_list else [r[0] for r in row_vectors]
)
row_string_vectors = [[str(f)[:6] for f in r] for r in row_vectors]
row_string_vectors = map(lambda r: r[:6], row_string_vectors)
row_string_vectors = [
", ".join(r) + ", ..." for r in row_string_vectors
]
readable_df[k] = row_string_vectors
elif col_type == VTAColumnType.FLOAT:
float_column = readable_df[k]
row_floats = [round(f, 5) for f in float_column]
readable_df[k] = row_floats
self.table_view = readable_df.values.tolist()
def update_dataview_column(
self, col_name: str, col_type: VTAColumnType, new_column: Any
):
self.data_view[col_name] = new_column
col = self.columns[col_name]
col.col_type = col_type
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
def create_dataview_column(
self, new_col_name: str, col_type: VTAColumnType, new_column: Any
):
self.data_view[new_col_name] = new_column
self.columns[new_col_name] = TexColumn(new_col_name, col_type)
task = {"view": "table", "type": "create_column"}
self.add_to_uiq(task)
self.update_table_view()
self.checkpoint_texdf()
# make sure that an aggregate is returning a data structure with the corresponding rows included
# b/c will use those to determine coordination
def add_metadata(
self, col_name: str, tag: str, md_type: VTAColumnType, md_value: Any
):
new_metadata = MetadataItem(tag, col_name, md_type, md_value)
col = self.columns[col_name]
col.metadata[tag] = new_metadata
task = {"view": "table", "type": "add_metadata"}
self.add_to_uiq(task)
# TODO: update table view to create presentable version of metadata???
self.checkpoint_texdf()
def add_to_uiq(self, task):
self.UI_QUEUE.append(task)
pickle.dump(self.UI_QUEUE, open("UI_QUEUE.pkl", "wb"))
def checkpoint_texdf(self):
name = self.dataset_name.split(".")[0]
dataframe_pkl_file = "/app/" + name + ".pkl"
pickle.dump(self, open(dataframe_pkl_file, "wb"))
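# Appended sketch: get_columns_vega_format flattens dataframe columns into
# vega-lite style records. The same transform in isolation (names illustrative):
if __name__ == "__main__":
    demo = pd.DataFrame({"id": [1, 2], "x": [0.3, 0.7]})
    rows = [{c: row[c] for c in ["id", "x"]} for _, row in demo.iterrows()]
    print(rows)  # [{'id': 1, 'x': 0.3}, {'id': 2, 'x': 0.7}]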
| avg_line_length 37.840456 | max_line_length 100 | alphanum_fraction 0.595844 | [remaining per-file quality-signal columns elided]
a0ceec8ec85ef44ddb9d9cd56199a36790b171fc | 4,171 | py | Python | tests/contour_classifiers/test_randomforest.py | yamathcy/motif @ 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | ["MIT"]
| stars: 21 | 2016-08-22T22:00:49.000Z .. 2020-03-29T04:15:19.000Z | issues: 22 | 2016-08-28T01:07:08.000Z .. 2018-02-07T14:38:26.000Z | forks: 3 | 2017-01-12T10:04:27.000Z .. 2022-01-06T13:25:48.000Z
"""Test for motif.classify.mvgaussian
"""
from __future__ import print_function
import unittest
import numpy as np
from motif.contour_classifiers import random_forest
def array_equal(array1, array2):
return np.all(np.isclose(array1, array2))
class TestRandomForest(unittest.TestCase):
def setUp(self):
self.clf = random_forest.RandomForest(
n_estimators=2, n_iter_search=1, random_state=6
)
def test_n_estimators(self):
expected = 2
actual = self.clf.n_estimators
self.assertEqual(expected, actual)
def test_n_jobs(self):
expected = -1
actual = self.clf.n_jobs
self.assertEqual(expected, actual)
def test_class_weight(self):
expected = 'balanced'
actual = self.clf.class_weight
self.assertEqual(expected, actual)
def test_n_iter_search(self):
expected = 1
actual = self.clf.n_iter_search
self.assertEqual(expected, actual)
def test_clf(self):
expected = None
actual = self.clf.clf
self.assertEqual(expected, actual)
def test_predict_error(self):
with self.assertRaises(ReferenceError):
self.clf.predict(np.array([0, 0, 0]))
def test_fit(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
self.assertIsNotNone(self.clf.clf)
def test_predict(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
actual = self.clf.predict(
np.array([[1.0, 2.0], [1.0, 3.0], [-2.0, -2.0]])
)
expected = np.array([0.0, 0.0, 1.0])
self.assertTrue(array_equal(actual, expected))
def test_predict_discrete_label(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
actual = self.clf.predict_discrete_label(
np.array([[1.0, 2.0], [1.0, 3.0], [-2.0, -2.0]])
)
expected = np.array([0, 0, 1])
self.assertTrue(array_equal(actual, expected))
def test_threshold(self):
expected = 0.5
actual = self.clf.threshold
self.assertEqual(expected, actual)
def test_get_id(self):
expected = 'random_forest'
actual = self.clf.get_id()
self.assertEqual(expected, actual)
def test_score(self):
predicted_scores = np.array([0.0, 0.25, 1.0, 0.5, 0.9])
y_pred = np.array([0, 0, 1, 1, 1])
y_target = np.array([0, 0, 1, 1, 1])
expected = {
'accuracy': 1.0,
'mcc': 1.0,
'precision': np.array([1.0, 1.0]),
'recall': np.array([1.0, 1.0]),
'f1': np.array([1.0, 1.0]),
'support': np.array([2, 3]),
'confusion matrix': np.array([[2, 0], [0, 3]]),
'auc score': 1.0
}
actual = self.clf.score(y_pred, y_target, y_prob=predicted_scores)
self.assertEqual(expected['accuracy'], actual['accuracy'])
self.assertAlmostEqual(expected['mcc'], actual['mcc'], places=1)
self.assertTrue(
array_equal(expected['precision'], actual['precision'])
)
self.assertTrue(array_equal(expected['recall'], actual['recall']))
self.assertTrue(array_equal(expected['f1'], actual['f1']))
self.assertTrue(array_equal(expected['support'], actual['support']))
self.assertTrue(array_equal(
expected['confusion matrix'], actual['confusion matrix']
))
self.assertEqual(expected['auc score'], actual['auc score'])
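# Appended: standard unittest entry point so the file can be run directly:
#   python -m unittest tests.contour_classifiers.test_randomforest
if __name__ == '__main__':
    unittest.main()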
| avg_line_length 32.585938 | max_line_length 76 | alphanum_fraction 0.529369 | [remaining per-file quality-signal columns elided]
a0cf8257e1729da63a070f7fb21ed2b3279418e3 | 7,365 | py | Python | awsenv/profile.py | KensoDev/awsenv @ 4bf759106d2e0d79221d0ca9188ed7686e119b2c | ["Apache-2.0"]
| stars: 6 | 2016-09-11T08:39:50.000Z .. 2018-10-22T13:41:34.000Z | issues: 1 | 2017-01-09T23:58:20.000Z .. 2017-01-09T23:58:20.000Z | forks: 5 | 2017-01-09T23:26:12.000Z .. 2021-09-08T09:35:59.000Z
"""
Profile-aware session wrapper.
"""
from os import environ
from botocore.exceptions import ProfileNotFound
from botocore.session import Session
from awsenv.cache import CachedSession
def get_default_profile_name():
"""
Get the default profile name from the environment.
"""
return environ.get("AWS_DEFAULT_PROFILE", "default")
class AWSSession(object):
"""
AWS session wrapper.
"""
def __init__(self, profile=None):
self.profile = profile
self.session = Session(profile=self.profile)
@property
def access_key_id(self):
return None
@property
def secret_access_key(self):
return None
@property
def region_name(self):
return environ.get("AWS_REGION", environ.get("AWS_DEFAULT_REGION", "us-west-2"))
@property
def session_token(self):
return None
def create_client(self,
service_name,
api_version=None,
use_ssl=True,
verify=None,
endpoint_url=None,
config=None):
"""
Create a service from the wrapped session.
Automatically populates the region name, access key, secret key, and session token.
Allows other parameters to be passed.
"""
return self.session.create_client(
service_name=service_name,
region_name=self.region_name,
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
aws_session_token=self.session_token,
api_version=api_version,
use_ssl=use_ssl,
verify=verify,
endpoint_url=endpoint_url,
config=config,
)
class AWSProfile(AWSSession):
"""
AWS profile configuration.
"""
def __init__(self,
profile,
session_duration,
cached_session,
account_id=None):
"""
Configure a session for a profile.
:param profile: the name of the profile to use, if any
:param session_duration: the duration of the session (in seconds)
must be in the range 900-3600
:param cached_session: the cached session to use, if any
:param account_id: the account id for profile auto-generation (if any)
"""
self.session_duration = session_duration
self.cached_session = cached_session
self.account_id = account_id
super(AWSProfile, self).__init__(profile)
@property
def access_key_id(self):
return self.merged_config.get("aws_access_key_id")
@property
def secret_access_key(self):
return self.merged_config.get("aws_secret_access_key")
@property
def region_name(self):
return self.merged_config.get("region")
@property
def role_arn(self):
return self.profile_config.get("role_arn")
@property
def session_token(self):
return self.cached_session.token if self.cached_session else None
@property
def session_name(self):
return self.cached_session.name if self.cached_session else None
@property
def profile_config(self):
"""
Return the loaded configuration for the profile.
"""
try:
return self.session.get_scoped_config()
except ProfileNotFound:
if self.account_id is None:
raise
# attempt to generate the profile configuration
self.session._profile_map[self.profile] = dict(
role_arn="arn:aws:iam::{}:role/{}".format(
self.account_id,
self.profile,
),
source_profile=get_default_profile_name(),
)
return self.session.get_scoped_config()
@property
def source_profile_config(self):
"""
Return the loaded configuration for the source profile, if any.
"""
source_profile_name = self.profile_config.get("source_profile")
all_profiles = self.session.full_config["profiles"]
return all_profiles.get(source_profile_name, {})
@property
def merged_config(self):
"""
        Merge the profile and source configurations along with the current credentials.
"""
result = self.source_profile_config.copy()
result.update(self.profile_config)
if self.session._credentials:
result.update(
aws_access_key_id=self.session._credentials.access_key,
aws_secret_access_key=self.session._credentials.secret_key,
aws_session_token=self.session._credentials.token,
)
# Override with AWS_REGION environment variable
region_from_envvar = environ.get("AWS_REGION")
if region_from_envvar:
result.update(region=region_from_envvar)
return result
def to_envvars(self):
return {
"AWS_ACCESS_KEY_ID": self.access_key_id,
"AWS_DEFAULT_REGION": self.region_name,
"AWS_PROFILE": self.profile,
"AWS_SECRET_ACCESS_KEY": self.secret_access_key,
"AWS_SESSION_NAME": self.session_name,
"AWS_SESSION_TOKEN": self.session_token,
}
def update_credentials(self):
"""
Update the profile's credentials by assuming a role, if necessary.
"""
if not self.role_arn:
return
if self.cached_session is not None:
# use current role
access_key, secret_key = self.current_role()
else:
# assume role to get a new token
access_key, secret_key = self.assume_role()
if access_key and secret_key:
self.session.set_credentials(
access_key=access_key,
secret_key=secret_key,
token=self.cached_session.token if self.cached_session else None,
)
def current_role(self):
"""
Load credentials for the current role.
"""
return (
environ.get("AWS_ACCESS_KEY_ID", self.access_key_id),
environ.get("AWS_SECRET_ACCESS_KEY", self.secret_access_key),
)
def assume_role(self):
"""
Assume a role.
"""
# we need to pass in the regions and keys because botocore does not
# automatically merge configuration from the source_profile
sts_client = self.session.create_client(
service_name="sts",
region_name=self.region_name,
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
)
session_name = CachedSession.make_name()
result = sts_client.assume_role(**{
"RoleArn": self.role_arn,
"RoleSessionName": session_name,
"DurationSeconds": self.session_duration,
})
# update the cached session
self.cached_session = CachedSession(
name=session_name,
token=result["Credentials"]["SessionToken"],
profile=self.profile,
)
return (
result["Credentials"]["AccessKeyId"],
result["Credentials"]["SecretAccessKey"],
)
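# Appended usage sketch (profile name and duration are illustrative; a real
# run needs matching entries in ~/.aws/config):
if __name__ == "__main__":
    profile = AWSProfile(
        profile=get_default_profile_name(),
        session_duration=900,
        cached_session=None,
    )
    profile.update_credentials()  # assumes a role only if the profile defines role_arn
    for key, value in profile.to_envvars().items():
        print("export {}={}".format(key, value))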
| avg_line_length 31.075949 | max_line_length 91 | alphanum_fraction 0.60611 | [remaining per-file quality-signal columns elided]
a0d0d288568d1ad31c787944a756b68fdcfc394c | 13,358 | py | Python | cail/algo/twoiwil.py | ["MIT"]
| stars:  Stanford-ILIAD/Confidence-Aware-Imitation-Learning @ 1d8af0e4ab87a025885133a2384d5a937329b2f5 | 16 | 2021-10-30T15:19:37.000Z .. 2022-03-23T12:57:49.000Z
| issues: syzhang092218-source/Confidence-Aware-Imitation-Learning @ 1d8af0e4ab87a025885133a2384d5a937329b2f5 | null
| forks:  syzhang092218-source/Confidence-Aware-Imitation-Learning @ 1d8af0e4ab87a025885133a2384d5a937329b2f5 | 2 | 2021-11-29T11:28:16.000Z .. 2022-03-06T14:12:47.000Z
import torch
import os
import torch.nn.functional as F
import numpy as np
import copy
from torch import nn
from torch.optim import Adam
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from typing import Tuple
from .ppo import PPO, PPOExpert
from .utils import CULoss
from cail.network import AIRLDiscrim, Classifier
from cail.buffer import SerializedBuffer
class TwoIWIL(PPO):
"""
Implementation of 2IWIL, using PPO-based AIRL as the backbone IL algorithm
Reference:
----------
[1] Wu, Y.-H., Charoenphakdee, N., Bao, H., Tangkaratt, V.,and Sugiyama, M.
Imitation learning from imperfect demonstration.
    In International Conference on Machine Learning, pp. 6818–6827, 2019.
Parameters
----------
buffer_exp: SerializedBuffer
buffer of demonstrations
state_shape: np.array
shape of the state space
action_shape: np.array
shape of the action space
device: torch.device
cpu or cuda
seed: int
random seed
gamma: float
discount factor
rollout_length: int
rollout length of the buffer
mix_buffer: int
times for rollout buffer to mix
batch_size: int
batch size for sampling from current policy and demonstrations
lr_actor: float
learning rate of the actor
lr_critic: float
learning rate of the critic
lr_disc: float
learning rate of the discriminator
units_actor: tuple
hidden units of the actor
units_critic: tuple
hidden units of the critic
units_disc_r: tuple
hidden units of the discriminator r
units_disc_v: tuple
hidden units of the discriminator v
epoch_ppo: int
at each update period, update ppo for these times
epoch_disc: int
at each update period, update the discriminator for these times
clip_eps: float
clip coefficient in PPO's objective
lambd: float
lambd factor
coef_ent: float
entropy coefficient
max_grad_norm: float
maximum gradient norm
classifier_iter: int
iteration of training the classifier
lr_classifier: float
learning rate of the classifier
"""
def __init__(
self,
buffer_exp: SerializedBuffer,
state_shape: np.array,
action_shape: np.array,
device: torch.device,
seed: int,
gamma: float = 0.995,
rollout_length: int = 10000,
mix_buffer: int = 1,
batch_size: int = 64,
lr_actor: float = 3e-4,
lr_critic: float = 3e-4,
lr_disc: float = 3e-4,
units_actor: tuple = (64, 64),
units_critic: tuple = (64, 64),
units_disc_r: tuple = (100, 100),
units_disc_v: tuple = (100, 100),
epoch_ppo: int = 50,
epoch_disc: int = 10,
clip_eps: float = 0.2,
lambd: float = 0.97,
coef_ent: float = 0.0,
max_grad_norm: float = 10.0,
classifier_iter: int = 25000,
lr_classifier: float = 3e-4
):
super().__init__(
state_shape, action_shape, device, seed, gamma, rollout_length,
mix_buffer, lr_actor, lr_critic, units_actor, units_critic,
epoch_ppo, clip_eps, lambd, coef_ent, max_grad_norm
)
# expert's buffer
self.buffer_exp = buffer_exp
# discriminator
self.disc = AIRLDiscrim(
state_shape=state_shape,
gamma=gamma,
hidden_units_r=units_disc_r,
hidden_units_v=units_disc_v,
hidden_activation_r=nn.ReLU(inplace=True),
hidden_activation_v=nn.ReLU(inplace=True)
).to(device)
self.learning_steps_disc = 0
self.optim_disc = Adam(self.disc.parameters(), lr=lr_disc)
self.batch_size = batch_size
self.epoch_disc = epoch_disc
# classifier
self.classifier = Classifier(state_shape, action_shape).to(device)
self.n_label_traj = self.buffer_exp.n_traj
self.classifier_iter = classifier_iter
self.optim_classifier = Adam(self.classifier.parameters(), lr=lr_classifier)
self.train_classifier()
self.save_classifier = False
# label conf
states_exp, action_exp, _, _, _ = self.buffer_exp.get()
self.conf = torch.sigmoid(self.classifier(torch.cat((states_exp, action_exp), dim=-1)))
def train_classifier(self):
"""Train a classifier"""
print('Training classifier')
label_traj_states = copy.deepcopy(self.buffer_exp.traj_states)
label_traj_actions = copy.deepcopy(self.buffer_exp.traj_actions)
label_traj_rewards = copy.deepcopy(self.buffer_exp.traj_rewards)
# use ranking to label confidence
conf_gap = 1.0 / float(self.n_label_traj - 1)
ranking = np.argsort(label_traj_rewards)
traj_lengths = np.asarray([i.shape[0] for i in label_traj_states])
n_label_demos = traj_lengths.sum()
label = np.zeros(n_label_demos)
ptr = 0
for i in range(traj_lengths.shape[0]):
label[ptr: ptr + traj_lengths[i]] = ranking[i] * conf_gap
ptr += traj_lengths[i]
label = torch.from_numpy(label).to(self.device)
label_traj = torch.cat((torch.cat(label_traj_states), torch.cat(label_traj_actions)), dim=-1)
batch = min(128, label_traj.shape[0])
ubatch = int(batch / label_traj.shape[0] * self.buffer_exp.buffer_size)
loss_fun = CULoss(label, beta=1 - self.buffer_exp.label_ratio, device=self.device, non=True)
# start training
for i_iter in tqdm(range(self.classifier_iter)):
idx = np.random.choice(label_traj.shape[0], batch)
labeled = self.classifier(Variable(label_traj[idx, :]))
smp_conf = label[idx]
states_exp, actions_exp, _, _, _ = self.buffer_exp.sample(ubatch)
unlabeled = self.classifier(torch.cat((states_exp, actions_exp), dim=-1))
self.optim_classifier.zero_grad()
risk = loss_fun(smp_conf, labeled, unlabeled)
risk.backward()
self.optim_classifier.step()
if i_iter % 2000 == 0:
tqdm.write(f'iteration: {i_iter}\tcu loss: {risk.data.item():.3f}')
self.classifier = self.classifier.eval()
print("Classifier finished training")
def sample_exp(
self,
batch_size: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Sample from expert's demonstrations
Parameters
----------
batch_size: int
number of samples
Returns
-------
states: torch.Tensor
expert's states
actions: torch.Tensor
expert's actions
dones: torch.Tensor
expert's dones
next_states: torch.Tensor
expert's next states
conf: torch.Tensor
confidence of expert's demonstrations
"""
# Samples from expert's demonstrations.
all_states_exp, all_actions_exp, _, all_dones_exp, all_next_states_exp = \
self.buffer_exp.get()
all_conf = Variable(self.conf)
all_conf_mean = Variable(all_conf.mean())
conf = all_conf / all_conf_mean
with torch.no_grad():
self.conf = conf
idxes = np.random.randint(low=0, high=all_states_exp.shape[0], size=batch_size)
return (
all_states_exp[idxes],
all_actions_exp[idxes],
all_dones_exp[idxes],
all_next_states_exp[idxes],
self.conf[idxes]
)
def update(self, writer: SummaryWriter):
"""
Update the algorithm
Parameters
----------
writer: SummaryWriter
writer for logs
"""
self.learning_steps += 1
for _ in range(self.epoch_disc):
self.learning_steps_disc += 1
# samples from current policy's trajectories
states, _, _, dones, log_pis, next_states = self.buffer.sample(self.batch_size)
# samples from expert's demonstrations
states_exp, actions_exp, dones_exp, next_states_exp, conf = self.sample_exp(self.batch_size)
# calculate log probabilities of expert actions
with torch.no_grad():
log_pis_exp = self.actor.evaluate_log_pi(states_exp, actions_exp)
# update discriminator
self.update_disc(
states, dones, log_pis, next_states, states_exp,
dones_exp, log_pis_exp, next_states_exp, conf, writer
)
# we don't use reward signals here
states, actions, _, dones, log_pis, next_states = self.buffer.get()
# calculate rewards
rewards = self.disc.calculate_reward(
states, dones, log_pis, next_states)
# update PPO using estimated rewards
self.update_ppo(
states, actions, rewards, dones, log_pis, next_states, writer)
def update_disc(
self,
states: torch.Tensor,
dones: torch.Tensor,
log_pis: torch.Tensor,
next_states: torch.Tensor,
states_exp: torch.Tensor,
dones_exp: torch.Tensor,
log_pis_exp: torch.Tensor,
next_states_exp: torch.Tensor,
conf: torch.Tensor,
writer: SummaryWriter
):
"""
Update the discriminator
Parameters
----------
states: torch.Tensor
states sampled from current IL policy
dones: torch.Tensor
dones sampled from current IL policy
log_pis: torch.Tensor
            log(pi(s|a)) sampled from current IL policy
next_states: torch.Tensor
next states sampled from current IL policy
states_exp: torch.Tensor
states sampled from demonstrations
dones_exp: torch.Tensor
dones sampled from demonstrations
log_pis_exp: torch.Tensor
            log(pi(s|a)) sampled from demonstrations
next_states_exp: torch.Tensor
next states sampled from demonstrations
conf: torch.Tensor
learned confidence of the demonstration samples
writer: SummaryWriter
writer for logs
"""
# output of discriminator is (-inf, inf), not [0, 1]
logits_pi = self.disc(states, dones, log_pis, next_states)
logits_exp = self.disc(states_exp, dones_exp, log_pis_exp, next_states_exp)
# discriminator is to maximize E_{\pi} [log(1 - D)] + E_{exp} [log(D)]
loss_pi = -F.logsigmoid(-logits_pi).mean()
loss_exp = -(F.logsigmoid(logits_exp).mul(conf)).mean()
loss_disc = loss_pi + loss_exp
self.optim_disc.zero_grad()
loss_disc.backward()
self.optim_disc.step()
if self.learning_steps_disc % self.epoch_disc == 0:
writer.add_scalar(
'loss/disc', loss_disc.item(), self.learning_steps)
# discriminator's accuracies
with torch.no_grad():
acc_pi = (logits_pi < 0).float().mean().item()
acc_exp = (logits_exp > 0).float().mean().item()
writer.add_scalar('stats/acc_pi', acc_pi, self.learning_steps)
writer.add_scalar('stats/acc_exp', acc_exp, self.learning_steps)
def save_models(self, save_dir: str):
"""
Save the model
Parameters
----------
save_dir: str
path to save
"""
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
torch.save(self.disc.state_dict(), f'{save_dir}/disc.pkl')
torch.save(self.actor.state_dict(), f'{save_dir}/actor.pkl')
if not self.save_classifier:
torch.save(self.classifier.state_dict(), f'{save_dir}/../classifier.pkl')
self.save_classifier = True
class TwoIWILExpert(PPOExpert):
"""
Well-trained 2IWIL agent
Parameters
----------
state_shape: np.array
shape of the state space
action_shape: np.array
shape of the action space
device: torch.device
cpu or cuda
path: str
path to the well-trained weights
units_actor: tuple
hidden units of the actor
"""
def __init__(
self,
state_shape: np.array,
action_shape: np.array,
device: torch.device,
path: str,
units_actor: tuple = (64, 64)
):
super(TwoIWILExpert, self).__init__(
state_shape=state_shape,
action_shape=action_shape,
device=device,
path=path,
units_actor=units_actor
)
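# Editor's sketch (hypothetical usage; `env` and the weights path are assumptions):
# expert = TwoIWILExpert(
#     state_shape=env.observation_space.shape,
#     action_shape=env.action_space.shape,
#     device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
#     path='weights/actor.pkl',
# )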
| 34.786458
| 105
| 0.586166
| 1,593
| 13,358
| 4.700565
| 0.170747
| 0.041132
| 0.019097
| 0.012019
| 0.246528
| 0.148237
| 0.092815
| 0.076255
| 0.051549
| 0.051549
| 0
| 0.012369
| 0.328193
| 13,358
| 383
| 106
| 34.877285
| 0.82193
| 0.269651
| 0
| 0.104712
| 0
| 0
| 0.023275
| 0.005819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036649
| false
| 0
| 0.078534
| 0
| 0.13089
| 0.010471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0d0f0826bf05af84c68e2d12e3788dc07ebfcd6
| 7,327
|
py
|
Python
|
data/generation_scripts/MantaFlow/scripts3D/compactifyData.py
|
tum-pbs/VOLSIM
|
795a31c813bf072eb88289126d7abd9fba8b0e54
|
[
"MIT"
] | 7
|
2022-01-28T09:40:15.000Z
|
2022-03-07T01:52:00.000Z
|
data/generation_scripts/MantaFlow/scripts3D/compactifyData.py
|
tum-pbs/VOLSIM
|
795a31c813bf072eb88289126d7abd9fba8b0e54
|
[
"MIT"
] | null | null | null |
data/generation_scripts/MantaFlow/scripts3D/compactifyData.py
|
tum-pbs/VOLSIM
|
795a31c813bf072eb88289126d7abd9fba8b0e54
|
[
"MIT"
] | 1
|
2022-03-14T22:08:47.000Z
|
2022-03-14T22:08:47.000Z
|
import numpy as np
import os, shutil
import imageio
# mantaMsg is provided by the MantaFlow runtime; fall back to print under plain Python
try: mantaMsg
except NameError: mantaMsg = print
baseDir = "data/train_verbose"
outDir = "data/train"
#baseDir = "data/test_verbose"
#outDir = "data/test"
outDirVidCopy = "data/videos"
combineVidsAll = {"smoke" : ["densMean", "densSlice", "velMean", "velSlice", "presMean", "presSlice"],
"liquid": ["flagsMean", "flagsSlice", "velMean", "velSlice", "phiMean", "phiSlice"] }
convertData = True
processVid = True
copyVidOnly = False
ignoreTop = ["shapes", "waves"]
ignoreSim = []
ignoreFrameDict = {}
excludeIgnoreFrame = False
topDirs = os.listdir(baseDir)
topDirs.sort()
#shutil.rmtree(outDir)
#os.makedirs(outDir)
# top level folders
for topDir in topDirs:
mantaMsg("\n" + topDir)
if ignoreTop and any( item in topDir for item in ignoreTop ) :
mantaMsg("Ignored")
continue
simDir = os.path.join(baseDir, topDir)
sims = os.listdir(simDir)
sims.sort()
# sim_000000 folders
for sim in sims:
if ignoreSim and any( item in sim for item in ignoreSim ) :
mantaMsg(sim + " - Ignored")
continue
currentDir = os.path.join(simDir, sim)
files = os.listdir(currentDir)
files.sort()
destDir = os.path.join(outDir, topDir, sim)
#if os.path.isdir(destDir):
# shutil.rmtree(destDir)
if not os.path.isdir(destDir):
os.makedirs(destDir)
# single files
for file in files:
filePath = os.path.join(currentDir, file)
# copy src folder to destination
if os.path.isdir(filePath) and file == "src":
dest = os.path.join(destDir, "src")
if not os.path.isdir(dest):
shutil.copytree(filePath, dest, symlinks=False)
# combine video files
elif os.path.isdir(filePath) and file == "render":
if not processVid:
continue
dest = os.path.join(destDir, "render")
if copyVidOnly:
shutil.copytree(filePath, dest, symlinks=False)
continue
if not os.path.isdir(dest):
os.makedirs(dest)
#mantaMsg(file)
renderDir = os.path.join(currentDir, "render")
vidFiles = os.listdir(renderDir)
if "smoke" in topDir: combineVids = combineVidsAll["smoke"]
elif "liquid" in topDir: combineVids = combineVidsAll["liquid"]
else: combineVids = [""]
for vidFile in vidFiles:
if combineVids[0] + "00.mp4" not in vidFile:
continue
vidLine = []
for combineVid in combineVids:
# find all video part files corresponding to current one
vidParts = []
i = 0
while os.path.exists(os.path.join(renderDir, vidFile.replace(combineVids[0]+"00.mp4", combineVid+"%02d.mp4" % i))):
vidParts.append(vidFile.replace(combineVids[0]+"00.mp4", combineVid+"%02d.mp4" % i))
i += 1
assert len(vidParts) == 11, "expected 11 video part files per simulation"
# combine each video part file
loadedVids = []
for part in vidParts:
currentFile = os.path.join(renderDir, part)
loaded = imageio.mimread(currentFile)
#mantaMsg(len(loaded))
#mantaMsg(loaded[0].shape)
loadedVids.append(loaded)
#temp1 = np.concatenate(loadedVids[0:4], axis=2)
#temp2 = np.concatenate(loadedVids[4:8], axis=2)
#temp3 = np.concatenate(loadedVids[8:11]+[np.zeros_like(loadedVids[0])], axis=2)
#vidLine.append(np.concatenate([temp1, temp2, temp3], axis=1))
vidLine.append(np.concatenate(loadedVids, axis=2))
combined = np.concatenate(vidLine, axis=1)
# save combined file
if combineVids[0] == "": newName = os.path.join(dest, "%s_%s_%s.mp4" % (topDir, sim, vidFile.replace("00.mp4", ".mp4")))
else: newName = os.path.join(dest, "%s_%s.mp4" % (topDir, sim))
imageio.mimwrite(newName, combined, quality=6, fps=11, ffmpeg_log_level="error")
# save copy
if combineVids[0] == "": newNameCopy = os.path.join(outDirVidCopy, "%s_%s_%s.mp4" % (topDir, sim, vidFile.replace("00.mp4", ".mp4")))
else: newNameCopy = os.path.join(outDirVidCopy, "%s_%s.mp4" % (topDir, sim))
imageio.mimwrite(newNameCopy, combined, quality=6, fps=11, ffmpeg_log_level="error")
# copy description files to destination
elif os.path.splitext(filePath)[1] == ".json" or os.path.splitext(filePath)[1] == ".py" or os.path.splitext(filePath)[1] == ".log":
shutil.copy(filePath, destDir)
# ignore other dirs and non .npz files
elif os.path.isdir(filePath) or os.path.splitext(filePath)[1] != ".npz" or "part00" not in file:
continue
# combine part files
else:
if not convertData:
continue
if ignoreFrameDict:
filterFrames = []
for key, value in ignoreFrameDict.items():
if key in topDir:
filterFrames = value
break
assert (filterFrames != []), "Keys in ignoreFrameDict don't match the baseDir structure!"
# continue for frames when excluding or including according to filter
if excludeIgnoreFrame == any( item in file for item in filterFrames ):
continue
# find all part files corresponding to current one
parts = [file]
i = 1
while os.path.exists(os.path.join(currentDir, file.replace("part00", "part%02d" % i))):
parts.append(file.replace("part00", "part%02d" % i))
i += 1
assert len(parts) == 11, "expected 11 data part files per frame"
# combine each part file
domain = np.load(os.path.join(currentDir, parts[0]))['arr_0']
res = domain.shape[0]
combined = np.zeros([len(parts), res, res, res, domain.shape[3]])
for f in range(len(parts)):
currentFile = os.path.join(currentDir, parts[f])
loaded = np.load(currentFile)['arr_0']
combined[f] = loaded
# save combined file
newName = file.replace("_part00", "")
np.savez_compressed( os.path.join(destDir, newName), combined )
loaded = np.load( os.path.join(destDir, newName) )['arr_0']
mantaMsg(os.path.join(sim, newName) + "\t" + str(loaded.shape))
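# Editor's note (illustrative equivalent): the preallocate-and-assign loop above
# stacks the 11 part files into shape (11, res, res, res, channels); with a
# hypothetical list `part_paths` the same result is
#   combined = np.stack([np.load(p)['arr_0'] for p in part_paths], axis=0)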
| 43.613095
| 153
| 0.512079
| 744
| 7,327
| 5.017473
| 0.240591
| 0.051433
| 0.050897
| 0.026788
| 0.301902
| 0.235735
| 0.128583
| 0.069113
| 0.069113
| 0.047683
| 0
| 0.020792
| 0.376416
| 7,327
| 167
| 154
| 43.874252
| 0.796236
| 0.123106
| 0
| 0.125
| 0
| 0
| 0.071607
| 0
| 0
| 0
| 0
| 0
| 0.026786
| 1
| 0
| false
| 0
| 0.026786
| 0
| 0.026786
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0d159678318f4de46108d8e3c19f4a355d8744f
| 14,238
|
py
|
Python
|
qiskit/aqua/operators/base_operator.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/operators/base_operator.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/operators/base_operator.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from abc import ABC, abstractmethod
import warnings
from qiskit import QuantumCircuit
class BaseOperator(ABC):
"""Operators relevant for quantum applications."""
@abstractmethod
def __init__(self, basis=None, z2_symmetries=None, name=None):
"""Constructor."""
self._basis = basis
self._z2_symmetries = z2_symmetries
self._name = name if name is not None else ''
@property
def name(self):
return self._name
@name.setter
def name(self, new_value):
self._name = new_value
@property
def basis(self):
return self._basis
@property
def z2_symmetries(self):
return self._z2_symmetries
@abstractmethod
def __add__(self, other):
"""Overload + operation."""
raise NotImplementedError
@abstractmethod
def __iadd__(self, other):
"""Overload += operation."""
raise NotImplementedError
@abstractmethod
def __sub__(self, other):
"""Overload - operation."""
raise NotImplementedError
@abstractmethod
def __isub__(self, other):
"""Overload -= operation."""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""Overload unary - ."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""Overload == operation."""
raise NotImplementedError
@abstractmethod
def __str__(self):
"""Overload str()."""
raise NotImplementedError
@abstractmethod
def __mul__(self, other):
"""Overload *."""
raise NotImplementedError
@abstractmethod
def construct_evaluation_circuit(self, wave_function):
"""Build circuits to compute the expectation w.r.t the wavefunction."""
raise NotImplementedError
@abstractmethod
def evaluate_with_result(self, result):
"""
Consume the result from the quantum computer to build the expectation;
only used together with the `construct_evaluation_circuit` method.
"""
raise NotImplementedError
@abstractmethod
def evolve(self):
"""
Time evolution, exp(-jtH).
"""
raise NotImplementedError
@abstractmethod
def print_details(self):
raise NotImplementedError
@abstractmethod
def _scaling_weight(self, scaling_factor):
# TODO: will be removed after the deprecated method is removed.
raise NotImplementedError
@abstractmethod
def chop(self, threshold, copy=False):
raise NotImplementedError
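# Editor's sketch (hypothetical subclass, for illustration of the ABC contract):
# a concrete operator must implement every @abstractmethod above, otherwise
# instantiation raises TypeError.
#   class MyOperator(BaseOperator):
#       def __init__(self):
#           super().__init__(name='my_op')
#       def __add__(self, other): ...
#       # ... and likewise for the remaining abstract methods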
def print_operators(self, mode='paulis'):
warnings.warn("print_operators() is deprecated and it will be removed after 0.6, "
"Use `print_details()` instead",
DeprecationWarning)
return self.print_details()
@property
def coloring(self):
warnings.warn("coloring is removed, "
"Use the `TPBGroupedWeightedPauliOperator` class to group a paulis directly",
DeprecationWarning)
return None
def _to_dia_matrix(self, mode=None):
warnings.warn("_to_dia_matrix method is removed, use the `MatrixOperator` class to get diagonal matrix. And "
"the current deprecated method does NOT modify the original object, it returns the dia_matrix",
DeprecationWarning)
from .op_converter import to_matrix_operator
mat_op = to_matrix_operator(self)
return mat_op.dia_matrix
def enable_summarize_circuits(self):
warnings.warn("enable_summarize_circuits method is removed. Enable the summary at QuantumInstance",
DeprecationWarning)
def disable_summarize_circuits(self):
warnings.warn("disable_summarize_circuits method is removed. Disable the summary at QuantumInstance",
DeprecationWarning)
@property
def representations(self):
warnings.warn("representations method is removed. each operator is self-defined, ",
DeprecationWarning)
return None
def eval(self, operator_mode, input_circuit, backend, backend_config=None, compile_config=None,
run_config=None, qjob_config=None, noise_config=None):
warnings.warn("eval method is removed. please use `construct_evaluate_circuit` and submit circuit by yourself "
"then, use the result along with `evaluate_with_result` to get mean and std. "
"Furthermore, if you compute the expectation against a statevector (numpy array), you can "
"use evaluate_with_statevector directly.",
DeprecationWarning)
return None, None
def convert(self, input_format, output_format, force=False):
warnings.warn("convert method is removed. please use the conversion functions in the "
"qiskit.aqua.operators.op_converter module. There are different `to_xxx_operator` functions"
" And the current deprecated method does NOT modify the original object, it returns.",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator, to_matrix_operator, to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
if output_format == 'paulis':
return to_weighted_pauli_operator(self)
elif output_format == 'grouped_paulis':
return to_tpb_grouped_weighted_pauli_operator(self, TPBGroupedWeightedPauliOperator.sorted_grouping)
elif output_format == 'matrix':
return to_matrix_operator(self)
def two_qubit_reduced_operator(self, m, threshold=10 ** -13):
warnings.warn("two_qubit_reduced_operator method is deprecated and it will be removed after 0.6. "
"Now it is moved to the `Z2Symmetries` class as a classmethod. """
"Z2Symmeteries.two_qubit_reduction(num_particles)",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
from .weighted_pauli_operator import Z2Symmetries
return Z2Symmetries.two_qubit_reduction(to_weighted_pauli_operator(self), m)
@staticmethod
def qubit_tapering(operator, cliffords, sq_list, tapering_values):
warnings.warn("qubit_tapering method is deprecated and it will be removed after 0.6. "
"Now it is moved to the `Z2Symmetries` class.",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
from .weighted_pauli_operator import Z2Symmetries
sq_paulis = [x.paulis[1][1] for x in cliffords]
symmetries = [x.paulis[0][1] for x in cliffords]
tmp_op = to_weighted_pauli_operator(operator)
z2_symmetries = Z2Symmetries(symmetries, sq_paulis, sq_list, tapering_values)
return z2_symmetries.taper(tmp_op)
def scaling_coeff(self, scaling_factor):
warnings.warn("scaling_coeff method is deprecated and it will be removed after 0.6. "
"Use `* operator` with the scalar directly.",
DeprecationWarning)
self._scaling_weight(scaling_factor)
return self
def zeros_coeff_elimination(self):
warnings.warn("zeros_coeff_elimination method is deprecated and it will be removed after 0.6. "
"Use chop(0.0) to remove terms with 0 weight.",
DeprecationWarning)
self.chop(0.0)
return self
@staticmethod
def construct_evolution_circuit(slice_pauli_list, evo_time, num_time_slices, state_registers,
ancillary_registers=None, ctl_idx=0, unitary_power=None, use_basis_gates=True,
shallow_slicing=False):
from .common import evolution_instruction
warnings.warn("The `construct_evolution_circuit` method is deprecated, use the `evolution_instruction` in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
if state_registers is None:
raise ValueError('Quantum state registers are required.')
qc_slice = QuantumCircuit(state_registers)
if ancillary_registers is not None:
qc_slice.add_register(ancillary_registers)
controlled = ancillary_registers is not None
inst = evolution_instruction(slice_pauli_list, evo_time, num_time_slices, controlled, 2 ** ctl_idx,
use_basis_gates, shallow_slicing)
qc_slice.append(inst, [q for qreg in qc_slice.qregs for q in qreg])
qc_slice = qc_slice.decompose()
return qc_slice
@staticmethod
def row_echelon_F2(matrix_in):
from .common import row_echelon_F2
warnings.warn("The `row_echelon_F2` method is deprecated, use the row_echelon_F2 function in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
return row_echelon_F2(matrix_in)
@staticmethod
def kernel_F2(matrix_in):
from .common import kernel_F2
warnings.warn("The `kernel_F2` method is deprecated, use the kernel_F2 function in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
return kernel_F2(matrix_in)
def find_Z2_symmetries(self):
warnings.warn("The `find_Z2_symmetries` method is deprecated and it will be removed after 0.6, "
"Use the class method in the `Z2Symmetries` class instead",
DeprecationWarning)
from .weighted_pauli_operator import Z2Symmetries
from .op_converter import to_weighted_pauli_operator
wp_op = to_weighted_pauli_operator(self)
self._z2_symmetries = Z2Symmetries.find_Z2_symmetries(wp_op)
return self._z2_symmetries.symmetries, self._z2_symmetries.sq_paulis, \
self._z2_symmetries.cliffords, self._z2_symmetries.sq_list
def to_grouped_paulis(self):
warnings.warn("to_grouped_paulis method is deprecated and it will be removed after 0.6. And the current "
"deprecated method does NOT modify the original object, it returns the grouped weighted pauli "
"operator. Please check the qiskit.aqua.operators.op_convertor for converting to different "
"types of operators. For grouping paulis, you can create your own grouping func to create the "
"class you need.",
DeprecationWarning)
from .op_converter import to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
return to_tpb_grouped_weighted_pauli_operator(self, grouping_func=TPBGroupedWeightedPauliOperator.sorted_grouping)
def to_paulis(self):
warnings.warn("to_paulis method is deprecated and it will be removed after 0.6. And the current deprecated "
"method does NOT modify the original object, it returns the weighted pauli operator."
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
return to_weighted_pauli_operator(self)
def to_matrix(self):
warnings.warn("to_matrix method is deprecated and it will be removed after 0.6. And the current deprecated "
"method does NOT modify the original object, it returns the matrix operator."
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_matrix_operator
return to_matrix_operator(self)
def to_weighted_pauli_operator(self):
warnings.warn("to_weighted_apuli_operator method is temporary helper method and it will be removed after 0.6. "
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
return to_weighted_pauli_operator(self)
def to_matrix_operator(self):
warnings.warn("to_matrix_operator method is temporary helper method and it will be removed after 0.6. "
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_matrix_operator
return to_matrix_operator(self)
def to_tpb_grouped_weighted_pauli_operator(self):
warnings.warn("to_tpb_grouped_weighted_pauli_operator method is temporary helper method and it will be "
"removed after 0.6. Please check the qiskit.aqua.operators.op_convertor for converting to "
"different types of operators",
DeprecationWarning)
from .op_converter import to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
return to_tpb_grouped_weighted_pauli_operator(
self, grouping_func=TPBGroupedWeightedPauliOperator.sorted_grouping)
| 44.633229
| 122
| 0.666877
| 1,626
| 14,238
| 5.613161
| 0.181427
| 0.041306
| 0.066725
| 0.058398
| 0.494467
| 0.426646
| 0.401227
| 0.391585
| 0.331872
| 0.318396
| 0
| 0.007989
| 0.270333
| 14,238
| 318
| 123
| 44.773585
| 0.870536
| 0.069883
| 0
| 0.409283
| 0
| 0
| 0.287783
| 0.052892
| 0
| 0
| 0
| 0.003145
| 0
| 1
| 0.172996
| false
| 0
| 0.097046
| 0.012658
| 0.379747
| 0.021097
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0d37d7e9574c755f53a5c193de3f30cb81ee61a
| 4,447
|
py
|
Python
|
DataAnalysis/utils.py
|
Timlo512/AnomalyStockDetection
|
29f9aaef14f1d9823980d8022cdce1f7f6310813
|
[
"MIT"
] | 2
|
2020-12-19T05:24:29.000Z
|
2021-05-15T19:35:40.000Z
|
DataAnalysis/utils.py
|
Timlo512/AnomalyStockDetection
|
29f9aaef14f1d9823980d8022cdce1f7f6310813
|
[
"MIT"
] | null | null | null |
DataAnalysis/utils.py
|
Timlo512/AnomalyStockDetection
|
29f9aaef14f1d9823980d8022cdce1f7f6310813
|
[
"MIT"
] | 5
|
2020-11-21T02:25:13.000Z
|
2022-01-31T12:46:02.000Z
|
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
import re
def convert_data_sparse_matrix(df, row_label = 'stock_code', col_label = 'name_of_ccass_participant', value_label = 'shareholding'):
"""
Pivot table
"""
try:
# Prepare zero matrix
row_dim = len(df[row_label].unique())
col_dim = len(df[col_label].unique())
sparse_matrix = np.zeros((row_dim, col_dim))
# Prepare label to index dictionaries
row_ind_dict = {label: ind for ind, label in enumerate(sorted(df[row_label].unique().tolist()))}
col_ind_dict = {label: ind for ind, label in enumerate(sorted(df[col_label].unique().tolist()))}
# Transform row_label column and col_label column to index
df['row_ind'] = df[row_label].apply(lambda x: row_ind_dict[x])
df['col_ind'] = df[col_label].apply(lambda x: col_ind_dict[x])
for ind, row in df.iterrows():
# Get index and shareholding
row_ind = row['row_ind']
col_ind = row['col_ind']
value = row[value_label]
# Assign to sparse matrix
sparse_matrix[row_ind, col_ind] += value
return sparse_matrix, row_ind_dict, col_ind_dict
except Exception as e:
print(e)
return None
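# Editor's note (illustrative alternative, same default column names as above):
#   pivot = df.pivot_table(index='stock_code', columns='name_of_ccass_participant',
#                          values='shareholding', aggfunc='sum', fill_value=0)
#   sparse_matrix = pivot.to_numpy()
# pivot.index / pivot.columns then play the role of the returned index dictionaries.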
def load_data(data_path):
# Read csv files
df = pd.read_csv(data_path)
# Convert stock code to formatted string
df['stock_code'] = df['stock_code'].apply(lambda x: ('00000' + str(x))[-5:])
return df
def f_score(y_truth, y_pred, beta = 1):
try:
# Run confusion_matrix
tn, fp, fn, tp = confusion_matrix(y_truth, y_pred).ravel()
precision_value = precision(tp, fp)
recall_value = recall(tp, fn)
# report confusion counts, precision and recall
print('True positive: {}, True Negative: {}, False Positive: {}, False Negative: {}'.format(tp, tn, fp, fn))
print('Precision is ', format(precision_value * 100, '.2f'), '%')
print('Recall is ', format(recall_value * 100, '.2f'), '%')
return (1 + beta**2) * (precision_value * recall_value) / ((beta**2 * precision_value + recall_value))
except Exception as e:
print(e)
return None
def precision(tp, fp):
return tp / (tp + fp)
def recall(tp, fn):
return tp / (tp + fn)
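# Editor's worked example: for beta = 1, f_score reduces to the usual F1; with
# tp=8, fp=2, fn=4 -> precision = 0.8, recall = 2/3, F1 = 2*0.8*(2/3)/(0.8+2/3) ~ 0.727.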
def get_truth_label(path, threshold = 0.3):
# Load dataset
df = pd.read_csv(path)
# preprocess the data in order to get a proper data structure
df = df.set_index('Unnamed: 0').transpose().dropna()
df = df.reset_index()
df['index'] = df['index'].apply(lambda x: retrieve_stock_code(x))
df = df.set_index('index')
# Define col_dim and empty dataframe
col_dim = len(df.columns)
temp = pd.DataFrame()
# Create a list of column name without the first element
first_dim = df.columns[0]
col_list = df.columns.to_list()
col_list.remove(first_dim)
for col in col_list:
# Assign the col to second_dim, as current date
second_dim = col
# Calculate the daily % change of stock price
temp[col] = (df[second_dim] - df[first_dim]) / df[first_dim]
# Assign the col to first dim, as previous date
first_dim = col
result = np.sum(temp > threshold, axis = 1)
return {stock_code:1 if count > 0 else 0 for stock_code, count in result.items()}
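# Editor's worked example: a stock is labeled 1 if any day-over-day return
# exceeds `threshold`; with prices [10, 11, 15] and threshold=0.3 the second
# return is (15 - 11) / 11 ~ 0.36 > 0.3, so the label is 1.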
def retrieve_stock_code(x):
d = re.search('[0-9]*', x)
if d:
return ('00000' + d.group(0))[-5:]
else:
return None
def cluster_predict(label, min_pts = 'auto'):
"""
Input: an array of clsutered label for each instance
return: an array of anomal label for each instance
"""
try:
# Get unique labels and their counts
(unique, counts) = np.unique(label, return_counts = True)
# Define the minimum cluster size a normal cluster must exceed; 'auto' uses the smallest cluster's count
if min_pts == 'auto':
min_pts = min(counts)
print('Minimum points of a cluster among the clusters: ', min_pts)
else:
min_pts = int(min_pts)
# Prepare label_dict for mapping
label_dict = {label: 0 if count > min_pts else 1 for label, count in zip(unique, counts)}
# Map label_dict to label
return np.array([label_dict[i] for i in label])
except Exception as e:
print(e)
return None
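# Editor's sketch (hypothetical usage; `X` and the DBSCAN parameters are assumptions):
#   from sklearn.cluster import DBSCAN
#   labels = DBSCAN(eps=0.5).fit_predict(X)
#   anomalies = cluster_predict(labels, min_pts='auto')
# Instances whose cluster size does not exceed min_pts are flagged 1 (anomalous).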
| 32.698529
| 132
| 0.614796
| 641
| 4,447
| 4.098284
| 0.268331
| 0.027408
| 0.015226
| 0.020556
| 0.098211
| 0.098211
| 0.075371
| 0.075371
| 0.062429
| 0.03426
| 0
| 0.011448
| 0.273218
| 4,447
| 135
| 133
| 32.940741
| 0.801361
| 0.190241
| 0
| 0.192308
| 0
| 0
| 0.084583
| 0.007072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.051282
| 0.025641
| 0.307692
| 0.089744
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0d5155e320c1b2b6704a06d42d9b58088cb485b
| 1,429
|
py
|
Python
|
scripts/prepare_upload_files.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 3
|
2020-01-23T08:48:33.000Z
|
2021-07-21T02:42:28.000Z
|
scripts/prepare_upload_files.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 21
|
2019-10-25T15:38:37.000Z
|
2022-01-27T16:04:04.000Z
|
scripts/prepare_upload_files.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 1
|
2019-10-24T18:15:26.000Z
|
2019-10-24T18:15:26.000Z
|
'''
Prepare some files to test the upload functionality.
'''
import sys
sys.path.append('../')
# explicit imports for pd/np used below (the star imports may also provide them)
import numpy as np
import pandas as pd
from database import *
from pymongo import MongoClient
mongo = MongoClient(MONGOURI)
db = mongo['SCV']
coll = db['dataset']
from gene_expression import *
expr_df, meta_doc = load_read_counts_and_meta(organism='mouse', gse='GSE96870')
# rename the samples
expr_df.columns = ['sample_%d' % i for i in range(len(expr_df.columns))]
meta_df = pd.DataFrame(meta_doc['meta_df'])
meta_df.index = expr_df.columns
meta_df.index.name = 'sample_ID'
# parse the meta_df a bit
meta_df['Sample_characteristics_ch1'] = meta_df['Sample_characteristics_ch1'].map(lambda x:x.split('\t'))
keys_from_char_ch1 = [item.split(': ')[0] for item in meta_df['Sample_characteristics_ch1'][0]]
for i, key in enumerate(keys_from_char_ch1):
meta_df[key] = meta_df['Sample_characteristics_ch1'].map(lambda x:x[i].split(': ')[1])
# drop unnecessary columns in meta_df
meta_df = meta_df.drop(['Sample_characteristics_ch1',
'Sample_relation', 'Sample_geo_accession', 'Sample_supplementary_file_1'],
axis=1)
# fake a column of continuous values
meta_df['random_continuous_attr'] = np.random.randn(meta_df.shape[0])
meta_df.to_csv('../data/sample_metadata.csv')
# raw read counts
expr_df.to_csv('../data/sample_read_counts_%dx%d.csv' % expr_df.shape)
# CPMs
expr_df = compute_CPMs(expr_df)
expr_df.to_csv('../data/sample_CPMs_%dx%d.csv' % expr_df.shape)
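# Editor's note (assumption about compute_CPMs, imported from gene_expression):
# it presumably normalizes raw counts to counts-per-million per sample, i.e.
#   cpm = expr_df / expr_df.sum(axis=0) * 1e6
# so each column of the written CPM file should sum to ~1e6.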
| 30.404255
| 105
| 0.751575
| 233
| 1,429
| 4.313305
| 0.399142
| 0.095522
| 0.119403
| 0.107463
| 0.271642
| 0.157214
| 0.081592
| 0.081592
| 0.081592
| 0
| 0
| 0.014019
| 0.10147
| 1,429
| 46
| 106
| 31.065217
| 0.768692
| 0.131561
| 0
| 0
| 0
| 0
| 0.295844
| 0.220864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|