index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
52,396 | anevolina/GoogleCalendarBot | refs/heads/master | /tg_bot/GC_TelegramBot.py | """Telegram Bot module for communicating with the main API"""
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import tg_bot.bot_answers as bot_answers
from core.main_api import (create_event, add_calendar, unbind_calendar, authorise_user_step1, authorise_user_step2,
check_user_settings, logout)
from core.exceptions import GCUnauthorisedUserError
#settings - dict - auxiliary variable to store what kind of settings user is about to update
settings = {}
def handle_user_message(bot, update):
    """Handle simple (non-command) message from user;
    Input:
    bot, update - standard callback arguments from the telegram bot API
    Process:
    Check if we have user_id in settings variable - possible variants are ['CALENDAR', 'AUTHORISE'] - if the
    user wants to change calendar or is in the process of authorization.
    Else - parse the message and create an event;
    Output:
    message from bot to the user
    """
    user_id = get_user_id(bot, update)
    message = get_message(bot, update)
    # pending state for this user, stored by /bind or /start
    user_set = get_settings(user_id, 'code')
    if user_set == 'CALENDAR':
        # if user wants to bind/add calendar to their account
        try:
            calendar_status = add_calendar(user_id=user_id, calendar_name=message)
        except GCUnauthorisedUserError:
            #if we don't have records about the user in database
            bot_answer = bot_answers.get_unauthorised_user_error_message()
        else:
            bot_answer = bot_answers.get_calendar_status_message(calendar_status, message)
        # state is cleared whether the bind succeeded or not
        del_settings(user_id)
    elif user_set == 'AUTHORISE':
        #if user got auth url and now sent us the code
        authorised = authorise_user_step2(user_id, message)
        if authorised:
            bot_answer = bot_answers.get_authorised_message()
            # delete both the url message and the code message from the chat
            message_id = update.message.message_id
            del_auth_messages(bot, user_id, message_id)
            del_settings(user_id)
        else:
            # wrong code: keep the AUTHORISE state so the user can retry
            bot_answer = bot_answers.get_wrong_code_message()
    else:
        #assume user just wants to add an event
        try:
            status, start, attendees, location = create_event(user_id=user_id, message=message)
        except GCUnauthorisedUserError:
            # if we don't have records about the user in our database
            bot_answer = bot_answers.get_unauthorised_user_error_message()
        else:
            bot_answer = bot_answers.get_event_status_answer(status, start=start, attendees=attendees, location=location)
    send_message(bot, user_id, bot_answer)
def del_auth_messages(bot, user_id, message_id=None):
    """Delete the two auth-related messages (auth url + user's code) - just to be enough paranoid.

    :param bot: telegram bot instance used to delete the messages
    :param user_id: telegram chat/user id the messages were sent to
    :param message_id: id of the message holding the user's code, if known
    """
    # id of the message that carried the auth url, stored by start_callback
    url_message = get_settings(user_id, 'url_message')
    if message_id:
        bot.delete_message(chat_id=user_id, message_id=message_id)
    if url_message:
        bot.delete_message(chat_id=user_id, message_id=url_message)
    # dropped a dead trailing `pass` that followed the last statement
def add_calendar_callback(bot, update):
    """Handle the /bind command.

    If the user is already known to the database, remember that the next
    plain message must be treated as a calendar name ('CALENDAR' state).
    """
    user_id = get_user_id(bot, update)
    try:
        check_user_settings(user_id)
    except GCUnauthorisedUserError:
        # unknown user - ask them to authorise first
        reply = bot_answers.get_unauthorised_user_error_message()
    else:
        add_settings(user_id, code='CALENDAR')
        reply = bot_answers.get_add_calendar_message()
    send_message(bot, user_id, reply)
def chancel_callback(bot, update):
    """Reaction to /cancel command - cancel all started updates - regarding calendar setting or auth

    NOTE(review): the function name is misspelled ('chancel' instead of 'cancel');
    kept as-is because the command dispatcher elsewhere registers this exact name.
    """
    user_id = get_user_id(bot, update)
    # remove any lingering auth messages and forget the pending state
    del_auth_messages(bot, user_id)
    del_settings(user_id)
    bot_answer = bot_answers.get_canceled_message()
    send_message(bot, user_id, bot_answer)
def start_callback(bot, update):
    """Handle the /start command: begin the authorisation flow.

    Sends the authorisation url to the user and stores the 'AUTHORISE'
    state together with the url message id (so it can be deleted later).
    """
    user_id = get_user_id(bot, update)
    auth_url = authorise_user_step1()
    sent = send_message(bot, user_id, bot_answers.get_authorise_url_message(auth_url))
    add_settings(user_id, code='AUTHORISE', url_message=sent.message_id)
def logout_callback(bot, update):
    """Handle the /logout command: erase every record about the user."""
    user_id = get_user_id(bot, update)
    try:
        result = logout(user_id=user_id)
    except GCUnauthorisedUserError:
        # nothing to log out - the user was never authorised
        reply = bot_answers.get_unauthorised_user_error_message()
    else:
        reply = bot_answers.get_logout_user_message(result)
    send_message(bot, user_id, reply)
def help_callback(bot, update):
    """Handle the /help command by sending the static help text."""
    chat = get_user_id(bot, update)
    send_message(bot, chat, bot_answers.get_help_message())
def unbind_calendar_callback(bot, update):
    """Handle the /unbind command: switch the user back to the primary calendar."""
    user_id = get_user_id(bot, update)
    try:
        result = unbind_calendar(user_id=user_id)
    except GCUnauthorisedUserError:
        # unknown user - there is nothing to unbind
        reply = bot_answers.get_unauthorised_user_error_message()
    else:
        reply = bot_answers.get_del_status_message(result)
    send_message(bot, user_id, reply)
def add_settings(user_id, **kwargs):
    """Store settings for the user in the module-level settings variable.

    Overwrites any previous entry for ``user_id``.
    """
    # dict(kwargs) copies the kwargs mapping directly; the former
    # {k: v for k, v in kwargs.items()} comprehension did the same, slower.
    settings[user_id] = dict(kwargs)
def del_settings(user_id):
    """Forget any pending state for ``user_id``; a no-op if none is stored."""
    # pop with a default replaces the old try/del/except KeyError dance
    # (and the redundant bare `return` that followed it)
    settings.pop(user_id, None)
def get_settings(user_id, key=False):
    """Return stored state for a user.

    With ``key`` given, return that single value (or None); otherwise
    return the whole per-user dict (or None if the user has no state).
    """
    entry = settings.get(user_id)
    if not entry or not key:
        return entry
    return entry.get(key)
def send_message(bot, chat_id, message):
    """Deliver ``message`` via the bot and return the sent Message object."""
    return bot.send_message(chat_id=chat_id, text=message)
def get_user_id(bot, update):
    """Extract the sender's telegram user id from a bot update.

    ``bot`` is unused but kept for a uniform helper signature.
    """
    sender = update.message.from_user
    return sender.id
def get_message(bot, update):
    """Get the user's message text (stripped of surrounding whitespace) from a telegram bot update"""
return update.message.text.strip() | {"/tg_bot/add_event_bot.py": ["/tg_bot/GC_TelegramBot.py"], "/core/calendar_core.py": ["/core/logger.py"], "/core/main_api.py": ["/core/calendar_core.py", "/core/exceptions.py", "/core/logger.py"], "/tg_bot/GC_TelegramBot.py": ["/tg_bot/bot_answers.py", "/core/main_api.py", "/core/exceptions.py"]} |
52,413 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/22/1/checker.py | import sys
def eprint(*args, **kwargs):
    # print to stderr: the judge reads the verdict from this stream, keeping
    # it separate from the contestant-visible stdout
    print(*args, file=sys.stderr, **kwargs)
# Checker entry point: argv = [input file, contestant output, reference answer].
# NOTE(review): `input` shadows the builtin; kept as-is to avoid behavior drift.
input, output, answer = sys.argv[1:]
f_in = open(input)
f_out = open(output)
f_answer = open(answer)
f_in_lines = f_in.readlines()
# input layout: line 0 = reference cost, line 1 = n, line 2 = m,
# then m lines, then line m+3 = k (number of answer lines to compare)
ref_ans, n, m = int(f_in_lines[0].strip()), int(f_in_lines[1].strip()), int(f_in_lines[2].strip())
k = int(f_in_lines[m+3].strip())
f_answer_lines = f_answer.readlines()
f_out_lines = f_out.readlines()
score = 0
# first k lines must match the reference answer exactly (whitespace-stripped)
for i in range(k):
    if f_out_lines[i].strip() != f_answer_lines[i].strip():
        eprint("Wrong answer at the line %d." % (i+1))
        sys.exit(0)
# line k holds the operation counters; presumably indices 0-2 are construction
# counts and 5 is the destruction count -- TODO confirm against the statement
cnt = list(map(int, f_out_lines[k].strip().split()))
if cnt[0] + cnt[1] + cnt[2] != cnt[5]:
    eprint("Memory leak detected. Construction times: %d. Destruction times: %d" %
           (cnt[0] + cnt[1] + cnt[2], cnt[5]))
    sys.exit(0)
# weighted cost of the contestant's operations, compared against ref_ans
tmp = (cnt[0] + cnt[1] + cnt[3]) * 10 + cnt[2] + cnt[4]
if tmp <= ref_ans:
    # line k+1 must self-report whether the cost bound was met
    assert f_out_lines[k+1].strip() == "YES"
    eprint("ok")
else:
    assert f_out_lines[k+1].strip() == "NO"
    # partial credit (70%) when correct but over the cost budget
    eprint("points 0.7\nYour count times: %s > %d" % (f_out_lines[k].strip(), ref_ans))
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,414 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/22/1/uoj_judger_reporter.py | #coding=utf-8
from uoj_judger_config import *
import judger_class_lib as lib
import fcntl
class pyjudgerReporter:
def __init__(self, config, test_num):
self.config = config
self.tot_time = 0
self.max_memory = 0
self.details_out = ""
self.tot_score = 0
self.test_num = test_num
def report_judgement_status(self, info):
print(self.config.result_path+"/cur_status.txt")
F = open(self.config.result_path+"/cur_status.txt", "w")
fcntl.flock(F.fileno(), fcntl.LOCK_EX)
F.write(info[:512])
def add_point_info(self, info):
if info.ust >= 0:
self.tot_time += info.ust
if info.usm >= self.max_memory:
self.max_memory = info.usm
self.details_out += "<test num=\"%u\" score=\"%u\" info=\"%s\" time=\"%u\" memory=\"%u\">"\
% (info.num, info.scr / self.test_num, info.info, info.ust, info.usm)
self.tot_score += info.scr / self.test_num
if info.input:
self.details_out += "<in>%s</in>" % (lib.htmlspecialchars(info.input))
if info.output:
self.details_out += "<out>%s</out>" % (lib.htmlspecialchars(info.output))
if info.res:
self.details_out += "<res>%s</res>" % (lib.htmlspecialchars(info.res))
if info.extrainfo:
self.details_out += info.extrainfo
self.details_out += "</test>\n"
def add_subtask_info(self, subTaskIndex, scr=0, info="", points=None):
self.details_out += "<subtask "
self.details_out += " num=\"%u\" "%subTaskIndex
self.details_out += " score=\"%u\" "%scr
self.details_out += " info=\"%s\" "%lib.htmlspecialchars(info)
self.details_out += " >\n"
if points:
for each in points:
self.add_point_info(each)
self.details_out += " </subtask>\n"
def end_judge_ok(self):
F = open(self.config.result_path+"/result.txt", "w")
F.write("score %d\n" % self.tot_score)
F.write("time %d\n" % self.tot_time)
F.write("memory %d\n" % self.max_memory)
F.write("details\n")
F.write("<tests>\n")
F.write(self.details_out)
F.write("</tests>\n")
F.close()
exit(0)
def end_judge_judgement_failed(self, info=""):
F = open(self.config.result_path+"/result.txt", "w")
F.write("error Judgment Failed\n")
F.write("details\n")
F.write("<error>%s</error>\n" % lib.htmlspecialchars(info))
F.close()
exit(0)
def end_judge_compile_error(self, info=""):
F = open(self.config.result_path+"/result.txt", "w")
F.write("error Compile Error\n")
F.write("details\n")
F.write("<error>%s</error>\n" % lib.htmlspecialchars(info))
F.close()
exit(0)
def end_judge_custom_error(self, label, info=""):
F = open(self.config.result_path+"/result.txt", "w")
F.write("error %s\n" % label)
F.write("details\n")
F.write("<error>%s</error>\n" % lib.htmlspecialchars(info))
F.close()
exit(0)
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,415 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/37/1/uoj_judger_config.py | #coding=utf-8
import os
class pyjudgerConfig:
    """Loads judger configuration from submission.conf and problem.conf.

    Keys are merged into self.config in load order, so problem.conf
    overrides submission.conf for duplicate keys.
    """
    def __init__(self, main_path, work_path, result_path, data_path):
        self.config = dict()
        def load_config(config_file):
            # one "key value" pair per line; fix: the original leaked the
            # file handle (open(...).readlines() without close)
            with open(config_file, 'r') as f:
                for each_config in f:
                    key, value = each_config.strip('\n').strip('\r').split(' ')
                    self.config[key] = value
        self.main_path = main_path
        self.work_path = work_path
        self.result_path = result_path
        load_config(self.work_path + "/submission.conf")
        self.problem_id = self.config['problem_id']
        self.data_path = data_path
        load_config(self.data_path + "/problem.conf")
        self.exec_file_name = "./main"
        #exec("cp %s/require/* %s 2>/dev/null"%(self.__data_path, self.__work_path))
        # this part can be modified per deployment:
        # a builtin checker under main_path wins over the per-problem chk binary
        if "use_builtin_checker" in self.config:
            self.config["checker"] = self.main_path + "/builtin/checker/" + self.config["use_builtin_checker"]
        else:
            self.config["checker"] = self.data_path + "/chk"
        self.config["validator"] = self.data_path + "/val"
        #os.chdir(self.work_path)
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,416 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/19/1/uoj_judger_compiler.py | #coding=utf-8
import judger_class_lib as lib
class pyjudgerCompiler(object):
    """Compiles a submission inside the sandbox via lib.run_program."""
    def __init__(self, config):
        # config: pyjudgerConfig with paths and the parsed *.conf key/value map
        self.config = config
    def run_compiler(self, path, arg):
        """Run the compiler command `arg` in `path` under sandbox limits.

        Returns a lib.RunCompilerResult; on failure, .info carries either a
        preview of the compiler's stderr, "No Comment" (judger failure), or
        the run-status name.
        """
        argv = ["--type=compiler", "--work-path=" + path]
        argv.extend(arg)
        ret = lib.run_program( \
            main_path=self.config.main_path, \
            work_path=self.config.work_path, \
            result_file_name=self.config.result_path + "/run_compiler_result.txt", \
            input_file_name="/dev/null", \
            output_file_name="stderr", \
            error_file_name=self.config.result_path + "/compiler_result.txt", \
            limit=lib.RL_COMPILER_DEFAULT, \
            para_list=argv, \
            uoj_offline=("uoj_offline" in self.config.config))
        # succeeded only when the sandbox reports AC *and* the compiler exited 0
        res = lib.RunCompilerResult(type=ret.type, ust=ret.ust, usm=ret.usm, \
            succeeded=(ret.type == lib.RS_AC) and (ret.exit_code == 0))
        if not res.succeeded:
            if res.type == lib.RS_AC:
                # compiler itself failed: show its diagnostics (first ~500*4 chars)
                res.info = lib.file_preview(self.config.result_path + "/compiler_result.txt", 500)
            elif res.type == lib.RS_JGF:
                res.info = "No Comment"
            else:
                res.info = "Compiler " + lib.info_str(res.type)
        return res
    def compile_cpp(self, name, path=None):
        """Compile answer.code as C++11 into executable `name`."""
        path = path or self.config.work_path
        argv = ["/usr/bin/g++", "-o", name, "-x", "c++", "answer.code", "-std=c++11", "-lm", "-O2", "-DONLINE_JUDGE"]
        return self.run_compiler(path, argv)
    def compile_command(self, para, path=None):
        """Compile with an arbitrary space-separated command line."""
        path = path or self.config.work_path
        argv = para.split(' ')
        return self.run_compiler(path, argv)
    def compile(self, para):
        """Compile the submission named `para`.

        NOTE(review): both branches currently fall through to compile_cpp;
        the <name>_language config key is read but only logged.
        """
        name = para
        if name + '_language' in self.config.config:
            lang = self.config.config[name + '_language']
            print("has a language :", lang)
            #TODO check language type
            return self.compile_cpp(name)
        else:
            return self.compile_cpp(name)
# if __name__ == "__main__":
# def test():
# C = pyjudgerConfig()
# os.chdir(C.work_path)
# my_compiler = pyjudgerCompiler(C)
# #以下是两种编译方法
# #1. 默认编译
# my_compiler.compile("main")
# #2. 使用 g++ 或者 g++-4.8 或者直接用 /usr/bin/g++-4.8
# #my_compiler.custom_compile("g++ -o main -x c++ main.code -lm -O2 -DONLINE_JUDGE")
# test()
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,417 | JianGuanTHU/UOJ_Offline | refs/heads/master | /judger_class_lib.py | import os
# Run-status codes reported by run_program (display names per info_str below).
RS_AC = 0   # ran to completion (verdict still depends on exit code / checker)
RS_JGF = 7  # Judgement Failed (internal judger error)
RS_MLE = 3  # Memory Limit Exceeded
RS_TLE = 4  # Time Limit Exceeded
RS_OLE = 5  # Output Limit Exceeded
RS_RE = 2   # Runtime Error
RS_DGS = 6  # Dangerous Syscalls
class PointInfo:
    """Result record for a single test point: score, resource usage and report texts."""
    def __init__(self, num, scr, ust=0, usm=0,
                 info="", input="", out="", res="", extrainfo=""):
        self.num = num              # test point number
        self.scr = scr              # score awarded for this point
        self.ust = ust              # user time consumed
        self.usm = usm              # user memory consumed
        self.info = info            # verdict string, e.g. "Accepted"
        self.input = input          # preview of the input file
        self.output = out           # preview of the program's output
        self.res = res              # checker result text
        self.extrainfo = extrainfo  # extra XML injected verbatim into the report
class RunLimit:
    """Resource limits for one sandboxed run: time, memory and output size."""
    def __init__(self, time=0, memory=0, output=0):
        self.time, self.memory, self.output = time, memory, output
# Default resource limits for each kind of helper program.
# Fields are (time, memory, output) per RunLimit; units presumably
# seconds/MB/MB -- confirm against run_program's --tl/--ml/--ol flags.
RL_DEFAULT = RunLimit(1, 256, 64)
RL_JUDGER_DEFAULT = RunLimit(600, 1024, 128)
RL_CHECKER_DEFAULT = RunLimit(5, 256, 64)
RL_VALIDATOR_DEFAULT = RunLimit(5, 256, 64)
RL_MARKER_DEFAULT = RunLimit(5, 256, 64)
RL_COMPILER_DEFAULT = RunLimit(15, 512, 64)
class CustomTestInfo:
    """Outcome of a custom (user-supplied input) test run."""
    def __init__(self, ust=0, usm=0, info="", exp="", out=""):
        self.ust = ust    # user time consumed
        self.usm = usm    # user memory consumed
        self.info = info  # verdict/diagnostic string
        self.exp = exp    # extra explanation text
        self.out = out    # preview of the program's output
class RunResult:
    """Outcome of one sandboxed run: status type, resources and exit code."""
    #TODO exit_code default is -1?
    def __init__(self, type=0, ust=0, usm=0, exit_code=-1):
        self.type = type            # one of the RS_* status codes
        self.ust = ust              # user time consumed
        self.usm = usm              # user memory consumed
        self.exit_code = exit_code  # process exit code (-1 = unknown)
    @property
    def info(self):
        """Human-readable name of the status type (via info_str)."""
        return info_str(self.type)
def RunResult_failed_result():
    """Sentinel RunResult for when the judger itself failed (-1 resources)."""
    return RunResult(type=RS_JGF, ust=-1, usm=-1)
class RunCheckerResult:
    """Result of running the checker: status, resources, score and message."""
    def __init__(self, type=0, ust=0, usm=0, scr=0, info=""):
        self.type = type  # one of the RS_* status codes
        self.ust = ust    # checker user time
        self.usm = usm    # checker user memory
        self.scr = scr    # score assigned by the checker
        self.info = info  # checker message text
def RunCheckerResult_failed_result():
    """Sentinel RunCheckerResult for a checker-side judgement failure."""
    return RunCheckerResult(type=RS_JGF, ust=-1, usm=-1, \
                            scr=0, info="Checker Judgment Failed")
class RunValidatorResult:
    """Result of running the input validator."""
    def __init__(self, type=0, ust=0, usm=0, succeeded=False, info=""):
        self.type = type            # one of the RS_* status codes
        self.ust = ust              # validator user time
        self.usm = usm              # validator user memory
        self.succeeded = succeeded  # True when the input was accepted
        self.info = info            # validator message text
def RunValidatorResult_failed_result():
    """Sentinel RunValidatorResult for a validator-side judgement failure."""
    return RunValidatorResult(type=RS_JGF, ust=-1, usm=-1, \
                              succeeded=False, info="Validator Judgment Failed")
class RunCompilerResult:
    """Result of running the compiler."""
    def __init__(self, type=0, ust=0, usm=0, succeeded=False, info=""):
        self.type = type            # one of the RS_* status codes
        self.ust = ust              # compiler user time
        self.usm = usm              # compiler user memory
        self.succeeded = succeeded  # True when compilation completed with exit code 0
        self.info = info            # compiler diagnostics / message
def RunCompilerResult_failed_result():
    """Sentinel RunCompilerResult for a compiler-side judgement failure."""
    return RunCompilerResult(type=RS_JGF, ust=-1, usm=-1, \
                             succeeded=False, info="Compile Failed")
def numbers_to_strings(argument):
    """Spell out 0-2 in English; any other argument maps to "nothing"."""
    return {0: "zero", 1: "one", 2: "two"}.get(argument, "nothing")
def info_str(id):
    """Map a run-status code to its display string.

    Unknown codes (including RS_AC) yield "nothing"; callers only consult
    this for non-accepted results.
    """
    names = {
        RS_MLE: "Memory Limit Exceeded",
        RS_TLE: "Time Limit Exceeded",
        RS_OLE: "Output Limit Exceeded",
        RS_RE: "Runtime Error",
        RS_DGS: "Dangerous Syscalls",
        RS_JGF: "Judgement Failed",
    }
    return names.get(id, "nothing")
def escapeshellarg(arg):
    """Quote *arg* for safe interpolation into a POSIX shell command line.

    Wraps the argument in single quotes and escapes embedded single quotes
    with the standard '\\'' sequence.  Bug fix: the previous version doubled
    backslashes instead -- inside single quotes a backslash is literal, so
    that corrupted arguments containing backslashes, and embedded single
    quotes could still break out of the quoting entirely.
    """
    return "'" + arg.replace("'", "'\\''") + "'"
def run_program(main_path, result_file_name, input_file_name, output_file_name, error_file_name, \
        limit, para_list=None, type=None, work_path=None, readable=None, raw_para=None, uoj_offline=False):
    """Run a program either directly (uoj_offline) or inside the sandbox.

    Offline mode builds a plain shell command with redirections and relies on
    os.system's exit status; sandbox mode invokes main_path/run/run_program
    with limit/path flags and parses its one-line result file.
    Returns a RunResult (failed sentinel on any exception).
    """
    para_list = para_list or []
    readable = readable or []
    #limit : RunLimit
    if uoj_offline:
        # offline: run the bare command, filtering out the sandbox-only
        # --type/--work-path flags from para_list
        command = "cd %s" % (work_path)
        command += " && %s" % (" ".join([para for para in para_list if ("type" not in para and "work-path" not in para)])) if len(para_list) != 0 else ""
        command += (" && " + raw_para) if raw_para else ""
        command += " <" + escapeshellarg(input_file_name)
        command += " >" + escapeshellarg(output_file_name)
        command += " 2>" + escapeshellarg(error_file_name)
        command += " && cd %s" % main_path
        print("command is : ", command)
        try:
            exit_code_ = os.system(command)
            # os.system returns a wait status; high byte = exit value, low byte = signal
            value, exit_code = (exit_code_)>>8, exit_code_&0xff
            #print io.open(result_file_name,'r').readline()
            data = "\n".join(open(error_file_name,'r').readlines()).strip()
            print("value:", value, "exit_code:", exit_code, "data", data)
            if value != 0:
                # non-zero exit value is classified as a runtime error
                result = RunResult(type=RS_RE, exit_code=exit_code)
            else:
                result = RunResult(exit_code=exit_code)
            return result
        except:
            import traceback; traceback.print_exc();
            return RunResult_failed_result()
    else:
        # sandboxed: delegate to the native run_program wrapper with limits
        command = " ".join([main_path + "/run/run_program", \
            ">" + escapeshellarg(result_file_name), \
            "--in=" + escapeshellarg(input_file_name), \
            "--out=" + escapeshellarg(output_file_name), \
            "--err=" + escapeshellarg(error_file_name), \
            "--tl=" + str(limit.time), \
            "--ml=" + str(limit.memory), \
            "--ol=" + str(limit.output), \
            ])
        command += " --type=" + str(type) if (type) else ""
        command += " --work-path=" + work_path if (work_path) else ""
        command += " " + " ".join([" --add-readable=" + each for each in readable])
        command += " " + " ".join([para for para in para_list])
        command += (" " + raw_para) if raw_para else ""
        print("command is : ", command)
        try:
            os.system(command)
            #print open(result_file_name,'r').readline()
            # data_raw = '\n'.join(open(result_file_name,'r').readline().split(' '))
            # sandbox writes "type ust usm exit_code" as a single line
            data = open(result_file_name,'r').readline().strip().split(' ')
            print("data", data)
            result = RunResult(int(data[0]), int(data[1]), int(data[2]), int(data[3]))
            return result
        except:
            return RunResult_failed_result()
def file_preview(input_file_name, range=100):
    """Return up to ``range * 4`` characters of a file, "..."-truncated.

    On any read failure a diagnostic string is returned instead.
    The parameter name ``range`` shadows the builtin but is kept for
    backward compatibility with keyword callers; the local that shadowed
    the builtin ``str`` has been renamed.
    """
    # broad except kept deliberately: the original also swallowed decode
    # errors etc., not just missing files
    try:
        with open(input_file_name, 'r') as f:
            text = f.read()
    except:
        return "no such file:" + input_file_name
    limit = range * 4
    if len(text) > limit:
        return text[:limit] + "..."
    return text
def file_hide_token(file_name, token):
    """Strip the leading authorisation *token* from a file in place.

    If the file does not start with the token (or cannot be read), its
    contents are replaced with "Unauthorized output\\n".  Broad except kept
    to preserve the original best-effort semantics.
    """
    # examine token
    try:
        with open(file_name, "r") as f:
            data = f.read()
        if not data.startswith(token):
            raise Exception
        with open(file_name, "w") as f:
            f.write(data[len(token):])
    except:
        with open(file_name, "w") as f:
            f.write("Unauthorized output\n")
def conf_run_limit(pre, index, val, config):
    """Build a RunLimit from config, falling back to the defaults in *val*.

    For each of time/memory/output the lookup order is:
    "<pre>_<kind>_limit_<index>", then "<pre>_<kind>_limit", then the
    corresponding field of *val* (prefix omitted when *pre* is empty).
    """
    prefix = pre + "_" if pre != "" else ""
    def pick(key, default):
        specific = key + "_" + str(index)
        if specific in config.config:
            return config.config[specific]
        if key in config.config:
            return config.config[key]
        return default
    return RunLimit(time=pick(prefix + "time_limit", val.time),
                    memory=pick(prefix + "memory_limit", val.memory),
                    output=pick(prefix + "output_limit", val.output))
def htmlspecialchars(string=""):
string = string.replace('&', "&")
string = string.replace('<', "<")
string = string.replace('>', ">")
string = string.replace('"', """)
string = string.replace('\0', "<b>\\0</b>")
return string
def check_file_exist(work_path, result_path, assertfile=(), banfile=()):
    """Verify the work dir contains all of *assertfile* and none of *banfile*.

    Lists the directory via the shell into result_path/filelist.txt, then
    scans that listing.  Returns a (ok, message) tuple.

    Defaults changed from mutable lists to immutable tuples (latent
    shared-default hazard); both are converted to sets below, so callers
    are unaffected.
    """
    os.system("cd %s; ls > %s" % (escapeshellarg(work_path), escapeshellarg(result_path + "/filelist.txt")))
    assertfile = set(assertfile)
    banfile = set(banfile)
    for tmp in open(result_path + "/filelist.txt"):
        tmp = tmp.strip()
        if tmp in banfile:
            # message typo fixed: "unexpcted" -> "unexpected"
            return False, "found unexpected file '" + tmp + "' in your dir"
        if tmp in assertfile:
            assertfile.remove(tmp)
    if not len(assertfile):
        return True, "ok"
    else:
        return False, "didn't find expected file '" + str(list(assertfile)[0]) + "' in your dir"
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,418 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/24/1/uoj_judger_tester.py | #coding=utf-8
import os
from uoj_judger_config import *
import judger_class_lib as lib
import traceback
class pyjudger_validator():
    """Runs the problem's input validator against the current test input."""
    def run(self, config, tester):
        """Validate tester.input_file_name; returns lib.RunValidatorResult.

        succeeded is True only when the validator ran to completion (RS_AC)
        and exited 0; its stderr preview is carried in .info either way.
        """
        input_file_name = tester.input_file_name
        ret = lib.run_program(
            main_path=config.main_path, \
            result_file_name=config.result_path + "/run_validator_result.txt", \
            input_file_name=input_file_name, \
            output_file_name="/dev/null", \
            error_file_name=config.result_path + "/validator_error.txt", \
            work_path=config.work_path, \
            limit=lib.conf_run_limit('validator', tester.num, lib.RL_VALIDATOR_DEFAULT, config), \
            para_list=[config.config['validator']], \
            uoj_offline=("uoj_offline" in config.config))
        return lib.RunValidatorResult(type=ret.type, usm=ret.usm, ust=ret.ust, \
            succeeded=(ret.type==lib.RS_AC and ret.exit_code==0), \
            info=lib.file_preview(config.result_path + "/validator_error.txt"))
class pyjudger_run_submission_program():
    """Runs the contestant's compiled program on the current test input."""
    def run(self, config, tester):
        """Execute the submission; returns a lib.RunResult.

        A non-zero exit code on an otherwise clean run (RS_AC) is
        reclassified as a runtime error.
        """
        ret = lib.run_program( \
            main_path=config.main_path, \
            result_file_name=config.result_path + "/run_submission_program.txt", \
            input_file_name=tester.input_file_name, \
            output_file_name=tester.output_file_name, \
            error_file_name="/dev/null", \
            work_path=config.work_path, \
            limit=lib.conf_run_limit("", index=tester.num, val=lib.RL_DEFAULT, config=config), \
            type="default", \
            para_list=[config.exec_file_name], \
            uoj_offline=("uoj_offline" in config.config))
        if ret.type == lib.RS_AC and ret.exit_code != 0:
            ret.type = lib.RS_RE
        print("run submission program:", ret.type)
        return ret
class pyjudger_run_checker():
    """Runs the checker on (input, output, answer) and parses its verdict."""
    def run(self, config, tester):
        """Execute the checker and convert its stderr into a RunCheckerResult.

        The checker prints testlib-style verdicts to stderr: "ok ..." scores
        100, "points <f> ..." a fractional score, anything else 0.
        """
        ret = lib.run_program( \
            main_path=config.main_path, \
            result_file_name=config.result_path + "/run_checker_result.txt", \
            input_file_name="/dev/null", \
            output_file_name="/dev/null", \
            error_file_name=config.result_path + "/checker_error.txt", \
            work_path=config.work_path, \
            limit=lib.conf_run_limit("checker", index=tester.num, val=lib.RL_CHECKER_DEFAULT, config=config), \
            readable=[tester.input_file_name, \
                      tester.output_file_name, \
                      tester.answer_file_name], \
            para_list=[config.config['checker'], \
                       os.path.abspath(tester.input_file_name), \
                       os.path.abspath(tester.output_file_name), \
                       os.path.abspath(tester.answer_file_name), \
                       ], \
            uoj_offline=("uoj_offline" in config.config))
        if ret.type!=lib.RS_AC or ret.exit_code!=0:
            # checker crashed or was killed: zero score, show its stderr
            return lib.RunCheckerResult(type=ret.type, usm=ret.usm, ust=ret.ust, scr=0, \
                info=lib.file_preview(config.result_path + "/checker_error.txt"))
        else:
            R = lib.RunCheckerResult(type=ret.type, usm=ret.usm, ust=ret.ust, scr=0, \
                info=lib.file_preview(config.result_path + "/checker_error.txt"))
            try:
                F = lib.file_preview(config.result_path + "/checker_error.txt")
                R.info = F
                E = F.split(' ')
                if E[0] == "ok":
                    R.scr = 100
                elif E[0] == 'points':
                    # NOTE(review): any "points" value other than exactly 1 is
                    # treated as Judgment Failed here; looks deliberate for
                    # this offline judge, but confirm against the online one.
                    if float(E[1]) != 1:
                        # bug fix: the original passed the *string* "RS_JGF"
                        # as the type instead of the lib.RS_JGF constant
                        return lib.RunCheckerResult(type=lib.RS_JGF, ust=-1, usm=-1, scr=0, info="Checker Judgment Failed")
                    else:
                        R.scr = int(100 * float(E[1]) + 0.5)
                else:
                    R.scr = 0
                return R
            except:
                return lib.RunCheckerResult(type=lib.RS_JGF, ust=-1, usm=-1, scr=0, info="Checker Judgment Failed")
class pyjudger_custom_tester():
    """Drives one test point through validate -> run -> check phases."""
    def __init__(self, config, validater=None, executer=None, checker=None):
        # each phase can be swapped out; defaults are the standard runners
        self.config = config
        self.validater = validater or pyjudger_validator()
        self.executer = executer or pyjudger_run_submission_program()
        self.checker = checker or pyjudger_run_checker()
    def generate_config(self, index):
        """Compute input/output/answer file names for test point `index`.

        Negative indices denote extra tests and get an "ex_" prefix with the
        absolute index in the name.
        """
        def init_file_name(pre, default_pre, index, suf):
            # "<pre><index>.<suf>" with config-overridable prefix and suffix
            name = ""
            if index < 0:
                name += "ex_"
            name += self.config.config[pre] if (pre in self.config.config) else default_pre
            name += str(abs(index))+"."
            name += self.config.config[suf] if (suf in self.config.config) else "txt"
            return name
        self.input_file_name = \
            self.config.data_path + "/" + init_file_name("input_pre", "input", index, 'input_suf')
        self.output_file_name = \
            self.config.work_path + "/" + init_file_name("output_pre", "output", index, 'output_suf')
        self.answer_file_name = \
            self.config.data_path + "/" + init_file_name("output_pre", "output", index, 'output_suf')
        self.num = index
    def test(self, point_index):
        """Run one test point end to end and return a lib.PointInfo.

        Phase 1 (optional) validates the input, phase 2 runs the submission
        (skipped for answer-only problems), phase 3 runs the checker.
        The first failing phase short-circuits with a zero-score PointInfo.
        """
        #init
        self.generate_config(point_index)
        #phase1: input validation, only when explicitly enabled
        if self.config.config.get('validate_input_before_test') == 'on':
            ok, ret1 = self.run_validator_and_get_result()
            if not ok:
                return lib.PointInfo(point_index, 0, info="validater error", \
                    input=lib.file_preview(self.input_file_name))
            if ret1.type != lib.RS_AC:
                return lib.PointInfo(point_index, 0, ust=ret1.ust, usm=ret1.usm, \
                    info=ret1.info, input=lib.file_preview(self.input_file_name))
        # phase2: run the submission (or fake an AC run for submit-answer problems)
        if self.config.config.get('submit_answer') == 'on':
            ret2 = lib.RunResult(type=lib.RS_AC, ust=-1, usm=-1, exit_code=0)
        else:
            ok, ret2 = self.run_submission_program_and_get_result()
            if not ok:
                return lib.PointInfo(point_index, scr=0, ust=0, usm=0, info="Running Error", \
                    input=lib.file_preview(self.input_file_name))
            if 'token' in self.config.config:
                # strip the secret token header so only tokenised output counts
                lib.file_hide_token(self.output_file_name, self.config.config['token'])
            if ret2.type != lib.RS_AC:
                print("test:", ret2.info)
                return lib.PointInfo(point_index, 0, ust=ret2.ust, usm=ret2.usm, info=ret2.info, \
                    input=lib.file_preview(self.input_file_name))
        # phase3: score the output with the checker
        ok, ret3 = self.run_checker_and_get_result()
        if not ok:
            return lib.PointInfo(point_index, 0, info="checker error", \
                input=lib.file_preview(self.input_file_name))
        if ret3.type != lib.RS_AC:
            # note: time/memory reported are the submission's (ret2), not the checker's
            return lib.PointInfo(point_index, 0, ust=ret2.ust, usm=ret2.usm, \
                info="Checker " + lib.info_str(ret3.type), \
                input=lib.file_preview(self.input_file_name), \
                out=lib.file_preview(self.output_file_name))
        return lib.PointInfo(point_index, \
            ret3.scr, \
            usm=ret2.usm, \
            ust=ret2.ust, \
            info="Wrong Answer" if ret3.scr == 0 else ("Accepted" if ret3.scr==100 else "Acceptable Answer"), \
            input=lib.file_preview(self.input_file_name), \
            out=lib.file_preview(self.output_file_name), \
            res=ret3.info)
    def run_validator_and_get_result(self):
        """Run the validator phase; returns (ok, result) where ok=False means it raised."""
        try:
            ret = self.validater.run(self.config, self)
            return True, ret
        except:
            traceback.print_exc()
            return False, None
    def run_submission_program_and_get_result(self):
        """Run the submission phase; returns (ok, result) where ok=False means it raised."""
        try:
            ret = self.executer.run(self.config, self)
            return True, ret
        except:
            traceback.print_exc()
            return False, None
    def run_checker_and_get_result(self):
        """Run the checker phase; returns (ok, result) where ok=False means it raised."""
        try:
            ret = self.checker.run(self.config, self)
            return True, ret
        except:
            traceback.print_exc()
            return False, None
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,419 | JianGuanTHU/UOJ_Offline | refs/heads/master | /data/24/1/datagen.py |
import numpy as np
import random
import string
# Day names appended to each generated record.
# NOTE(review): "Subday" looks like a typo for "Sunday", but it is a runtime
# string the generated data and any checker compare verbatim, so it is kept.
ds = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Subday"]
# (month, day) pairs already emitted for the current data file (reset per file)
rm = []
def check(m1, d1, pairs=None):
    """Return True when (m1, d1) has not been generated yet.

    :param m1: month to test
    :param d1: day to test
    :param pairs: sequence of (month, day) tuples to check against;
                  defaults to the module-level ``rm`` list (backward
                  compatible with the original global-only version).
    """
    if pairs is None:
        pairs = rm
    return all(not (r[0] == m1 and r[1] == d1) for r in pairs)
# Generate ten data files, each with 100 unique (month, day, dayname) records
# followed by 100 query lines that reuse the generated pairs.
for d in range(1, 11):
    print(d)
    rm = []
    f = open("data"+str(d)+".in", "w")
    f.write("100\n")
    for i in range(1, 101):
        # draw (month, day) pairs until an unused combination is found
        m1, d1 = np.random.randint(1, 13), np.random.randint(1, 29)
        while not check(m1, d1):
            m1, d1 = np.random.randint(1, 13), np.random.randint(1, 29)
        rm.append((m1, d1))
        f.write(str(m1) + " " + str(d1) + " " + random.choice(ds) + "\n")
    f.write("100\n")
    for i in range(1, 101):
        # each query reuses a known pair plus 15 random alphanumeric chars
        r = random.choice(rm)
        s = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
        f.write(str(r[0]) + " " + str(r[1]) + " " + s + "\n")
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,420 | JianGuanTHU/UOJ_Offline | refs/heads/master | /find_problem_id.py | #!/usr/bin/env python
# Python 2 script: echoes the problem_id from submission.conf and rewrites
# the config into ./work with the uoj_offline flag forced on.
with open("./submission.conf", "r") as fin:
    run_config = {}
    for line in fin:
        # each line is a "key value" pair
        key, value = line.strip('\n').strip('\r').split(' ')
        run_config[key] = value
        tmp = line.strip().split()
        if line.strip().split()[0] == "problem_id":
            print tmp[1]
    run_config["uoj_offline"] = "on"
with open("./work/submission.conf", "w") as fout:
    for key in run_config:
fout.write("%s %s\n"%(key, run_config[key])) | {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,421 | JianGuanTHU/UOJ_Offline | refs/heads/master | /find_python_version.py | #!/usr/bin/env python
import sys
# Python 2 script: prints which interpreter the judger for problem
# sys.argv[1] should use, based on its problem.conf.
with open("./data/%s/1/problem.conf"%(sys.argv[1]), "r") as fin:
    run_config = {}
    configs = fin.readlines()
    for each_config in configs:
        # each line is a "key value" pair
        key, value = each_config.strip('\n').strip('\r').split(' ')
        run_config[key] = value
if ("use_python3_judger" in run_config) and (run_config["use_python3_judger"] == "on"):
    print "python3"
else:
    print "python"
| {"/data/22/1/uoj_judger_reporter.py": ["/judger_class_lib.py"], "/data/19/1/uoj_judger_compiler.py": ["/judger_class_lib.py"], "/data/24/1/uoj_judger_tester.py": ["/judger_class_lib.py"]} |
52,429 | wstk/pbar | refs/heads/master | /setup.py | from setuptools import setup
# Read the package version from the VERSION.txt file shipped with the sources.
with open("VERSION.txt") as ver:
    VERSION = ver.read().strip()
def readme():
    """Return the long description text from README.txt."""
    with open("README.txt") as rm:
        return rm.read()
setup(name="pbar",
      version=VERSION,
      description="Simple console progress bar widget",
      long_description=readme(),
      url=None,
      author="Will Stokes",
      author_email="william.stokes@zf.com",
      license="MIT",
      packages=["pbar"],
      install_requires=[],
      entry_points={
          "console_scripts": []
      },
      zip_safe=True,
      include_package_data=True,
      classifiers=[
          # NOTE(review): the following "Operation System :: Windows" entry is
          # not a valid PyPI trove classifier (should be
          # "Operating System :: Microsoft :: Windows") -- confirm and fix
          "Programming Language :: Python :: 3",
"Operation System :: Windows"]) | {"/test_pbar.py": ["/pbar/__init__.py"]} |
52,430 | wstk/pbar | refs/heads/master | /test_pbar.py | """
Inspection tests for pbar
--------------------------
Unit tests for this does not really make sense
"""
from pbar import Bar
from time import sleep
import logging
import unittest
import nose.tools as nosetools
class TestPBar(unittest.TestCase):
    """Visual inspection tests: each case animates a bar on the console."""
    def setUp(self):
        # default: a 5-second animation spread over 100 steps
        self.duration = 5
        self.steps = 100
    @nosetools.nottest
    def delay(self):
        # sleep long enough that the whole bar takes self.duration seconds
        sleep(self.duration/self.steps)
    @nosetools.nottest
    def do(self, bar):
        # drive the given bar through all steps with the configured delay
        for x in range(self.steps):
            bar.step()
            self.delay()
    def test_bar(self):
        print("Basic Bar")
        self.do(Bar(self.steps))
    def test_message(self):
        self.do(Bar(self.steps, message="A bar with a message"))
    def test_symbols(self):
        self.do(Bar(self.steps, message="A bar with different symbols", marker="*", bar_left="<", bar_right="}"))
    def test_width(self):
        self.do(Bar(self.steps, message="Width is 100", width=100))
        self.do(Bar(self.steps, message="Width is 20", width=20))
    def test_suffix(self):
        self.do(Bar(self.steps, message="Test suffix", suffix="{o.progress}% {o.idx} {o.time}"))
    def test_slow(self):
        # stretch past one minute to exercise minutes formatting in {o.time}
        self.duration=65
        self.do(Bar(self.steps, message="Slow (> 1 min!)", suffix="{o.time}s"))
    def test_log_capture(self):
        # log records emitted while the bar is active should be captured and
        # replayed once the bar completes (see pbar module docstring)
        log = logging.getLogger(__name__)
        self.steps = 5
        bar = Bar(self.steps, message="Log capturing...")
        for x in range(self.steps):
            bar.step()
            log.info("A message!")
            self.delay()
| {"/test_pbar.py": ["/pbar/__init__.py"]} |
52,431 | wstk/pbar | refs/heads/master | /pbar/__init__.py | """
Installation
===============
Unzip ``pbar-<version>.zip`` to a location of your choice, ``cd`` to the directory and run::
python setup.py install
.. note::
Ensure you install pbar to the correct Python interpreter. ``pbar`` is designed to work with Python3.
Verify the installation with::
pip show pbar
Usage
=============
Basic
--------------
Importing the ``Bar`` class is the first step::
from pbar import Bar
Initialise a ``Bar`` object with the total number of iterations required, and then call ``step()`` for each iteration::
bar = Bar(100)
for x in range(100):
bar.step()
# Do work in the loop here...
There is no need to manually close down the progress bar, as this is handled automatically when the bar completes.
However, if you wish to exit early from the loop, you can use the ``end()`` method::
for x in range(100):
bar.step()
# Do some work in the loop here...
if x == 75:
bar.end()
break
Calling ``end()`` ensures the buffers are flushed and the console line formatting returns to normal.
Logging
---------------
When using the progress bar, all log messages are captured. Once the bar completes (or ``end()`` is called manually)
the log messages are flushed and printed to the console.
The following example illustrates this concept::
def work():
log.info("Do some work!")
bar = Bar(3)
for idx in range(3):
bar.step()
work()
log.info("Done!")
The above fragment of code would capture the logging from ``work()`` and then display it once the bar had completed::
>>> [||||||||||||||||] 100%
>>>
>>> root:INFO: Do some work!
>>> root:INFO: Do some work!
>>> root:INFO: Do some work!
>>> root:INFO: Done!
.. note::
Whatever log formatting has been specified should be preserved.
Logging to file will continue as normal.
Customisation
---------------
The look of the bar and the information displayed can be customised by the user using key word arguments.
The default bar looks like this::
[||||||||| ] 42%
Use the following keyword arguments to customise the look of the bar::
message A message to the left of the progress bar (default = "")
marker The symbol used to the fill the bar ( | )
bar_left The left hand delimiter ([)
bar_right The right hand delimiter (])
width The width of the bar in characters (50)
The information displayed in the suffix (to the right of the bar) can also be customised using the ``suffix`` keyword
and a formatted string of the form ``{o.variable}``. ``variable`` can be any of the following::
idx The current iteration
tot The total number of iterations
progress The progress, as a percentage
time The current elapsed time as mins:secs
These can be combined to display information as you require::
b = Bar(100, message="My progress bar", marker="*", suffix="{o.progress}% [{o.time}s]")
would produce a bar that looked like this...::
My progress bar [****** ] 35% [00:12s]
"""
from sys import stdout
from math import ceil, floor
import logging
from time import time
from io import StringIO as strbuff
log = logging.getLogger(__name__)
class Bar(object):
    """A console progress bar that captures log output while visible.

    While the bar is active, plain StreamHandlers on the root logger are
    temporarily swapped for an in-memory buffer; captured records are
    replayed to the console once the bar completes or ``end()`` is called.
    """

    def __init__(self, tot, **kwargs):
        """Create a bar for *tot* iterations.

        Keyword arguments: ``message`` (text left of the bar), ``marker``,
        ``bar_left``, ``bar_right`` (single display characters), ``width``
        (characters, capped at 100) and ``suffix`` (a ``{o.attr}`` format
        string evaluated against this instance).
        """
        try:
            self.tot = int(tot)
            self.idx = 0            # current iteration count
            self.visible = False
            self.pbar = []          # marker characters drawn so far
            self.progress = 0       # percentage complete
            self.tstart = None
            self.time = 0
            self.done = False
            # User customisation
            self.message = kwargs.get("message", "")
            self.marker = kwargs.get("marker", "|")[0]
            self.bar_left = kwargs.get("bar_left", "[")[0]
            self.bar_right = kwargs.get("bar_right", "]")[0]
            self.width = min(kwargs.get("width", 50), 100)
            self.inc = 100/self.width   # percent represented by one marker
            self.suffix = kwargs.get("suffix", "{o.progress}%")
            # Create a string buffer to hold the redirected logging output.
            self.buff = strbuff()
            self.hbuff = logging.StreamHandler(self.buff)
            root = logging.getLogger()
            # Mirror the first root handler's format so replayed records
            # look identical to normal console logging.
            self.hbuff.setFormatter(logging.Formatter(root.handlers[0].formatter._fmt))
            self.hbuff.setLevel(root.level)
            self.streams = []       # handlers removed while the bar is visible
        except (ValueError, TypeError) as err:
            log.error(err)

    def step(self):
        """Increment the progress bar"""
        if self.idx == 0:
            self._start()
        self.idx += 1
        self.progress = int(ceil(float(self.idx)/float(self.tot)*100))
        self._update()
        if self.idx == self.tot:
            self.end()

    def end(self):
        """Destroy the progress bar once completed"""
        # getattr guard: __del__ may call end() on a Bar whose __init__
        # failed before ``done`` was ever assigned.
        if not getattr(self, "done", True):
            self.done = True
            stdout.flush()
            print("\n")
            stdout.flush()
            try:
                root = logging.getLogger()
                root.removeHandler(self.hbuff)
                for stream in self.streams:
                    root.addHandler(stream)
            except AttributeError:
                pass
            # Replay the captured log records to the console.
            for logs in self.buff.getvalue().split("\n"):
                if logs.strip():
                    print(logs)

    def _start(self):
        """Initialise the bar and redirect the logging output"""
        root = logging.getLogger()
        # Iterate over a copy: removing handlers while iterating
        # root.handlers directly skips every other handler.
        for stream in list(root.handlers):
            # Exact type check is deliberate: FileHandler subclasses
            # StreamHandler, and file logging should continue unaffected.
            if type(stream) is logging.StreamHandler:
                self.streams.append(stream)
                root.removeHandler(stream)
        root.addHandler(self.hbuff)
        self.visible = True
        self.tstart = time()

    def _update(self):
        """update the displayed progress bar"""
        elapsed = time() - self.tstart
        mins = floor(elapsed/60)
        secs = int(elapsed - (mins*60))
        self.time = "{:02d}:{:02d}".format(mins, secs)
        # Grow the marker string until it matches the current percentage.
        while len(self.pbar) * self.inc < self.progress:
            self.pbar.append(self.marker)
        bar = "{}{}{}{}".format(self.bar_left, "".join(self.pbar),
                                " " * (self.width - len(self.pbar)), self.bar_right)
        disp = " ".join([self.message, bar, self.suffix.format(o=self)])
        stdout.flush()
        print("\r" + disp, end="")
        stdout.flush()

    def __del__(self):
        # Ensure buffers are flushed and handlers restored even if the
        # caller abandons the bar early.
        self.end()
| {"/test_pbar.py": ["/pbar/__init__.py"]} |
52,442 | qiuyingyue/ONV2SEQ | refs/heads/master | /onv_classification.py | # TensorFlow and tf.keras
import sys
import os
sys.path.append("/usr/local/lib/python2.7/dist-packages")
sys.path.append("./mnist")
import tensorflow as tf
import mnist
# Helper libraries
import numpy as np
import nn
from sketch_rnn_train_onv import load_dataset
import model_dnn_encoder as sketch_rnn_model
from PIL import Image
from onv_process import onv_convert_fromarr, show_onv
def network(x):
    """Plain fully-connected head: 1000 -> 500 -> 250 -> 100 -> 50 -> 25 -> 4.

    Layers are named fc1..fc7; each layer's tensor is printed for
    shape inspection. Returns the final 4-unit logits tensor.
    """
    print ("x", x)
    out = x
    for i, units in enumerate([1000, 500, 250, 100, 50, 25, 4], start=1):
        out = tf.layers.dense(out, units, name="fc{}".format(i))
        print ("fc{}, ".format(i), out)
    return out
def network_dropout(x):
    """Fully-connected head with dropout after every hidden layer.

    Hidden sizes 200 -> 100 -> 50 -> 25 (scoped "vector_rnn/fc1".."fc4",
    presumably so the weights line up with the fc1-fc4 variable filter
    used for the Saver below), followed by a 5-way output layer.
    """
    out = x
    for i, units in enumerate([200, 100, 50, 25], start=1):
        out = tf.layers.dense(out, units, name="vector_rnn/fc{}".format(i))
        out = tf.layers.dropout(out, rate=0.5, name="fc{}_drop".format(i))
    fc_last = tf.layers.dense(out, 5, name="fc_last")
    print ("fc_last, ", fc_last)
    return fc_last
# Mini-batch size used for training and evaluation.
batch_size = 100
# Length of one ONV sample; matches R*T/Tfactor = 138*360/5 = 9936
# from onv_process.
onv_size = 9936
def load_data():
    """Load the train/valid/test ONV splits and their labels.

    Relies on load_dataset() returning a tuple where indices 6-8 are the
    onv splits and 9-11 the matching labels (presumably 5-way one-hot,
    to match the [None, 5] label placeholder below — confirm upstream).
    Pixel values are scaled from [0, 255] to [0, 1].
    """
    # Load dataset from the hard-coded data directory.
    data_dir = "/home/qyy/workspace/data"
    model_params = sketch_rnn_model.get_default_hparams()
    datasets = load_dataset(data_dir, model_params, contain_labels=True)
    # Positional unpacking of the dataset tuple: 6-8 onvs, 9-11 labels.
    train_onvs = datasets[6]
    valid_onvs = datasets[7]
    test_onvs = datasets[8]
    train_labels = datasets[9]
    valid_labels = datasets[10]
    test_labels = datasets[11]
    # Normalise intensities to [0, 1].
    train_onvs = train_onvs / 255.0
    valid_onvs = valid_onvs / 255.0
    test_onvs = test_onvs / 255.0
    return train_onvs, train_labels, valid_onvs, valid_labels, test_onvs, test_labels
def convert_label(idx, num_classes=10):
    """Return a one-hot vector with a 1 at position *idx*.

    Args:
        idx: integer class index, 0 <= idx < num_classes.
        num_classes: length of the returned vector (default 10, the
            number of MNIST classes — generalised from the original
            hard-coded constant).

    Returns:
        numpy float array of shape (num_classes,).
    """
    arr = np.zeros(num_classes)
    arr[idx] = 1
    return arr
def load_mnist_data():
    """Build ONV train/test sets directly from the MNIST images.

    Each image is converted through onv_convert_fromarr(resize=True)
    (which resizes to 600x600 before log-polar sampling); labels become
    10-way one-hot vectors and onvs are scaled to [0, 1].
    """
    train_images = mnist.train_images()
    train_labels = mnist.train_labels()
    train_labels = [convert_label(idx) for idx in train_labels]
    train_labels = np.array(train_labels)
    train_onvs = [onv_convert_fromarr(I, resize=True) for I in train_images]
    train_onvs = np.array(train_onvs)
    print (train_images.shape, train_labels.shape, train_onvs.shape)
    test_images = mnist.test_images()
    test_labels = mnist.test_labels()
    test_labels = [convert_label(idx) for idx in test_labels]
    test_labels = np.array(test_labels)
    test_onvs = [onv_convert_fromarr(I, resize=True) for I in test_images]
    test_onvs = np.array(test_onvs)
    # Normalise intensities to [0, 1].
    train_onvs = train_onvs / 255.0
    test_onvs = test_onvs / 255.0
    return train_onvs, train_labels, test_onvs, test_labels
# Construct the classification graph: onv input, 5-way label placeholder.
X = tf.placeholder(tf.float32, [None, onv_size])
# NOTE(review): labels are declared int32 but convert_label() produces
# float one-hot vectors — TF casts on feed; confirm this is intended.
Y = tf.placeholder(tf.int32, [None,5])
logits = network_dropout(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer.
# NOTE(review): softmax_cross_entropy_with_logits is deprecated in later
# TF 1.x in favour of the _v2 variant — confirm the targeted TF version.
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
train_op = optimizer.minimize(loss_op)
# Evaluate model: fraction of argmax matches between prediction and labels.
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value).
init = tf.global_variables_initializer()
def random_batch(batch_size, data, label):
    """Sample a random mini-batch (with replacement) from parallel arrays."""
    picks = np.random.randint(0, len(data), batch_size)
    batch_data = data[picks]
    batch_label = label[picks]
    return batch_data, batch_label
# Create a saver over all globals and ensure the checkpoint directory exists.
saver = tf.train.Saver(tf.global_variables())
model_save_path = '../backup_models/onv_classification_model'
if not os.path.exists(model_save_path):
    os.makedirs(model_save_path)
checkpoint_path = os.path.join(model_save_path, 'vector')
# Collect the fc1-fc4 trainable variables (the "vector_rnn/fc*" encoder
# layers). NOTE(review): vars_list is only printed here, never passed to
# the Saver — confirm whether a partial Saver was intended.
t_vars = tf.trainable_variables()
vars_list=[]
for var in t_vars:
    if ("fc1" in var.name or "fc2" in var.name or "fc3" in var.name or "fc4" in var.name):
        vars_list.append(var)
print (vars_list)
# Load the ONV dataset (the MNIST variant is kept for reference).
#train_onvs, train_labels, test_onvs, test_labels = load_mnist_data()
train_onvs, train_labels, valid_onvs, valid_labels, test_onvs, test_labels = load_data()
# Start training: run the optimiser for num_steps mini-batches, reporting
# train/validation metrics every display_step steps.
with tf.Session() as sess:
    # Run the initializer.
    sess.run(init)
    num_steps = 1000000
    display_step = 1000
    for step in range(1, num_steps+1):
        batch_x, batch_y = random_batch(batch_size, train_onvs, train_labels)
        # Run optimization op (backprop).
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and training accuracy.
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            #print("prediction[0]", prediction[0], "logits[0]", logits[0])
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", Training Accuracy= " + \
                "{:.3f}".format(acc))
            # Calculate batch loss and validation accuracy on a random batch.
            batch_x, batch_y = random_batch(batch_size, valid_onvs, valid_labels)
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", validation Accuracy= " + \
                "{:.3f}".format(acc))
        # Saving models. NOTE(review): every 10 steps is very frequent for
        # a 1M-step run — confirm the interval is intended.
        if (step % 10 == 0):
            checkpoint_path_step = checkpoint_path + str(step)
            tf.logging.info('saving model %s.', checkpoint_path_step)
            tf.logging.info('global_step %i.', step)
            saver.save(sess, checkpoint_path, global_step=step)
    print("Optimization Finished!")
    # Final accuracy on one random test batch.
    batch_x, batch_y = random_batch(batch_size, test_onvs, test_labels)
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: batch_x,
                                      Y: batch_y}))
    '''for index in range(len(test_onvs)):
        onv = test_onvs[index]
        label = test_labels[index]
        pred, accu = sess.run([correct_pred, accuracy], feed_dict={X:[onv], Y:[label]})'''
    #print ("pred label:", pred, "accu", accu)
    # if (accu==0):
    #     im = onv.fromarray(datasets[8][index])
    #     im.save(str(index)+".jpeg")
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,443 | qiuyingyue/ONV2SEQ | refs/heads/master | /sketch_rnn.py |
# coding: utf-8
# In this notebook, we will show how to load pre-trained models and draw things with sketch-rnn
# In[ ]:
# In[1]:
# import the required libraries
import numpy as np
import time
import random
import cPickle
import codecs
import collections
import os
import math
import json
import tensorflow as tf
from six.moves import xrange
# libraries required for visualisation:
from IPython.display import SVG, display
import svgwrite # conda install -c omnia svgwrite=1.1.6
import PIL
from PIL import Image
import matplotlib.pyplot as plt
# set numpy output to something sensible
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
# In[2]:
tf.logging.info("TensorFlow Version: %s", tf.__version__)
# In[3]:
# import our command line tools
#from magenta.models.sketch_rnn.sketch_rnn_train import *
#from magenta.models.sketch_rnn.model import *
#from magenta.models.sketch_rnn.utils import *
#from magenta.models.sketch_rnn.rnn import *
from sketch_rnn_train import *
from model import *
from utils import *
from rnn import *
# In[4]:
# little function that displays vector images and saves them to .svg
def draw_strokes(data, factor=0.2, svg_filename = '/tmp/sketch_rnn/svg/sample.svg'):
    """Render a stroke-3 sequence to an SVG file and display it inline."""
    tf.gfile.MakeDirs(os.path.dirname(svg_filename))
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    # Build one SVG path: "m" starts a new sub-path after a pen lift,
    # "l" begins a line-to run, "" continues the current run.
    path = "M%s,%s " % (25 - min_x, 25 - min_y)
    pen_up = 1
    cmd = "m"
    for i in xrange(len(data)):
        if pen_up == 1:
            cmd = "m"
        elif cmd != "l":
            cmd = "l"
        else:
            cmd = ""
        dx = float(data[i, 0]) / factor
        dy = float(data[i, 1]) / factor
        pen_up = data[i, 2]
        path += cmd + str(dx) + "," + str(dy) + " "
    dwg.add(dwg.path(path).stroke("black", 1).fill("none"))
    dwg.save()
    display(SVG(dwg.tostring()))
# generate a 2D grid of many vector drawings
def make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0):
def get_start_and_end(x):
x = np.array(x)
x = x[:, 0:2]
x_start = x[0]
x_end = x.sum(axis=0)
x = x.cumsum(axis=0)
x_max = x.max(axis=0)
x_min = x.min(axis=0)
center_loc = (x_max+x_min)*0.5
return x_start-center_loc, x_end
x_pos = 0.0
y_pos = 0.0
result = [[x_pos, y_pos, 1]]
for sample in s_list:
s = sample[0]
grid_loc = sample[1]
grid_y = grid_loc[0]*grid_space+grid_space*0.5
grid_x = grid_loc[1]*grid_space_x+grid_space_x*0.5
start_loc, delta_pos = get_start_and_end(s)
loc_x = start_loc[0]
loc_y = start_loc[1]
new_x_pos = grid_x+loc_x
new_y_pos = grid_y+loc_y
result.append([new_x_pos-x_pos, new_y_pos-y_pos, 0])
result += s.tolist()
result[-1][2] = 1
x_pos = new_x_pos+delta_pos[0]
y_pos = new_y_pos+delta_pos[1]
return np.array(result)
# Define the path of the model to load and the path of the dataset.
# In[6]:
data_dir = 'http://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep/'
models_root_dir = '/tmp/sketch_rnn/models'
#model_dir = '/tmp/sketch_rnn/models/aaron_sheep/lstm'
model_dir = '/home/tzhou/Desktop/sheepmodeldesktop'
#cat
#model_dir = '/tmp/sketch_rnn/models/aaron_sheep/layer_norm'
#model_dir = '/tmp/sketch_rnn/models/default'
# In[7]:
#download_pretrained_models(models_root_dir=models_root_dir)
# In[8]:
# load_env returns the dataset splits plus hyper-parameter sets for
# training, evaluation and sampling.
[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env(data_dir, model_dir)
# In[9]:
# Construct the sketch-rnn model; eval/sample models share variables.
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
# In[10]:
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# In[11]:
# Load the weights from the checkpoint into the models.
load_checkpoint(sess, model_dir)
# We define two convenience functions to encode a stroke into a latent vector, and decode from latent vector to stroke.
# In[12]:
def encode(input_strokes):
    """Encode a stroke-3 drawing into a latent vector via the eval model.

    Converts to the big (5-column) stroke format, prepends the
    [0, 0, 1, 0, 0] token (presumably the start-of-sequence marker —
    confirm against the sketch-rnn model), draws the input for visual
    confirmation, and returns batch_z[0], a single latent vector.
    Uses the module-level `sess` and `eval_model`.
    """
    strokes = to_big_strokes(input_strokes).tolist()
    strokes.insert(0, [0, 0, 1, 0, 0])
    seq_len = [len(input_strokes)]
    draw_strokes(to_normal_strokes(np.array(strokes)))
    return sess.run(eval_model.batch_z, feed_dict={eval_model.input_data: [strokes], eval_model.sequence_lengths: seq_len})[0]
# In[13]:
def decode(z_input=None, draw_mode=True, temperature=0.1, factor=0.2):
    """Sample a drawing from the decoder, optionally conditioned on a latent.

    Args:
        z_input: latent vector to condition on; None samples unconditionally.
        draw_mode: when True, render the result with draw_strokes().
        temperature: sampling temperature passed to sample().
        factor: scale factor forwarded to draw_strokes().

    Returns:
        The sampled drawing converted to stroke-3 format.
    """
    z = None
    if z_input is not None:
        z = [z_input]
    sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len, temperature=temperature, z=z)
    strokes = to_normal_strokes(sample_strokes)
    if draw_mode:
        draw_strokes(strokes, factor)
    return strokes
# In[57]:
# Get a sample drawing from the test set and render it to .svg.
print('get a sample drawing from the test set, and render it to .svg')
stroke = test_set.random_sample()
draw_strokes(stroke)
# Encode the sample stroke into a latent vector z.
# In[58]:
z = encode(stroke)
# In[67]:
_ = decode(z, temperature=0.8) # convert z back to drawing at temperature of 0.8
_ = decode(z)
# Create a generated grid at temperatures from 0.1 to 1.0.
# In[60]:
stroke_list = []
for i in range(10):
    stroke_list.append([decode(z, draw_mode=False, temperature=0.1*i+0.1), [0, i]])
stroke_grid = make_grid_svg(stroke_list)
draw_strokes(stroke_grid)
# Latent-space interpolation example between z0 and z1.
# In[61]:
# Reuse the previous sample as one interpolation endpoint.
print('# Latent Space Interpolation Example between $z_0$ and $z_1$')
z0 = z
_ = decode(z0)
# In[62]:
stroke = test_set.random_sample()
z1 = encode(stroke)
_ = decode(z1)
# Interpolate between the two latents.
# In[63]:
z_list = [] # interpolate spherically between z0 and z1
N = 10
for t in np.linspace(0, 1, N):
    z_list.append(slerp(z0, z1, t))
# In[64]:
# For every latent vector in z_list, sample a vector image.
reconstructions = []
for i in range(N):
    reconstructions.append([decode(z_list[i], draw_mode=False), [0, i]])
# In[65]:
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
###################################################################################################
## Let's load the Flamingo Model, and try Unconditional (Decoder-Only) Generation
#
## In[68]:
#
#
#model_dir = '/tmp/sketch_rnn/models/flamingo/lstm_uncond'
#
#
## In[69]:
#
#
#[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
#
#
## In[70]:
#
#
## construct the sketch-rnn model here:
#reset_graph()
#model = Model(hps_model)
#eval_model = Model(eval_hps_model, reuse=True)
#sample_model = Model(sample_hps_model, reuse=True)
#
#
## In[71]:
#
#
#sess = tf.InteractiveSession()
#sess.run(tf.global_variables_initializer())
#
#
## In[72]:
#
#
## loads the weights from checkpoint into our model
#load_checkpoint(sess, model_dir)
#
#
## In[73]:
#
#
## randomly unconditionally generate 10 examples
#N = 10
#reconstructions = []
#for i in range(N):
# reconstructions.append([decode(temperature=0.5, draw_mode=False), [0, i]])
#
#
## In[74]:
#
#
#stroke_grid = make_grid_svg(reconstructions)
#draw_strokes(stroke_grid)
#
#
## Let's load the owl model, and generate two sketches using two random IID gaussian latent vectors
#
## In[75]:
#
#
#model_dir = '/tmp/sketch_rnn/models/owl/lstm'
#
#
## In[76]:
#
#
#[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
## construct the sketch-rnn model here:
#reset_graph()
#model = Model(hps_model)
#eval_model = Model(eval_hps_model, reuse=True)
#sample_model = Model(sample_hps_model, reuse=True)
#sess = tf.InteractiveSession()
#sess.run(tf.global_variables_initializer())
## loads the weights from checkpoint into our model
#load_checkpoint(sess, model_dir)
#
#
## In[77]:
#
#
#z_0 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_0)
#
#
## In[79]:
#
#
#z_1 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_1)
#
#
## Let's interpolate between the two owls $z_0$ and $z_1$
#
## In[86]:
#
#
#z_list = [] # interpolate spherically between z_0 and z_1
#N = 10
#for t in np.linspace(0, 1, N):
# z_list.append(slerp(z_0, z_1, t))
## for every latent vector in z_list, sample a vector image
#reconstructions = []
#for i in range(N):
# reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.1), [0, i]])
#
#
## In[87]:
#
#
#stroke_grid = make_grid_svg(reconstructions)
#draw_strokes(stroke_grid)
#
#
## Let's load the model trained on both cats and buses! catbus!
#
## In[96]:
#
#
#model_dir = '/tmp/sketch_rnn/models/catbus/lstm'
#
#
## In[97]:
#
#
#[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
## construct the sketch-rnn model here:
#reset_graph()
#model = Model(hps_model)
#eval_model = Model(eval_hps_model, reuse=True)
#sample_model = Model(sample_hps_model, reuse=True)
#sess = tf.InteractiveSession()
#sess.run(tf.global_variables_initializer())
## loads the weights from checkpoint into our model
#load_checkpoint(sess, model_dir)
#
#
## In[152]:
#
#
#z_1 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_1)
#
#
## In[172]:
#
#
#z_0 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_0)
#
#
## Let's interpolate between a cat and a bus!!!
#
## In[173]:
#
#
#z_list = [] # interpolate spherically between z_1 and z_0
#N = 10
#for t in np.linspace(0, 1, N):
# z_list.append(slerp(z_1, z_0, t))
## for every latent vector in z_list, sample a vector image
#reconstructions = []
#for i in range(N):
# reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.15), [0, i]])
#
#
## In[174]:
#
#
#stroke_grid = make_grid_svg(reconstructions)
#draw_strokes(stroke_grid)
#
#
## Why stop here? Let's load the model trained on both elephants and pigs!!!
#
## In[175]:
#
#
#model_dir = '/tmp/sketch_rnn/models/elephantpig/lstm'
#
#
## In[176]:
#
#
#[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
## construct the sketch-rnn model here:
#reset_graph()
#model = Model(hps_model)
#eval_model = Model(eval_hps_model, reuse=True)
#sample_model = Model(sample_hps_model, reuse=True)
#sess = tf.InteractiveSession()
#sess.run(tf.global_variables_initializer())
## loads the weights from checkpoint into our model
#load_checkpoint(sess, model_dir)
#
#
## In[188]:
#
#
#z_0 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_0)
#
#
## In[195]:
#
#
#z_1 = np.random.randn(eval_model.hps.z_size)
#_ = decode(z_1)
#
#
## Tribute to an episode of [South Park](https://en.wikipedia.org/wiki/An_Elephant_Makes_Love_to_a_Pig): The interpolation between an Elephant and a Pig
#
## In[202]:
#
#
#z_list = [] # interpolate spherically between z_1 and z_0
#N = 10
#for t in np.linspace(0, 1, N):
# z_list.append(slerp(z_0, z_1, t))
## for every latent vector in z_list, sample a vector image
#reconstructions = []
#for i in range(N):
# reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.15), [0, i]])
#
#
## In[203]:
#
#
#stroke_grid = make_grid_svg(reconstructions, grid_space_x=25.0)
#
#
## In[204]:
#
#
#draw_strokes(stroke_grid, factor=0.3)
# In[ ]:
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,444 | qiuyingyue/ONV2SEQ | refs/heads/master | /onv_process.py | """
This script generate the photoreceptor input for the face dataset
The log polar sampling process is converted from the C++ implementation
Please refer to "Log Polar" section for more details
"""
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
sys.path.append("/home/tao/miniconda2/lib/python2.7/dist-packages")
import os
import struct
import numpy as np
import gzip
import csv
from PIL import Image
import matplotlib.pyplot as plt
global R,T,x0,y0,r_min,r_max,factor, dr, dt
global coeff_00, coeff_01, coeff_10, coeff_11
global index_00,index_01,index_10,index_11
from scipy import misc
# ========================== #
# Log Polar #
# ========================== #
# Precompute the log-polar sampling grid and bilinear-interpolation tables
# used by onv_convert_fromarr(). Targets Python 2 semantics: integer
# divisions like R*T/Tfactor and T/Tfactor are relied on to yield ints.
img_size = 600
x0 = 0.5+img_size/2;
y0 = 0.5+img_size/2;
r_min = 0.1
r_max = np.linalg.norm(np.array([x0,y0]))
R = 138 #radius 138
T = 360 # 360
factor = R/8 #R/8
Tfactor = 5 #1
# Read (or generate and cache) the per-receptor positional noise.
ovn_size = R*T/Tfactor
csvfilename = "noise_"+str(ovn_size)+"_0.15_right.csv" #left eye: noise_onv_0.15.csv; right eye: noise_onv_0.15_right.csv
if (os.path.exists(csvfilename)):
    print ("reading noise from", csvfilename)
    # Originally generated with np.random.normal; cached so that every
    # run samples the image at identical jittered positions.
    with open(csvfilename) as csvfile:
        csvreader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)
        noise = next(csvreader)
        noise = np.array(noise)
else:
    noise = np.random.normal(0, 0.15, R*T/Tfactor)
    with open(csvfilename, 'wb') as csvfile:
        csvwriter = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
        csvwriter.writerow(noise)
    print ("writing noise to", csvfilename)
#print ("noise:", noise.shape, np.max(noise), np.min(noise))
dr = R/factor
dt = Tfactor*2.0*np.pi/T #4*2.0*np.pi/T
r = np.arange(0, R, 1.0) #It must be float number instead of integer
t = np.arange(0, T, Tfactor*1.0)#4
phi = t * dt
# NOTE(review): rr tiles the radius ramp T/Tfactor times while tt tiles
# phi R times, so element i pairs r[i mod R] with phi[i mod (T/Tfactor)]
# — an unusual pairing for a log-polar grid (np.repeat on one of the two
# would give the conventional outer product); confirm this is intended.
rr = np.tile(r, T/Tfactor)
#print ("r_max", r_max, "rr", len(rr), "noise", len(noise), "R",R)
tau = r_max * np.exp(rr/factor + noise - R/factor)
tt = np.tile(phi, R) + noise
# Cartesian sample positions of every photoreceptor.
X = x0 + np.multiply(tau, np.cos(tt))
Y = y0 + np.multiply(tau, np.sin(tt))
#print ("X", X, "Y", Y)
# Bilinear interpolation setup: the four neighbouring pixel corners of
# each sample point, clipped to the image, plus the blending weights.
X_min = np.floor(X).astype(int)
Y_min = np.floor(Y).astype(int)
X_max = X_min + 1
Y_max = Y_min + 1
U = X - X_min
V = Y - Y_min
X_min = np.clip(X_min, 0, img_size-1)
Y_min = np.clip(Y_min, 0, img_size-1)
X_max = np.clip(X_max, 0, img_size-1)
Y_max = np.clip(Y_max, 0, img_size-1)
# Flat indices into a row-major img_size x img_size image.
index_00 = X_min * img_size + Y_min
index_01 = X_min * img_size + Y_max
index_10 = X_max * img_size + Y_min
index_11 = X_max * img_size + Y_max
coeff_00 = np.multiply(1-U, 1-V)
coeff_01 = np.multiply(1-U, V)
coeff_10 = np.multiply(U, 1-V)
coeff_11 = np.multiply(U, V)
#print ("index_00", index_00, "index_01", index_01, "index_10", index_10, "index_11", index_11)
'''onv = np.zeros((R*T/Tfactor))
plt.scatter(Y,-X, c=onv/255.0, cmap='gray', s=1, marker='.')
plt.show()'''
def onv_convert_fromarr(I, resize=False):
    """Sample image *I* at the precomputed log-polar receptor positions.

    Bilinearly interpolates using the module-level index/coefficient
    tables and returns a uint8 vector of receptor intensities. When
    *resize* is True the image is first resized to 600x600.
    """
    if resize:
        I = misc.imresize(I, (600,600))
    # Gather the four neighbouring pixels of every sample point and
    # blend them with the precomputed bilinear weights.
    blended = (coeff_00 * np.take(I, index_00)
               + coeff_01 * np.take(I, index_01)
               + coeff_10 * np.take(I, index_10)
               + coeff_11 * np.take(I, index_11))
    return blended.astype(np.dtype('uint8'))
def show_onv(onv, filename=None):
    """Scatter-plot an ONV vector at the log-polar sample positions.

    The figure is always shown; when *filename* is given it is saved
    first.
    """
    plt.scatter(Y, -X, c=onv/255.0, cmap='gray', s=10, marker='.',
                facecolor='0.5', lw=0, edgecolor='r')
    plt.ylim(-600, 0)
    plt.xlim(0, 600)
    if filename is not None:
        plt.savefig(filename)
    plt.show()
# =========================== #
# Process the train dataset #
# =========================== #
#import h5py
#import cv2
# from pylab import *
'''def process(file_dir, save_dir):
result = np.zeros((R*T,1))
for f in os.listdir(file_dir):
if (not f.endswith(".png") and not f.endswith(".jpg")):
continue
filename = os.path.join(file_dir, f)
print ("filename",filename)
I = cv2.imread(filename, 0)
I = cv2.resize(I, dsize=(img_size, img_size))
temp = onv_convert_fromarr(I)
print (temp.shape)
result = temp.reshape((R*T, 1))
# test = filename.split('_p')[1].split('_e')[0]
# # TODO: Modified the new label for new data
# result[R * T, :] = int(test)
# result = result.astype(np.dtype('uint8'))
#plt.scatter(X,Y,c=temp/255.0)
plt.scatter(Y,-X, c=temp/255.0, cmap='gray', s=1, marker='.')
plt.show()
h5f = h5py.File(os.path.join(save_dir, '%s.h5'%f), 'w')
h5f.create_dataset('data', data=np.transpose(result), compression="gzip", compression_opts=9)
h5f.close()
process(file_dir = '../data/testimage', save_dir = '../data/testonv')'''
def png_to_onv(file_dir, onv_filepath):
    """Convert the train/valid/test PNG splits of every class under
    *file_dir* into ONV arrays and save them as <classname>.npz files.

    Images inside each split directory are expected to be named
    0.png, 1.png, ...; the class "sketchrnn_tree" is skipped (matches
    the original behaviour). The three split loops were identical, so
    they are factored into a single helper.
    """
    def _load_split(split_dir):
        # One onv per image; images are read by index so ordering is
        # deterministic regardless of os.listdir() order.
        onvs = []
        for idx in range(len(os.listdir(split_dir))):
            im = misc.imread(os.path.join(split_dir, str(idx) + ".png"), mode='L')
            onvs.append(onv_convert_fromarr(im))
        return np.array(onvs)

    for classname in os.listdir(file_dir):
        if classname == "sketchrnn_tree":
            continue
        print ("classname", classname)
        train_onv = _load_split(os.path.join(file_dir, classname, "train"))
        valid_onv = _load_split(os.path.join(file_dir, classname, "valid"))
        test_onv = _load_split(os.path.join(file_dir, classname, "test"))
        if not os.path.exists(onv_filepath):
            os.makedirs(onv_filepath)
        outfile = os.path.join(onv_filepath, classname)
        np.savez(outfile, train=train_onv, valid=valid_onv, test=test_onv)
# data = np.load("../data/onv/sketchrnn_rabbit.npz")
# plt.scatter(Y,-X, c=data["valid"][0]/255.0, cmap='gray', s=1, marker='.')
# print (np.mean(data["valid"][0]))
# plt.show()
if __name__ == "__main__":
png_to_onv("../data/png_thick", "../data/onv_9936_thick_right")
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,445 | qiuyingyue/ONV2SEQ | refs/heads/master | /png_to_binary.py | import cv2
import os
# Root of the source PNG tree; binarised copies go to a parallel
# "png_binary" tree with the same layout.
data_path = "../data/png"
for root, dirs, files in os.walk(data_path):
    cv2.namedWindow("ori")
    cv2.namedWindow("bin")
    # Only process leaf directories (directories with no subdirectories).
    if (len(dirs)==0):
        for f in files:
            # NOTE(review): cv2.CV_LOAD_IMAGE_GRAYSCALE is the legacy
            # OpenCV 2.x constant; OpenCV 3+ renamed it
            # cv2.IMREAD_GRAYSCALE — confirm the installed version.
            im = cv2.imread(os.path.join(root, f), cv2.CV_LOAD_IMAGE_GRAYSCALE)
            #print (im.shape)
            #cv2.imshow("ori",im)
            # Threshold at 127: pixels above become 255, the rest 0.
            ret, im_bin = cv2.threshold(im, 127, 255, cv2.THRESH_BINARY)
            #print (im_bin.shape)
            #cv2.imshow("bin", im_bin)
            #cv2.waitKey(0)
            # NOTE(review): replace() swaps every "png" occurrence in the
            # path, not only the top-level directory name — verify paths.
            dirname = root.replace("png", "png_binary")
            if (not os.path.exists(dirname)):
                os.makedirs(dirname)
            cv2.imwrite(os.path.join(dirname, f), im_bin)
52,446 | qiuyingyue/ONV2SEQ | refs/heads/master | /demean.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 20:38:39 2017
@author: tzhou
"""
#calculate mean
import skimage
import skimage.io
import skimage.transform
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
def rgb2gray(rgb):
    """Convert an RGB(A) image array to luma via ITU-R BT.601 weights."""
    weights = np.array([0.299, 0.587, 0.114])
    # Ignore any alpha channel; weighted sum over the colour axis.
    return rgb[..., :3] @ weights
#imname = '/home/tzhou/Desktop/0.png'
#im = skimage.io.imread(imname)
#imcrop = skimage.img_as_ubyte(skimage.transform.resize(im, [224, 224]))
#imgplot = plt.imshow(imcrop)
#gray = rgb2gray(imcrop)
#imgplot = plt.imshow(gray)
#%%
# Cell 1: resize 70k training PNGs to 112x112 grayscale, track each
# image's mean intensity, and save the stacked array.
a = []
i_size = 112
s_train = np.zeros((70000,i_size,i_size))
for i in range(70000):
    print(i)
    imname = '/home/tzhou/Desktop/cat/trainpng/' + str(i) +'.png'
    im = skimage.io.imread(imname)
    #plt.imsave('test1.png', im, cmap = plt.cm.gray)
    #imgplot = plt.imshow(im)
    imcrop = skimage.img_as_ubyte(skimage.transform.resize(im, [i_size, i_size]))
    gray = rgb2gray(imcrop)
    # Per-image mean, used for the dataset-wide mean below.
    a.append(np.mean(gray))
    s_train[i,:,:] = gray
# Dataset-wide mean intensity (mean of per-image means).
print(np.mean(a))
np.save('train.npy', s_train)
#%%
# Cell 2: bundle the three pre-saved splits into one .npz archive.
import numpy as np
train = np.load('train.npy')
valid = np.load('valid.npy')
test = np.load('test.npy')
np.savez('cat', train = train, valid = valid, test = test )
#%%
#s_test = np.zeros((300,224,224))
#for i in range(300):
# print(i)
# imname = '/home/tzhou/Desktop/sheep/test_png/' + str(i) +'.png'
# im = skimage.io.imread(imname)
# #plt.imsave('test1.png', im, cmap = plt.cm.gray)
# #imgplot = plt.imshow(im)
# imcrop = skimage.img_as_ubyte(skimage.transform.resize(im, [224, 224]))
# gray = rgb2gray(imcrop)
# s_test[i,:,:] = gray
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,447 | qiuyingyue/ONV2SEQ | refs/heads/master | /svg2img_gif.py |
from cairosvg import svg2png
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import os
import re
import svgwrite
import numpy as np
from utils import *
from PIL import Image, ImageOps
from tempfile import TemporaryFile
#from scipy import misc
#from onv_process import onv_convert_fromarr
# little function that displays vector images and saves them to .svg
def save_strokes(data, factor=0.2, padding=10, svg_filename = '/tmp/sketch_rnn/svg/sample.svg'):
    """Render a delta-encoded stroke-3 sequence to a centred, square .svg.

    Args:
        data: (N, 3) rows of [dx, dy, pen_lift].
        factor: divisor applied to the stroke deltas before drawing.
        padding: extra margin around the sketch (pre-division by factor).
        svg_filename: output path for the drawing.
    """
    #tf.gfile.MakeDirs(os.path.dirname(svg_filename))
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    #dims = (50 + max_x - min_x, 50 + max_y - min_y)
    #print (max_x, min_x, max_y, min_y)
    #return
    diff_x = max_x - min_x
    diff_y = max_y - min_y
    # square canvas: the sketch's longer side plus the padding margin
    size = max(diff_x, diff_y) + padding/factor
    dims = (size, size)
    # leftover space per axis; halved below to centre the sketch
    padding_x = size - diff_x
    padding_y = size - diff_y
    #print (dims, diff_x, diff_y, padding_x, padding_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
    lift_pen = 1
    abs_x = padding_x/2 - min_x
    abs_y = padding_y/2 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    command = "m"
    for i in range(len(data)):
        # "m" after a pen lift, "l" for the first drawn segment, then the
        # command letter is omitted (implicit line-to continuation)
        if (lift_pen == 1):
            command = "m"
        elif (command != "l"):
            command = "l"
        else:
            command = ""
        x = float(data[i,0])/factor
        y = float(data[i,1])/factor
        lift_pen = data[i, 2]
        p += command+str(x)+","+str(y)+" "
    the_color = "black"
    stroke_width = 30
    dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none"))
    dwg.save()
    #display(SVG(dwg.tostring()))
def resize_strokes(strokes, size):
    """Rescale every stroke sequence so its longer side fits *size* (in place)."""
    resized = [resize_single_stroke(stroke, size) for stroke in strokes]
    resized = np.array(resized)
    print (resized.shape)
    return resized
def resize_single_stroke(data, size):
    """Scale one stroke sequence in place so its longer side equals *size*.

    Only row 0 is shifted when re-centring: this assumes stroke-3 delta
    encoding, where row 0 is the absolute start offset and later rows are
    deltas -- TODO confirm upstream data is delta-encoded.
    Returns the (mutated) input array.
    """
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("before", min_x, max_x, min_y, max_y)
    width = max_x - min_x
    height = max_y - min_y
    center_x = width / 2.0 + min_x
    center_y = height / 2.0 + min_y
    #print ("width:", width, "height", height, "center_x", center_x, "center_y", center_y)
    # shift the starting point so the sketch is centred at the origin
    data[0,0] = data[0,0] - center_x
    data[0,1] = data[0,1] - center_y
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("middle", min_x, max_x, min_y, max_y)
    # uniform scale factor so the longer side becomes exactly *size*
    factor = float(size) / max(width, height)
    #print ("factor", factor)
    data[:,0] = data[:,0] * factor
    data[:,1] = data[:,1] * factor
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("after", min_x, max_x, min_y, max_y)
    return data
def convert_with_cairosvg(svg_filename, png_filename):
    """Rasterize an .svg file to a .png file via cairosvg.

    Bug fix: the original opened both files without ever closing them,
    leaking file handles on every call; context managers guarantee both
    handles are closed (and the .png flushed).
    """
    with open(svg_filename, 'rb') as svg_file:
        svg_bytes = svg_file.read()
    with open(png_filename, 'wb') as png_file:
        svg2png(svg_bytes, write_to=png_file)
def strokes_to_npy(strokes, is_grey=True, size=64):
    """Rasterize stroke-3 sequences into a stacked uint8 image array.

    Bug fix: the original referenced module-level ``svg_filename`` /
    ``png_filename`` that are only defined much later in this script (and
    point at unrelated files), raising NameError / clobbering data when the
    function is called on its own.  Scratch filenames are now local, matching
    the sibling svg2img.py version.

    Args:
        strokes: iterable of (n_points, 3) stroke arrays.
        is_grey: keep only channel 1 of the rendered image (black-on-white).
        size: output image side length in pixels.
    Returns:
        np.ndarray stacking one (size, size[, C]) image per stroke sequence.
    """
    svg_filename = "tmp.svg"
    png_filename = "tmp.png"
    npy = []
    print (type(strokes), strokes.shape)
    for data in strokes:
        save_strokes(data, factor=0.5, padding=10, svg_filename=svg_filename)
        convert_with_cairosvg(svg_filename, png_filename)
        im = Image.open(png_filename)
        im = im.resize((size, size), Image.ANTIALIAS)
        im2arr = np.array(im)
        if is_grey:
            # rendering is black on white, so any single channel works
            im2arr = im2arr[:, :, 1]
        npy.append(im2arr)
    npy = np.array(npy)
    print (npy.shape)
    return npy
def strokes_to_png(strokes, classname, batchname, is_grey=True, size=600, padding=125, save_onv=False,
                   data_filepath="../data"):
    """Rasterize stroke-3 sequences to padded PNGs under
    <data_filepath>/png_thick/<classname>/<batchname>/<index>.png.

    Bug fixes: ``svg_filename`` and ``data_filepath`` were undefined module
    globals (NameError on every call).  The scratch svg name is now local and
    the data root is a new optional keyword (backward-compatible default).

    Returns:
        np.ndarray of ONVs when save_onv is True, otherwise an empty array.
    """
    svg_filename = "tmp.svg"
    npy = []
    index = 0
    for data in strokes:
        save_strokes(data, factor=0.5, padding=10, svg_filename=svg_filename)
        dirname = os.path.join(data_filepath, "png_thick", classname, batchname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        png_filename = os.path.join(dirname, str(index) + ".png")
        convert_with_cairosvg(svg_filename, png_filename)
        # shrink the drawing, then add a uniform white border of *padding* px
        im = Image.open(png_filename)
        im = im.resize((size - 2 * padding, size - 2 * padding), Image.ANTIALIAS)
        new_im = ImageOps.expand(im, (padding, padding, padding, padding), fill=(255, 255, 255))
        new_im.save(png_filename)
        if save_onv:
            # NOTE(review): misc and onv_convert_fromarr come from imports that
            # are commented out at the top of this file -- re-enable them
            # before calling with save_onv=True.
            im2arr = np.array(new_im)
            misc.imshow(im2arr)
            onv = onv_convert_fromarr(im2arr)
            npy.append(onv)
        index += 1
    npy = np.array(npy)
    print (npy.shape)
    return npy
# svg_filepath = '/home/qyy/workspace/data/svg/sketchrnn_bus.full.npz'
# svg_data = np.load(svg_filepath, encoding="bytes")
# train_strokes = svg_data['train']
# i = 0
# for data in train_strokes:
# svg_filename = "sample.svg"
# png_filename = "sample.png"
# save_strokes(data, 0.2, svg_filename)
# convert_with_cairosvg(svg_filename, png_filename)
# i+=1
# if (i > 1):
# break
# Convert the .svg frames of one original-sketch folder to 600x600 PNGs,
# writing them into a sibling "<index>_png" directory.
size = 300
padding = 125
img_ind = [0, 2500, 7520, 10010]  # sample indices of interest
index = 0  # which entry of img_ind to process
img_seq = "/home/qyy/workspace/test/image_sequence"
img_onv = "/home/qyy/workspace/display_image/onv_left"
orig_img = "/home/qyy/workspace/test/original"
in_folder = os.path.join(orig_img, str(img_ind[0]))
in_files = [name for name in os.listdir(in_folder) if os.path.isfile(os.path.join(in_folder, name))]
# natural sort: numeric chunks compared as integers, other chars as text
in_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for ind in range(len(in_files)):
    svg_filename = os.path.join(orig_img, str(img_ind[index]), in_files[ind])
    png_folder = os.path.join(orig_img, str(img_ind[index]) + '_png')
    if not os.path.exists(png_folder):
        os.makedirs(png_folder)
    # same basename with .png extension ([:-4] strips ".svg")
    png_filename = os.path.join(orig_img, str(img_ind[index]) + '_png', in_files[ind][:-4] + '.png')
    convert_with_cairosvg(svg_filename, png_filename)
    im = Image.open(png_filename)
    new_im = im.resize((600, 600))
    new_im.save(png_filename)
# in_folder = os.path.join(img_seq, str(img_ind[index]))
# in_files = [name for name in os.listdir(in_folder) if os.path.isfile(os.path.join(in_folder, name))]
# in_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
# for ind in range(len(in_files)):
# svg_filename = os.path.join(img_seq, str(img_ind[index]), str(ind) + '.svg')
# png_folder = os.path.join(img_seq, str(img_ind[index]) + '_png')
# if not os.path.exists(png_folder):
# os.makedirs(png_folder)
# png_filename = os.path.join(img_seq, str(img_ind[index]) + '_png', str(ind) + '.png')
# convert_with_cairosvg(svg_filename, png_filename)
# im = Image.open(png_filename)
# new_im = im.resize((100, 100))
# new_im.save(png_filename) | {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,448 | qiuyingyue/ONV2SEQ | refs/heads/master | /svg2img.py |
from cairosvg import svg2png
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import os
import svgwrite
import numpy as np
from utils import *
from PIL import Image, ImageOps
from tempfile import TemporaryFile
#from scipy import misc
#from onv_process import onv_convert_fromarr, show_onv
def save_strokes(data, factor=0.2, padding=10, svg_filename = '/tmp/sketch_rnn/svg/sample.svg'):
    """Render a delta-encoded stroke-3 sequence into a centred square .svg.

    The canvas side is the sketch's longer dimension plus padding/factor,
    so the drawing sits centred on a white square background.
    """
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    width = max_x - min_x
    height = max_y - min_y
    side = max(width, height) + padding/factor
    dims = (side, side)
    # leftover space on each axis, split evenly to centre the sketch
    slack_x = side - width
    slack_y = side - height
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    abs_x = slack_x/2 - min_x
    abs_y = slack_y/2 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    lift_pen = 1
    command = "m"
    for row in data:
        if (lift_pen == 1):
            command = "m"            # pen was up: move without drawing
        elif (command != "l"):
            command = "l"            # first drawn segment after a move
        else:
            command = ""             # implicit continuation of "l"
        x = float(row[0])/factor
        y = float(row[1])/factor
        lift_pen = row[2]
        p += command + str(x) + "," + str(y) + " "
    dwg.add(dwg.path(p).stroke("black", 30).fill("none"))
    dwg.save()
def resize_strokes(strokes, size):
    """Return an array of stroke sequences, each rescaled (in place) to *size*."""
    rescaled = np.array([resize_single_stroke(s, size) for s in strokes])
    print (rescaled.shape)
    return rescaled
def resize_single_stroke(data, size):
    """Helper for resize_strokes(): scale one sequence in place so its longer
    side equals *size*.

    Only row 0 is shifted when re-centring: this assumes stroke-3 delta
    encoding, where row 0 is the absolute start offset and later rows are
    deltas -- TODO confirm upstream data is delta-encoded.
    Returns the (mutated) input array.
    """
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("before", min_x, max_x, min_y, max_y)
    width = max_x - min_x
    height = max_y - min_y
    center_x = width / 2.0 + min_x
    center_y = height / 2.0 + min_y
    #print ("width:", width, "height", height, "center_x", center_x, "center_y", center_y)
    # shift the starting point so the sketch is centred at the origin
    data[0,0] = data[0,0] - center_x
    data[0,1] = data[0,1] - center_y
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("middle", min_x, max_x, min_y, max_y)
    # uniform scale factor so the longer side becomes exactly *size*
    factor = float(size) / max(width, height)
    #print ("factor", factor)
    data[:,0] = data[:,0] * factor
    data[:,1] = data[:,1] * factor
    min_x, max_x, min_y, max_y = get_bounds(data, 1)
    #print ("after", min_x, max_x, min_y, max_y)
    return data
def convert_with_cairosvg(svg_filename, png_filename):
    """Rasterize an .svg file to a .png file via cairosvg.

    Bug fix: the original opened both files without closing them, leaking
    file handles on every call; use context managers so both handles are
    always closed and the output is flushed.
    """
    with open(svg_filename, 'rb') as svg_file:
        svg_bytes = svg_file.read()
    with open(png_filename, 'wb') as png_file:
        svg2png(svg_bytes, write_to=png_file)
def strokes_to_npy(strokes, is_grey=True, size=64):
    """Rasterize each stroke sequence through scratch svg/png files and stack
    the resized images into one numpy array."""
    print (type(strokes), strokes.shape)
    images = []
    for stroke in strokes:
        scratch_svg = "tmp.svg"
        scratch_png = "tmp.png"
        save_strokes(stroke, factor=0.5, padding=10, svg_filename=scratch_svg)
        convert_with_cairosvg(scratch_svg, scratch_png)
        img = Image.open(scratch_png).resize((size, size), Image.ANTIALIAS)
        pixels = np.array(img)
        if is_grey:
            # black-on-white rendering: keep a single channel
            pixels = pixels[:, :, 1]
        images.append(pixels)
    images = np.array(images)
    print (images.shape)
    return images
def strokes_to_png(strokes, classname, batchname, png_filepath, size=600, padding=125, save_onv=False):
    """convert strokes to svg files and then to png files and then resize to size*size with padding arround
    save the png files and return the numpy array"""
    """ set save_ong to True to save the onv files"""
    # Output layout: <png_filepath>/<classname>/<batchname>/<index>.png
    # Returns an array of ONVs when save_onv is True, otherwise an empty array.
    npy=[]
    index = 0
    for data in strokes:
        svg_filename = "tmp.svg"
        save_strokes(data, factor=0.5, padding=10, svg_filename=svg_filename)
        dirname = os.path.join(png_filepath, classname, batchname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        png_filename = os.path.join(dirname, str(index) + ".png")
        convert_with_cairosvg(svg_filename, png_filename)
        # shrink the drawing, then add a uniform white border of *padding* px
        im = Image.open(png_filename)
        im = im.resize((size-2*padding, size-2*padding), Image.ANTIALIAS)
        padding_arr = (padding, padding, padding, padding)
        new_im = ImageOps.expand(im, padding_arr, fill=(255, 255, 255))
        new_im.save(png_filename)
        index += 1
        #array to onv
        # NOTE(review): onv_convert_fromarr / show_onv come from an import that
        # is commented out at the top of this file -- re-enable it before
        # calling with save_onv=True, otherwise this branch raises NameError.
        if (save_onv):
            im2arr = np.array(new_im)#error
            #misc.imshow(im2arr)
            onv = onv_convert_fromarr(im2arr)#eerror
            show_onv(onv)
            npy.append(onv)
    npy = np.array(npy)
    print (npy.shape)
    return npy
### Data preprocessing on original sketch files to obtain conrresponding png/onv/numpy image
def main1():
    """Preprocess the raw sketch .npz archives under ../data/sketch.

    For each class archive, renders the train/valid/test stroke sets to
    padded PNGs (option 2); options (1) resize-only and (3) numpy-image
    output are kept as commented alternatives.

    Bug fix: the original tested an undefined name ``save_onv`` when deciding
    whether to write the ONV archive (NameError on the first processed file);
    it is now an explicit local flag passed through to strokes_to_png().
    """
    data_filepath = '../data/'
    input_dirname = "sketch"
    output_dirname = "testmain1"  # change per experiment
    is_grey = True
    save_onv = False  # set True to also compute and archive ONVs
    for f in os.listdir(os.path.join(data_filepath, input_dirname)):
        print(f)
        if ("full" in f):
            continue
        classname = f.split('.')[0]
        outfile = os.path.join(data_filepath, output_dirname, f)
        if os.path.exists(outfile):
            continue  # already processed
        if not os.path.exists(os.path.dirname(outfile)):
            os.makedirs(os.path.dirname(outfile))
        # load the three splits of delta-encoded strokes
        fname = os.path.join(data_filepath, input_dirname, f)
        data = np.load(fname, encoding="bytes")
        train_strokes = data['train']
        valid_strokes = data['valid']
        test_strokes = data['test']
        ### Of (1)(2)(3), only one should be active at a time
        # (1) resize the strokes and save to outfile
        '''resized_train_strokes = resize_strokes(train_strokes, size = 600)
        resized_valid_strokes = resize_strokes(valid_strokes, size = 600)
        resized_test_strokes = resize_strokes(test_strokes, size = 600)
        np.savez(outfile, train=resized_train_strokes, valid=resized_valid_strokes,test=resized_test_strokes)'''
        # (2) save the strokes into .png (and .onv optionally), for sketch-onv2seq
        png_filepath = os.path.join(data_filepath, "testpng")
        train_onv = strokes_to_png(train_strokes, classname, 'train', png_filepath=png_filepath, save_onv=save_onv)
        valid_onv = strokes_to_png(valid_strokes, classname, 'valid', png_filepath=png_filepath, save_onv=save_onv)
        test_onv = strokes_to_png(test_strokes, classname, 'test', png_filepath=png_filepath, save_onv=save_onv)
        if save_onv:
            np.savez(outfile, train=train_onv, valid=valid_onv, test=test_onv)
        # (3) save the strokes into numpy arrays of images, for sketch-pix2seq
        '''train_images = strokes_to_npy(train_strokes, is_grey)
        valid_images = strokes_to_npy(valid_strokes, is_grey)
        test_images = strokes_to_npy(test_strokes, is_grey)
        np.savez(outfile, train=train_images, valid=valid_images,test=test_images)'''
### convert .svg to .png from svg_filepath/dirname to png_filepath/dirname
def main2():
    """Convert every .svg under ../display_svg/<dir> to a .png under
    ../display_image/<dir>, creating output directories as needed."""
    svg_filepath = '../display_svg'
    png_filepath = '../display_image'
    for dirname in ['original']:
        for fname in os.listdir(os.path.join(svg_filepath, dirname)):
            if 'svg' not in fname:
                continue
            svg_filename = os.path.join(svg_filepath, dirname, fname)
            png_filename = os.path.join(png_filepath, dirname, fname.replace('svg','png'))
            print ("svg_filename", svg_filename, "png_filename", png_filename)
            out_dir = os.path.dirname(png_filename)
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            convert_with_cairosvg(svg_filename=svg_filename, png_filename=png_filename)
            # round-trip through PIL, exactly as the original did
            im = Image.open(png_filename)
            im.save(png_filename)
def gallery(array, ncols=3):
    """Tile a (N, H, W, C) image stack into one (H*nrows, W*ncols, C) grid."""
    print (array.shape, array[0].shape)
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    assert nindex == nrows * ncols
    # build the grid row by row: each grid row is ncols images side by side
    grid_rows = [np.hstack(array[r * ncols:(r + 1) * ncols]) for r in range(nrows)]
    return np.vstack(grid_rows)
###place multiple images to a grid for display
def main3():
    """Assemble a comparison grid: one row of images per model directory,
    one column per selected sample index, saved as ../display_20_4.png."""
    source_path = '../display_image'
    # each directory holds renderings of the same samples by a different model
    dirlist = ['original', 'onv_left_resize', 'rnn_encoder_5classes_0.2','cnn_encoder_5classes_0.2',
    'dnn_encoder_5classes_binocular_0.2','dnn_encoder_5classes_pretrainedrnn_binocular_0.2','dnn_encoder_5classes_pretrainedcnn_binocular_0.2']
    '''idx_list = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95,
    2500, 2505, 2510, 2515, 2520, 2525, 2530, 2535, 2540, 2545, 2550, 2555, 2560, 2565, 2570, 2575, 2580, 2585, 2590, 2595,
    5000, 5005, 5010, 5015, 5020, 5025, 5030, 5035, 5040, 5045, 5050, 5055, 5060, 5065, 5070, 5075, 5080, 5085, 5090, 5095,
    7500, 7505, 7510, 7515, 7520, 7525, 7530, 7535, 7540, 7545, 7550, 7555, 7560, 7565, 7570, 7575, 7580, 7585, 7590, 7595,
    10000, 10005, 10010, 10015, 10020, 10025, 10030, 10035, 10040, 10045, 10050, 10055, 10060, 10065, 10070, 10075, 10080, 10085, 10090, 10095]
    '''
    ''' '''
    # hand-picked sample indices (4 per class block)
    idx_list = [0, 25, 45, 50, #75,
    2525, 2530, 2535, 2560, #2555,
    7515, 7540,7575, 7580, # 7570,
    10005, 10010, 10045, 10060]#, 10090
    img_list = []#None
    for dirname in dirlist:
        for idx in idx_list:
            png_filename = os.path.join(source_path, dirname, str(idx)+'.png')
            img = Image.open(png_filename)
            img = img.resize((100,100), Image.ANTIALIAS)
            img_arr = np.array(img)
            # drop any alpha channel: keep RGB only
            img_list.append(img_arr[:,:,0:3])
    img_list = np.array(img_list)
    result = gallery(img_list, len(idx_list))
    display_img = Image.fromarray(result)
    display_img.show()
    display_img.save("../display_20_4.png")
if __name__ == "__main__":
    # Entry point: run the sketch preprocessing; switch to main2/main3 for
    # svg->png conversion or grid display.
    main1()
    #main2()
    #main3()
52,449 | qiuyingyue/ONV2SEQ | refs/heads/master | /classification.py | # TensorFlow and tf.keras
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import tensorflow as tf
# Helper libraries
import numpy as np
import nn
from sketch_rnn_train_image import load_dataset
import model_cnn_encoder as sketch_rnn_model
from PIL import Image
def network(batch):
    """4-layer conv encoder + 2 FC layers producing 3 class logits.

    The reshape to 2*2*256 fixes the graph to 64x64x1 inputs
    (four stride-2 VALID convs: 64 -> 31 -> 14 -> 6 -> 2).
    """
    #cnn layers
    print ("batch: ", batch)
    h = tf.layers.conv2d(batch, 32, 4, strides=2, activation=tf.nn.relu, name="enc_conv1")
    print ("h_conv1: ", h)
    h = tf.layers.conv2d(h, 64, 4, strides=2, activation=tf.nn.relu, name="enc_conv2")
    print ("h_conv2: ", h)
    h = tf.layers.conv2d(h, 128, 4, strides=2, activation=tf.nn.relu, name="enc_conv3")
    print ("h_conv3: ", h)
    h = tf.layers.conv2d(h, 256, 4, strides=2, activation=tf.nn.relu, name="enc_conv4")
    print ("h_conv4: ", h)
    # flatten the final 2x2x256 feature map
    h = tf.reshape(h, [-1, 2*2*256])
    print ("h", h)
    fc1 = tf.layers.dense(h, 288, name="enc_fc1")
    fc2 = tf.layers.dense(fc1, 3, name="enc_fc2")  # 3-class logits
    return fc2
batch_size = 100
image_size = 64  # side length expected by network()
# Load dataset
data_dir = "../data"
model_params = sketch_rnn_model.get_default_hparams()
datasets = load_dataset(data_dir, model_params, do_filter=False, contain_labels=True)
# NOTE(review): indices 6-8 appear to hold train/valid/test images and 9-11
# the matching labels -- confirm against load_dataset's return order.
print ("shape of datasets[6]", datasets[6].shape)
train_images = np.reshape(datasets[6], (datasets[6].shape[0], image_size, image_size, 1))
valid_images = np.reshape(datasets[7], (datasets[7].shape[0], image_size, image_size, 1))
test_images = np.reshape(datasets[8], (datasets[8].shape[0], image_size, image_size, 1))
train_labels = datasets[9]
valid_labels = datasets[10]
test_labels = datasets[11]
# scale pixel intensities from [0, 255] to [0, 1]
train_images = train_images / 255.0
valid_images = valid_images / 255.0
test_images = test_images / 255.0
# Construct model
X = tf.placeholder(tf.float32, [None, image_size, image_size, 1])
Y = tf.placeholder(tf.int32, [None,3])  # one-hot labels over 3 classes
logits = network(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op)
# Evaluate model: fraction of argmax agreement between prediction and label
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
def random_batch(batch_size, data, label):
    """Sample a uniform random minibatch (with replacement) of paired rows."""
    picks = np.random.randint(0, len(data), batch_size)
    return data[picks], label[picks]
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    num_steps = 300000
    display_step = 1000
    for step in range(1, num_steps+1):
        batch_x, batch_y = random_batch(batch_size, train_images, train_labels)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and training accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", Training Accuracy= " + \
                "{:.3f}".format(acc))
            # Calculate batch loss and validation accuracy
            batch_x, batch_y = random_batch(batch_size, valid_images, valid_labels)
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", validation Accuracy= " + \
                "{:.3f}".format(acc))
    print("Optimization Finished!")
    # Report accuracy on one random test batch
    batch_x, batch_y = random_batch(batch_size, test_images, test_labels)
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: batch_x,
                                      Y: batch_y}))
    # Dump every misclassified test image (raw uint8 from datasets[8]).
    # NOTE(review): requires an existing "error/" directory -- confirm it is
    # created beforehand, otherwise im.save raises.
    for index in range(len(test_images)):
        image = test_images[index]
        label = test_labels[index]
        pred, accu = sess.run([correct_pred, accuracy], feed_dict={X:[image], Y:[label]})
        #print ("pred label:", pred, "accu", accu)
        if (accu==0):
            im = Image.fromarray(datasets[8][index])
            im.save("error/"+str(index)+".jpeg")
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,450 | qiuyingyue/ONV2SEQ | refs/heads/master | /nn.py | from __future__ import absolute_import, division, print_function
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import numpy as np
import tensorflow as tf
# components
from tensorflow.python.ops.nn import dropout as drop
from util.cnn import conv_layer as conv
from util.cnn import conv_relu_layer as conv_relu
from util.cnn import conv_relu_layer_bn as conv_relu_bn
from util.cnn import pooling_layer as pool
from util.cnn import fc_layer as fc
from util.cnn import fc_relu_layer as fc_relu
def conv_net(input_batch, name):
    """Six-layer conv encoder (comments give kernel@channels/stride), ending
    in a tanh so activations lie in [-1, 1].  All layers live under the
    variable scope *name*.
    """
    with tf.variable_scope(name):
        #conv1: 2*2@4/2
        conv1 = conv_relu('conv1', input_batch,
                          kernel_size=2, stride=2, output_dim=4)
        print("conv1: ", conv1)
        #conv2: 2*2@4/1
        conv2 = conv_relu('conv2', conv1,
                          kernel_size=2, stride=1, output_dim=4)
        print("conv2: ", conv2)
        #conv3: 2*2@8/2
        conv3 = conv_relu('conv3', conv2,
                          kernel_size=2, stride=2, output_dim=8)
        print("conv3: ", conv3)
        #conv4: 2*2@8/1
        conv4 = conv_relu('conv4', conv3,
                          kernel_size=2, stride=1, output_dim=8)
        print("conv4: ", conv4)
        #conv5: 2*2@8/2
        conv5 = conv_relu('conv5', conv4,
                          kernel_size=2, stride=2, output_dim=8)
        print("conv5: ", conv5)
        #conv6: 2*2@8/1, no ReLU -- tanh applied below instead
        conv6 = conv('conv6', conv5,
                     kernel_size=2, stride=1, output_dim=8)
        print("conv6: ", conv6)
        tanh = tf.nn.tanh(conv6)
        return tanh
def conv_net_shallow(input_batch, name):
    """Shallower 4-layer variant of conv_net with wider channels, ending in
    tanh.  (The original per-layer comments were copied from conv_net and did
    not match the actual output_dims; corrected below.)
    """
    with tf.variable_scope(name):
        #conv1: 2*2@4/2
        conv1 = conv_relu('conv1', input_batch,
                          kernel_size=2, stride=2, output_dim=4)
        print("conv1: ", conv1)
        #conv2: 2*2@8/1
        conv2 = conv_relu('conv2', conv1,
                          kernel_size=2, stride=1, output_dim=8)
        print("conv2: ", conv2)
        #conv3: 2*2@16/2
        conv3 = conv_relu('conv3', conv2,
                          kernel_size=2, stride=2, output_dim=16)
        print("conv3: ", conv3)
        #conv4: 2*2@16/1
        conv4 = conv_relu('conv4', conv3,
                          kernel_size=2, stride=1, output_dim=16)
        print("conv4: ", conv4)
        # squash the final activations to [-1, 1]
        tanh = tf.nn.tanh(conv4)
        return tanh
def conv_net_bn(input_batch, name, phase):
    """Batch-normalized variant of conv_net.

    *phase* is the batch-norm is_training flag, forwarded to every
    conv_relu_bn layer and to the final explicit batch_norm.
    """
    with tf.variable_scope(name):
        #conv1: 2*2@4/2
        conv1 = conv_relu_bn('conv1', input_batch, phase,
                             kernel_size=2, stride=2, output_dim=4)
        print("conv1: ", conv1)
        #conv2: 2*2@4/1
        conv2 = conv_relu_bn('conv2', conv1, phase,
                             kernel_size=2, stride=1, output_dim=4)
        print("conv2: ", conv2)
        #conv3: 2*2@8/2
        conv3 = conv_relu_bn('conv3', conv2, phase,
                             kernel_size=2, stride=2, output_dim=8)
        print("conv3: ", conv3)
        #conv4: 2*2@8/1
        conv4 = conv_relu_bn('conv4', conv3, phase,
                             kernel_size=2, stride=1, output_dim=8)
        print("conv4: ", conv4)
        #conv5: 2*2@8/2
        conv5 = conv_relu_bn('conv5', conv4, phase,
                             kernel_size=2, stride=2, output_dim=8)
        print("conv5: ", conv5)
        #conv6: 2*2@8/1, plain conv + explicit BN, then tanh
        conv6 = conv('conv6', conv5, kernel_size=2, stride=1, output_dim=8)
        conv6 = tf.contrib.layers.batch_norm(conv6, center=True, scale=True, is_training=phase, scope='bn')
        print("conv6: ", conv6)
        tanh = tf.nn.tanh(conv6)
        return tanh
def my_fc_layer(input_batch, name, output_dim, apply_dropout=False):
    """Fully-connected layer under its own variable scope, with optional
    50% dropout on the output."""
    with tf.variable_scope(name):
        print("input_batch: ", input_batch)
        out = fc('fc', input_batch, output_dim=output_dim)
        print("fc7: ", out)
        return drop(out, 0.5) if apply_dropout else out
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,451 | qiuyingyue/ONV2SEQ | refs/heads/master | /util/loss.py | from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
def weighed_logistic_loss(scores, labels, pos_loss_mult=1.0, neg_loss_mult=1.0):
    """Sigmoid cross-entropy loss with different weights for positive
    (label 1) and negative (label 0) samples.

    Bug fixes: ``tf.mul`` was removed in TensorFlow 1.0 (renamed
    ``tf.multiply``), and ``tf.nn.sigmoid_cross_entropy_with_logits``
    requires keyword arguments since TF 1.0 -- the old positional call
    raises.  This repo uses TF>=1.0 APIs elsewhere (tf.layers.conv2d),
    so the original body could never run.
    """
    # per-sample weight: labels*(pos-neg) + neg  ==  pos where label==1,
    # neg where label==0
    loss_mult = tf.add(tf.multiply(labels, pos_loss_mult - neg_loss_mult), neg_loss_mult)
    # Classification loss as the average of weighed per-score loss
    cls_loss = tf.reduce_mean(tf.multiply(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=scores),
        loss_mult))
    return cls_loss
def l2_regularization_loss(variables, weight_decay):
    """Return weight_decay * sum of tf.nn.l2_loss over all *variables*."""
    return weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in variables])
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,452 | qiuyingyue/ONV2SEQ | refs/heads/master | /mnist_classification.py | #mnist_classification.py
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
sys.path.append("./mnist")
import tensorflow as tf
from tensorflow.contrib import keras
import mnist
import numpy as np
import nn
#mnist = keras.datasets.mnist
def network(x):
    """Three stacked FC layers (500 -> 25 -> 10) over the flattened 28x28
    input; returns raw logits for the 10 digit classes.  Each layer is built
    via nn.my_fc_layer with dropout enabled.
    """
    x = tf.reshape(x, [-1,28*28])
    print ("x", x)
    #fc1 = tf.layers.dense(x, 500, name="fc1")
    fc1=nn.my_fc_layer(x, "fc1", output_dim=500, apply_dropout=True)
    print ("fc1", fc1)
    #fc2 = tf.layers.dense(fc1, 25, name="fc2")
    fc2=nn.my_fc_layer(fc1, "fc2", output_dim=25, apply_dropout=True)
    print ("fc2", fc2)
    #fc3 = tf.layers.dense(fc2, 10, name="fc3")
    fc3=nn.my_fc_layer(fc2, "fc3", output_dim=10, apply_dropout=True)
    print ("fc3", fc3)
    return fc3
def convert_label(idx):
    """One-hot encode a digit class index into a length-10 float vector."""
    one_hot = np.zeros(10)
    one_hot[idx] = 1
    return one_hot
#load data
def load_mnist_data():
    """Load MNIST via the local ``mnist`` helper module.

    Returns (train_images, train_labels, test_images, test_labels): images
    reshaped to (N, 28, 28, 1) and scaled to [0, 1], labels one-hot (N, 10).
    """
    train_images = mnist.train_images()
    train_labels = mnist.train_labels()
    train_labels = [convert_label(idx) for idx in train_labels]
    train_labels = np.array(train_labels)
    test_images = mnist.test_images()
    test_labels = mnist.test_labels()
    test_labels = [convert_label(idx) for idx in test_labels]
    test_labels = np.array(test_labels)
    # add a trailing single grey channel
    train_images = np.reshape(train_images, (train_images.shape[0], 28, 28, 1))
    test_images = np.reshape(test_images, (test_images.shape[0], 28, 28, 1))
    # scale pixel intensities from [0, 255] to [0, 1]
    train_images = train_images / 255.0
    test_images = test_images / 255.0
    return train_images, train_labels, test_images, test_labels
batch_size = 100
img_size = 28  # MNIST images are 28x28
# Construct model
X = tf.placeholder(tf.float32, [None, img_size, img_size, 1])
Y = tf.placeholder(tf.int32, [None,10])  # one-hot digit labels
logits = network(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op)
# Evaluate model: fraction of argmax agreement between prediction and label
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
def random_batch(batch_size, data, label):
    """Draw a uniform random minibatch (with replacement) of data/label pairs."""
    chosen = np.random.randint(0, len(data), batch_size)
    return data[chosen], label[chosen]
# Load MNIST once at module import and report the split shapes.
train_images, train_labels, test_images, test_labels = load_mnist_data()
print (train_images.shape, train_labels.shape, test_images.shape, test_labels.shape)
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    num_steps = 100000
    display_step = 100
    for step in range(1, num_steps + 1):
        batch_x, batch_y = random_batch(batch_size, train_images, train_labels)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # training-batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", Training Accuracy= " + \
                "{:.3f}".format(acc))
            # held-out loss and accuracy (the test set doubles as validation)
            batch_x, batch_y = random_batch(batch_size, test_images, test_labels)
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", validation Accuracy= " + \
                "{:.3f}".format(acc))
    print("Optimization Finished!")
    # Report accuracy on one random test batch
    batch_x, batch_y = random_batch(batch_size, test_images, test_labels)
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: batch_x,
                                      Y: batch_y}))
    # Bug fix: this loop referenced undefined names test_onvs/onv (copied from
    # the ONV variant of this script) and raised NameError after training;
    # iterate the MNIST test images instead.
    for index in range(len(test_images)):
        image = test_images[index]
        label = test_labels[index]
        pred, accu = sess.run([correct_pred, accuracy], feed_dict={X: [image], Y: [label]})
        #print ("pred label:", pred, "accu", accu)
        # if (accu==0):
        #     save the misclassified digit for inspection here
52,453 | qiuyingyue/ONV2SEQ | refs/heads/master | /data_interpolate.py | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# import the required libraries# impor
import numpy as np
import time
import random
import cPickle
import codecs
import collections
import os
import math
import json
import tensorflow as tf
from six.moves import xrange
# libraries required for visualisation:
from IPython.display import SVG, display
import svgwrite # conda install -c omnia svgwrite=1.1.6
import PIL
from PIL import Image
import matplotlib.pyplot as plt
# set numpy output to something sensible
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
# In[3]:
tf.logging.info("TensorFlow Version: %s", tf.__version__)
# In[4]:
# import our command line tools# impor
from sketch_rnn_train import *
from model import *
from utils import *
from rnn import *
# In[5]:
# little function that displays vector images and saves them to .svg
img_default_dir = 'sample.svg'
def draw_strokes(data, factor=0.5, svg_filename = img_default_dir):
    """Render a delta-encoded stroke-3 sequence to an .svg file with a
    25px margin on each side.

    data: (N, 3) rows of [dx, dy, pen_lift]; factor divides the deltas.
    """
    tf.gfile.MakeDirs(os.path.dirname(svg_filename))
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
    lift_pen = 1
    abs_x = 25 - min_x
    abs_y = 25 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    command = "m"
    for i in xrange(len(data)):
        # "m" after a pen lift, "l" for the first drawn segment, then the
        # command letter is omitted (implicit line-to continuation)
        if (lift_pen == 1):
            command = "m"
        elif (command != "l"):
            command = "l"
        else:
            command = ""
        x = float(data[i,0])/factor
        y = float(data[i,1])/factor
        lift_pen = data[i, 2]
        p += command+str(x)+","+str(y)+" "
    the_color = "black"
    stroke_width = 1
    dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none"))
    dwg.save()
    #display(SVG(dwg.tostring()))
# In[6]:
def generate_np_arr(data, size_bound = 600, factor=1.0):
    """Convert delta-encoded strokes to absolute [x, -y, pen] coordinates,
    rescaled so the sketch's longer side equals size_bound and shifted into
    the positive quadrant.  (Python 2 file: bare print statements below.)
    """
    arr = []
    x_pos = 0
    y_pos = 0
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    width= max_x-min_x
    height= max_y-min_y
    size = max(width, height)
    # rescale factor so the longer side maps onto size_bound
    factor = float(size)/size_bound
    # track fresh bounds of the accumulated absolute coordinates
    min_x = 0
    min_y = 0
    max_x = 0
    max_y = 0
    print factor
    for i in range(len(data)):
        point = data[i]
        deltax = float(point[0])/factor
        deltay = float(point[1])/factor
        pen_lift = point[2]
        x_pos+=deltax
        min_x = min(x_pos, min_x)
        max_x = max(x_pos, max_x)
        y_pos+=deltay
        # y is flipped so the sketch is right-side-up in plot coordinates
        min_y = min(-y_pos, min_y)
        max_y = max(-y_pos, max_y)
        arr.append([x_pos,-y_pos, pen_lift])
    arr = np.asarray(arr)
    print min_x,min_y, max_x, max_y
    start_x = 0
    start_y = 0
    print start_y-min_y
    # translate so all coordinates are >= (start_x, start_y)
    arr[:,0] = np.add(arr[:,0],start_x-min_x)
    arr[:,1] = np.add(arr[:,1],start_y-min_y)
    return arr
# In[7]:
def add_data_point(my_data, offset_x=0, offset_y=0):
    """Linearly interpolate between consecutive absolute points.

    Each consecutive pair of points is resampled into max(|dx|, |dy|) evenly
    spaced points (rounded to 2 decimals), each carrying the pen state of the
    segment's first point.

    Bug fix: ``np.int`` was removed in NumPy 1.24 -- use the builtin ``int``
    (same truncation for these non-negative distances).

    Args:
        my_data: (N, 3) array of absolute [x, y, pen_lift] rows.
        offset_x, offset_y: constant shift applied to every point.
    Returns:
        (M, 3) float array of interpolated points.
    """
    new_data = []
    for i in range(0, len(my_data) - 1):
        point_x = np.add(my_data[i:i + 2, 0], offset_x)
        point_y = np.add(my_data[i:i + 2, 1], offset_y)
        abs_x = np.abs(point_x[0] - point_x[1])
        abs_y = np.abs(point_y[0] - point_y[1])
        # number of samples along this segment (longer axis, truncated)
        dis = int(max(abs_x, abs_y))
        new_x_arr = np.round(np.linspace(point_x[0], point_x[1], dis), 2)
        new_y_arr = np.round(np.linspace(point_y[0], point_y[1], dis), 2)
        for j in range(0, dis):
            new_data.append([new_x_arr[j], new_y_arr[j], my_data[i, 2]])
    new_data = np.asarray(new_data)
    return new_data
# In[9]:
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Load one training sketch, interpolate its stroke points, plot the
    # result, and dump the point list to CSV.  (Python 2 prints below.)
    file_path = '../data/sketch/sketchrnn_cat.full.npz'
    data = np.load(file_path)
    train_set = data['train']
    valid_set = data['valid']
    test_set = data['test']
    train_set_idx = 200  # which training sketch to process
    stroke = train_set[train_set_idx]
    # display the original image for comparison
    draw_strokes(stroke,factor=1, svg_filename=img_default_dir)
    # generate data point coordinates based on stroke sequences
    arr= generate_np_arr(stroke, size_bound=600)
    # close the sequence: repeat the first point with the pen lifted
    row1 = arr[0,:]
    row1 = row1.reshape(1,3)
    row1[:,2]=1
    print row1
    arr=np.append(arr,row1,axis=0)
    print arr.shape
    # intepolate data points
    new_data = add_data_point(arr)
    # display image with new data points (only pen-down segments are drawn)
    for i in range(0, len(new_data)-1):
        if new_data[i,2] ==0:
            plt.plot(new_data[i:i+2,0],new_data[i:i+2,1])
    plt.show()
    # save data points to file
    save_file_name = 'cat.csv'
    np.savetxt(save_file_name, new_data, delimiter=",")
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,454 | qiuyingyue/ONV2SEQ | refs/heads/master | /onv_to_binary.py | #onv_to_binary.py
import os
import numpy as np
from onv_process import show_onv
data_path = "../data/onv_9936_thick"
output_path = "../data/onv_9936_thick_binary"
def convert_to_binary(onv_batch):
    """Binarize a batch of 8-bit ONV intensities in place.

    Args:
        onv_batch: numpy array of pixel values in [0, 255]; modified in place.

    Returns:
        The same array, with every value >= 127 saturated to 255 and the
        rest zeroed.  Prints the batch shape and the value histogram after
        thresholding for a quick sanity check.
    """
    # FIX: the original used the Python-2-only statement
    # `print onv_batch.shape`, which is a SyntaxError under Python 3 and
    # made the whole module unimportable there.  The parenthesised calls
    # below print the same thing on both interpreters.
    print(onv_batch.shape)
    # Order matters: saturate the high half first, then zero what's left —
    # nothing already set to 255 can match the `< 127` mask.
    onv_batch[onv_batch >= 127] = 255
    onv_batch[onv_batch < 127] = 0
    uni, count = np.unique(onv_batch, return_counts=True)
    print((uni, count))
    return onv_batch
if __name__ == "__main__":
    # Binarize every .npz archive found in data_path and write a mirrored
    # archive (same filename, same train/valid/test keys) into output_path.
    for f in os.listdir(data_path):
        onvs = np.load(os.path.join(data_path, f))
        train_onvs = onvs['train']
        valid_onvs = onvs['valid']
        test_onvs = onvs['test']
        # convert_to_binary thresholds in place and returns its argument
        train_onvs = convert_to_binary(train_onvs)
        valid_onvs = convert_to_binary(valid_onvs)
        test_onvs = convert_to_binary(test_onvs)
        outfile = os.path.join(output_path, f)
        # create the output directory lazily, before the first save
        if (not os.path.exists(output_path)):
            os.makedirs(output_path)
        np.savez(outfile, train=train_onvs, valid = valid_onvs, test = test_onvs)
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,455 | qiuyingyue/ONV2SEQ | refs/heads/master | /temp.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages")
#sys.path.append("/home/tao/miniconda2/lib/python2.7/site-packages")
#sys.path.append("./code/")
# import the required libraries
import numpy as np
import time
import random
import cPickle
import codecs
import collections
import os
import math
import json
import tensorflow as tf
from six.moves import xrange
# In[1]
"""Blocks in use: from [1] to [20]:
need modifications to test different models: In[7] [10] [11] [20]
"""
# In[2]:
# libraries required for visualisation:
from IPython.display import SVG, display
import PIL
from PIL import Image
#from matplotlib import pyplot as plt
#import matplotlib.pyplot as plt
# set numpy output to something sensible
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
# In[3]:
get_ipython().system(u'pip install -qU svgwrite')
# In[4]:
import svgwrite # conda install -c omnia svgwrite=1.1.6
# In[5]:
tf.logging.info("TensorFlow Version: %s", tf.__version__)
# In[6]:
#!pip install -q magenta
# In[7]:
# import our command line tools
# (1) for testing sketch-rnn
'''from sketch_rnn_train import *
from model import *'''
# (2) for testing sketch-pix2seq
from sketch_rnn_train_image import *
from model_cnn_encoder import *
'''# (3) for testing sketch-onv2seq
from sketch_rnn_train_onv import *
from model_dnn_encoder import *
'''
from utils import *
from rnn import *
# In[8]:
def draw_helper(data, factor, min_x, max_x, min_y, max_y, svg_filename, padding=50):
    """Render one stroke-3 sequence to an SVG file using fixed bounds.

    Unlike draw_strokes, the bounding box is passed in, so a sequence of
    frames rendered with the same bounds stays registered across frames.

    data: iterable of [dx, dy, pen_state] rows (deltas, divided by factor).
    factor: scale divisor applied to every delta.
    min_x..max_y: precomputed drawing bounds (same units as data/factor).
    svg_filename: output path for the SVG.
    padding: extra margin added around the square canvas.
    """
    # square canvas big enough for the larger dimension plus padding
    diff_x = max_x - min_x
    diff_y = max_y - min_y
    size = max(diff_x, diff_y) + padding
    dims = (size, size)
    padding_x = size - diff_x
    padding_y = size - diff_y
    #print (dims, diff_x, diff_y, padding_x, padding_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
    # build a single SVG path: absolute move to the centred origin, then
    # relative move ("m") after each pen lift and relative line ("l") while
    # the pen is down; "" continues the current polyline
    lift_pen = 1
    abs_x = padding_x/2 - min_x
    abs_y = padding_y/2 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    command = "m"
    for i in xrange(len(data)):
        if (lift_pen == 1):
            command = "m"
        elif (command != "l"):
            command = "l"
        else:
            command = ""
        x = float(data[i,0])/factor
        y = float(data[i,1])/factor
        # pen state of this row decides the command of the NEXT row
        lift_pen = data[i, 2]
        p += command+str(x)+","+str(y)+" "
    the_color = "black"
    stroke_width = 1
    dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none"))
    dwg.save()
    #display(SVG(dwg.tostring()))
def draw_strokes_sequence(data, factor, svg_prefix):
    """Render every prefix of a stroke-3 sequence as its own SVG frame.

    Writes <svg_prefix>/<i>.svg for i = N-1 .. 0, where frame i contains the
    first i points of `data`, all drawn with identical bounds so the frames
    align when played back.
    """
    tf.gfile.MakeDirs(svg_prefix)
    print(data.shape)
    bounds = get_bounds(data, factor)
    n_points = data.shape[0]
    for frame in range(n_points - 1, -1, -1):
        out_path = os.path.join(svg_prefix, "%d.svg" % frame)
        draw_helper(data[0:frame], factor, bounds[0], bounds[1], bounds[2], bounds[3], out_path)
# In[9]:
# little function that displays vector images and saves them to .svg
def draw_strokes(data, factor=0.2, svg_filename = '/tmp/sketch_rnn/svg/sample.svg', padding=50):
    """Render a stroke-3 sequence to an SVG file, computing its own bounds.

    data: iterable of [dx, dy, pen_state] rows (deltas, divided by factor).
    factor: scale divisor applied to every delta.
    svg_filename: output path (parent directories are created).
    padding: extra margin added around the square canvas.
    """
    tf.gfile.MakeDirs(os.path.dirname(svg_filename))
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    '''dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
    lift_pen = 1
    abs_x = 25 - min_x
    abs_y = 25 - min_y'''
    # square canvas big enough for the larger dimension plus padding
    diff_x = max_x - min_x
    diff_y = max_y - min_y
    size = max(diff_x, diff_y) + padding
    dims = (size, size)
    padding_x = size - diff_x
    padding_y = size - diff_y
    #print (dims, diff_x, diff_y, padding_x, padding_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
    # build one SVG path: "m" (relative move) after a pen lift, "l" while
    # drawing, "" to continue the current polyline — see draw_helper
    lift_pen = 1
    abs_x = padding_x/2 - min_x
    abs_y = padding_y/2 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    command = "m"
    for i in xrange(len(data)):
        if (lift_pen == 1):
            command = "m"
        elif (command != "l"):
            command = "l"
        else:
            command = ""
        x = float(data[i,0])/factor
        y = float(data[i,1])/factor
        # pen state of this row decides the command of the NEXT row
        lift_pen = data[i, 2]
        p += command+str(x)+","+str(y)+" "
    the_color = "black"
    stroke_width = 2
    dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none"))
    dwg.save()
    #display(SVG(dwg.tostring()))
# generate a 2D grid of many vector drawings
def make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0):
    """Lay out several stroke-3 drawings on a regular grid and merge them
    into one stroke-3 sequence.

    s_list holds (strokes, grid_location) pairs where grid_location is a
    (row, col) pair.  Each drawing is re-anchored so its bounding-box centre
    sits at the centre of its grid cell, and the pen is lifted between
    drawings.  Returns a single (M, 3) numpy array.
    """
    def _anchor_and_span(strokes):
        # offset of the first point from the drawing's bounding-box centre,
        # plus the total displacement accumulated over the whole drawing
        pts = np.array(strokes)[:, 0:2]
        walked = pts.cumsum(axis=0)
        centre = (walked.max(axis=0) + walked.min(axis=0)) * 0.5
        return pts[0] - centre, pts.sum(axis=0)

    cur_x = 0.0
    cur_y = 0.0
    combined = [[cur_x, cur_y, 1]]
    for sample in s_list:
        strokes = sample[0]
        cell = sample[1]
        cell_y = cell[0] * grid_space + grid_space * 0.5
        cell_x = cell[1] * grid_space_x + grid_space_x * 0.5
        anchor, span = _anchor_and_span(strokes)
        target_x = cell_x + anchor[0]
        target_y = cell_y + anchor[1]
        # pen-up jump from wherever the previous drawing ended
        combined.append([target_x - cur_x, target_y - cur_y, 0])
        combined += strokes.tolist()
        combined[-1][2] = 1
        cur_x = target_x + span[0]
        cur_y = target_y + span[1]
    return np.array(combined)
# define the path of the model you want to load, and also the path of the dataset
# In[10]:
## change data directory for testing
data_dir = '../data'#add '/sketch' for sketch-rnn model and remove '/sketch' for sketch-pix2seq and sketch-onv2seq
models_root_dir = '../backup_models'
model_dir = os.path.join(models_root_dir, 'cnn_encoder_5classes')#'dnn_encoder_5classes_pretrainedcnn_binocular')#cat_bus_rnn_encoder_pretrained/
##cat_bus_cnn_encoder_lr0.001_bs400_64*64
# In[11]:
## change returned data
# (1) testing sketch-rnn
# [train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env(data_dir, model_dir)
# (2) testing sketch-pix2seq
[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model, train_images, valid_images, test_images] = load_env(data_dir, model_dir)
# (3) testing sketch-onv2seq
#[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model,
# train_onvs_left, valid_onvs_left, test_onvs_left, train_onvs_right, valid_onvs_right, test_onvs_right] = load_env(data_dir, model_dir)
# In[12]:
download_pretrained_models(models_root_dir=models_root_dir)
# In[13]:
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
# In[14]:
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# In[15]:
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
# We define two convenience functions to encode a stroke into a latent vector, and decode from latent vector to stroke.
# In[16]:
def encode(input_strokes):
    """Encode a stroke-3 sketch into the model's latent vector z.

    Pads the sketch to the trained max_seq_len (read from the saved model
    config), prepends the initial [0,0,1,0,0] start token, draws the padded
    sketch for visual confirmation, and runs the eval model's encoder.
    Relies on the module-level sess, eval_model and model_dir.
    """
    with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
        model_param = json.load(f)
    max_seq_len = int(model_param['max_seq_len'])
    strokes = to_big_strokes(input_strokes, max_seq_len).tolist()
    # start-of-sequence token: zero offset, pen down
    strokes.insert(0, [0, 0, 1, 0, 0])
    seq_len = [len(input_strokes)]
    draw_strokes(to_normal_strokes(np.array(strokes)))
    return sess.run(eval_model.batch_z, feed_dict={eval_model.input_data: [strokes], eval_model.sequence_lengths: seq_len})[0]
# In[17]:
def encode_image(image):
    """Encode a raster image into the latent vector z via the CNN encoder.

    Relies on the module-level sess, eval_model and model_dir.
    """
    # NOTE(review): model_param is loaded but never used here — the read is
    # kept only to mirror encode(); confirm it can be dropped.
    with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
        model_param = json.load(f)
    return sess.run(eval_model.batch_z, feed_dict={eval_model.img_data: [image]})[0]
# In[18]:
def encode_onv(onv):
    """Encode a single (monocular) ONV sample into the latent vector z.

    Relies on the module-level sess, eval_model and model_dir; model_param
    is read but unused, mirroring encode_image.
    """
    with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
        model_param = json.load(f)
    return sess.run(eval_model.batch_z, feed_dict={eval_model.onv_data: [onv] })[0]
def encode_binocular_onv(onv_left, onv_right):
    """Encode a binocular ONV pair (left and right eye) into latent z.

    Relies on the module-level sess, eval_model and model_dir; model_param
    is read but unused, mirroring encode_image.
    """
    with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
        model_param = json.load(f)
    return sess.run(eval_model.batch_z, feed_dict={eval_model.onv_data_left: [onv_left],eval_model.onv_data_right: [onv_right] })[0]
# In[19]:
def decode(z_input=None, draw_mode=True, temperature=0.1, factor=0.05, modelname='test', filename='test.svg'):
    """Sample a sketch from the decoder, optionally conditioned on z_input.

    z_input: latent vector to condition on (None samples unconditionally).
    draw_mode: when True, also render the result to ../display_svg/.
    temperature: decoder sampling temperature.
    factor / modelname / filename: rendering scale and output location.
    Returns the sampled sketch in stroke-3 format.
    """
    z = None
    if z_input is not None:
        z = [z_input]
    #print(type(sample), sample)
    sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len, temperature=temperature, z=z)
    strokes = to_normal_strokes(sample_strokes)
    if draw_mode:
        draw_strokes(strokes, factor, os.path.join('../display_svg/', modelname, filename))
    return strokes
# Let's try to encode the sample stroke into latent vector $z$
# In[20]:
# test single image (1) (2)
# (1) testing for sketch-rnn sequence
'''stroke = test_set.random_sample()
draw_strokes(stroke)
print (stroke.shape)
z = encode(stroke)
'''
# (2) testing for sketch-pix2seq sequence
'''sample_image = np.copy(random.choice(test_images))
display(Image.fromarray(sample_image))
sample_image = np.resize(sample_image,(64,64,1))
z = encode_image(sample_image)
'''
from onv_process import show_onv
indices = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95])
test_indices =list(indices) + list(indices+2500) + list(indices+5000) + list(indices+7500)+ list(indices+10000)
print (test_indices)
# test batch images (1) (2)
for index in test_indices:
# save .svg of original sketch
#draw_strokes(test_set.strokes[index], 0.02, '../display_svg/original/'+str(index)+'.svg')
# (1) testing for sketch-rnn
'''stroke = test_set.strokes[index]
z = encode(stroke)'''
# (2) testing for sketch-pix2seq
sample_image = np.copy(test_images[index])
display(Image.fromarray(sample_image))
sample_image = np.resize(sample_image,(64,64,1))
z = encode_image(sample_image)
# (3) testing for sketch-onv2seq
'''sample_onv_left = np.copy(test_onvs_left[index])
sample_onv_right = np.copy(test_onvs_right[index])
show_onv(sample_onv_left)
show_onv(sample_onv_right, '/home/qyy/workspace/display_image/onv_right/'+str(index)+'.png')
z = encode_binocular_onv(sample_onv_left, sample_onv_right)'''
decoded_stroke = decode(z, temperature=0.2, modelname='cnn_encoder_5classes_0.2', filename=str(index)+'.svg')
# draw image sequences
#draw_strokes_sequence(decoded_stroke, factor=0.02, svg_prefix='/home/qyy/workspace/test/image_sequence_0.02/'+str(index))
print ("Decoding finished")
| {"/onv_classification.py": ["/nn.py", "/onv_process.py"], "/classification.py": ["/nn.py"], "/mnist_classification.py": ["/nn.py"], "/temp.py": ["/onv_process.py"]} |
52,456 | MexTermin/damesgame2 | refs/heads/master | /exceptions.py | class InvalidMove(Exception):
pass
class InvalidTab(Exception):
    """Raised when the player selects a square that holds no usable tab."""
    pass
class InvalidRange(Exception):
    """Raised when a typed step count falls outside the board's range."""
    pass
class IncorrectInt(Exception):
    """Raised when the player's input cannot be parsed as a valid integer."""
    pass
| {"/modulotabs.py": ["/exceptions.py"], "/mainGame.py": ["/modulotabs.py", "/exceptions.py"]} |
52,457 | MexTermin/damesgame2 | refs/heads/master | /modulotabs.py | from exceptions import *
class Tab:
    def __init__(self):
        # board coordinates of this tab as [row, col]; filled in when placed
        self.pos = []
        # team symbol: "b" or "n" (upper-cased elsewhere once crowned)
        self.symbol = ""
        # True once the tab has been promoted to a dame (queen)
        self.isDame = False
def counter(self,player):
if player.lower() == "b":
return "n"
elif player.lower()== "n":
return "b"
    def dameValidation(self,matrice,y,x,step):
        """Check whether this dame may travel `step` squares along a diagonal.

        Arguments:
            matrice {list} -- 2-D board of Tab objects; empty squares are []
            y {int} -- row direction of the diagonal (+1 or -1)
            x {int} -- column direction of the diagonal (+1 or -1)
            step {int} -- number of squares the dame intends to travel

        Returns:
            bool -- True when no intermediate square blocks the path: an
            occupied square only passes when the square right behind it is
            free and the piece on it is an enemy (i.e. a capturable piece).
        """
        # remember the unit direction so y/x can be re-scaled each iteration
        by = y
        bx = x
        for i in range(1,step+1):
            # y,x become the offset of the i-th square along the diagonal
            y*=i
            x*=i
            if (self.pos[0] + y) < 8 and ( self.pos[0] + y) > 0 and (self.pos[1] + x ) < 8 and (self.pos[1] + x) > 0:
                if matrice[self.pos[0]+y][self.pos[1]+x] != [] :
                    # blocked when the square behind is occupied too, or the
                    # occupying piece belongs to this player.
                    # NOTE(review): pos + direction*(i+1) is not bounds-checked
                    # and could raise IndexError at the board edge — confirm
                    # callers keep dames away from out-of-range look-aheads.
                    if matrice[self.pos[0]+by*(i+1)][self.pos[1]+bx*(i+1)] != [] or matrice[self.pos[0]+y][self.pos[1]+x].symbol.lower() == self.symbol.lower():
                        return False
            # restore the unit direction for the next iteration
            y=by
            x=bx
        return True
def mRightUp(self,matrice,step):
x,y = step,step
if step==1:
if matrice[self.pos[0]-y][self.pos[1]+x] == [] and self.pos[0] >1 and self.pos[1]<8:
matrice[self.pos[0]-y][self.pos[1]+x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
else:
if self.dameValidation(matrice,-1,1,step):
matrice[self.pos[0]-y][self.pos[1]+x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
matrice[self.pos[0]][self.pos[1]] = []
self.pos[0] = self.pos[0] - y
self.pos[1] = self.pos[1] + x
def mLeftUp(self,matrice,step):
x,y = step,step
if step == 1:
if matrice[self.pos[0]-y][self.pos[1]-x] == [] and self.pos[0] >1 and self.pos[1]>1:
matrice[self.pos[0]-y][self.pos[1]-x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
else:
if self.dameValidation(matrice,-1,-1,step):
matrice[self.pos[0]-y][self.pos[1]-x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
matrice[self.pos[0]][self.pos[1]] = []
self.pos[0] = self.pos[0] - y
self.pos[1] = self.pos[1] - x
def mLeftDown(self,matrice,step):
x,y = step,step
if step==1:
if matrice[self.pos[0]+y][self.pos[1]-x] == [] and self.pos[0] <8 and self.pos[1]>1:
matrice[self.pos[0]+y][self.pos[1]-x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
else:
if self.dameValidation(matrice,1,-1,step):
matrice[self.pos[0]+y][self.pos[1]-x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
matrice[self.pos[0]][self.pos[1]] = []
self.pos[0] = self.pos[0] + y
self.pos[1] = self.pos[1] - x
def mRightDown(self,matrice,step):
x,y = step,step
if step==1:
if matrice[self.pos[0]+y][self.pos[1]+x] == [] and self.pos[0] <8 and self.pos[1]<8:
matrice[self.pos[0]+y][self.pos[1]+x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
else:
if self.dameValidation(matrice,1,1,step):
matrice[self.pos[0]+y][self.pos[1]+x] = matrice[self.pos[0]][self.pos[1]]
else:
raise InvalidMove("you cant move here")
matrice[self.pos[0]][self.pos[1]] = []
self.pos[0] = self.pos[0] + y
self.pos[1] = self.pos[1] + x
def move(self,direction,turn,matrice):
"""[summary]
Arguments:
direction {[string]} -- [it must be the address in which the card is moved]
turn {[string]} -- [It is the symbol that represents each team]
matrice {[list]} -- [it is the matrix where all the cards are]
Raises:
InvalidRange: [an exception will return if you enter a number that goes outside the range of the matrix]
InvalidMove: [an exception will return if you choose an incorrect destination]
Returns:
[string] -- [return the next player turn]
"""
if direction.upper() == "RU":
if (turn == "n" and self.symbol =="n") :
self.mRightUp(matrice,1)
return self.counter(turn)
elif self.isDame == True and self.symbol.lower() == turn.lower():
answer = int(input("please enter the box number to be moved: "))
if answer < 1 or answer >8:
raise InvalidRange("please write a number in the range from 1 to 7")
self.mRightUp(matrice,answer)
return self.counter(turn)
else:
if self.symbol == turn.lower():
raise InvalidMove("you can't move here")
else:
raise InvalidMove("you can't move this tab")
#-----------------------------------------------------------------------------------------------
if direction.upper() == "LU":
if (turn == "n" and self.symbol =="n") :
self.mLeftUp(matrice,1)
return self.counter(turn)
elif self.isDame == True and self.symbol.lower() == turn.lower():
answer = int(input("please enter the box number to be moved: "))
self.mLeftUp(matrice,answer)
return self.counter(turn)
else:
if self.symbol == turn.lower():
raise InvalidMove("you can't move here")
else:
raise InvalidMove("you can't move this tab")
#-----------------------------------------------------------------------------------------------
if direction.upper() == "LD":
if (turn == "b" and self.symbol =="b") :
self.mLeftDown(matrice,1)
return self.counter(turn)
elif self.isDame == True and self.symbol.lower() == turn.lower():
answer = int(input("please enter the box number to be moved: "))
self.mLeftDown(matrice,answer)
return self.counter(turn)
else:
if self.symbol == turn.lower():
raise InvalidMove("you can't move here")
else:
raise InvalidMove("you can't move this tab")
#-----------------------------------------------------------------------------------------------
if direction.upper() == "RD":
if (turn == "b" and self.symbol =="b") :
self.mRightDown(matrice,1)
return self.counter(turn)
elif self.isDame == True and self.symbol.lower() == turn.lower():
answer = int(input("please enter the box number to be moved: "))
self.mRightDown(matrice,answer)
return self.counter(turn)
else:
if self.symbol == turn.lower():
raise InvalidMove("you can't move here")
else:
raise InvalidMove("you can't move this tab")
#-----------------------------------------------------------------------------------------------
    def target(self,matrice,player):
        """Find a capture available to this plain (non-dame) tab.

        Arguments:
            matrice {list} -- 2-D board of Tab objects; empty squares are []
            player {string} -- this player's symbol, "n" or "b"

        Returns:
            tuple -- (self.pos, direction) for a capturable adjacent enemy
            with a free landing square behind it, or an empty tuple when no
            capture exists.  Later directions overwrite earlier ones, so at
            most one capture is reported per call.
        """
        targets = ()
        if matrice[self.pos[0]] [self.pos[1]] != []:
            # down-right neighbour is an enemy with an empty square behind it
            if self.pos[0] < 7 and self.pos[1] < 7:
                if matrice[self.pos[0]+1] [self.pos[1]+1] != []:
                    if matrice[self.pos[0]+1][self.pos[1]+1].symbol.lower() == self.counter(player) and self.isDame == False:
                        if matrice[self.pos[0]+2][self.pos[1]+2] == []:
                            targets = (self.pos,"RD")
            # up-right
            if self.pos[0] >2 and self.pos[1] < 7 :
                if matrice[self.pos[0]-1][self.pos[1]+1] != []:
                    if matrice[self.pos[0]-1][self.pos[1]+1].symbol.lower() == self.counter(player) and self.isDame == False:
                        if matrice[self.pos[0]-2][self.pos[1]+2] == []:
                            targets = (self.pos,"RU")
            # up-left
            if self.pos[0] > 2 and self.pos[1] > 2 :
                if matrice[self.pos[0]-1][self.pos[1]-1] != []:
                    if matrice[self.pos[0]-1][self.pos[1]-1].symbol.lower() == self.counter(player) and self.isDame == False:
                        if matrice[self.pos[0]-2][self.pos[1]-2] == []:
                            targets = (self.pos,"LU")
            # down-left
            if self.pos[0] < 7 and self.pos[1] > 2 :
                if matrice[self.pos[0]+1][self.pos[1]-1] != []:
                    if matrice[self.pos[0]+1][self.pos[1]-1].symbol.lower() == self.counter(player ) and self.isDame == False:
                        if matrice[self.pos[0]+2][self.pos[1]-2] == []:
                            targets = (self.pos,"LD")
        return targets
    def eat(self,direction,matrice,point,*args):
        """Perform a capture (for plain tabs and dames) and chain further
        captures recursively while more are available.

        Arguments:
            direction {string} -- capture direction: "RU", "RD", "LU" or "LD"
            matrice {list} -- 2-D board holding the Tab objects
            point {int} -- current score of the capturing team
            *args -- for dame captures, args[0] is the distance to the victim;
                the player is then prompted for the landing distance beyond it

        Returns:
            (list, string, int, bool) -- the updated board, the symbol whose
            turn comes next, the updated score, and False when the capture was
            rejected (invalid landing square / bad input).
            NOTE(review): when further captures exist but none start from this
            tab's new position, the loop falls through and the method returns
            None implicitly — confirm callers never hit that case.
        """
        validation = True
        if len(args) > 0 :
            # dame capture: ask how far past the victim the dame should land
            try:
                answer = int(input("Write the final box after eating: "))
                if answer == 0:
                    raise Exception
            except:
                input("Type a correcty end pos ")
                return matrice,self.symbol.lower(),point,False
        # Per direction: (y1, x1) is the victim's offset, (y2, x2) the landing
        # offset; validation fails when the landing square is occupied or off
        # the board.
        if direction.upper() == "RU" :
            if len(args)==0:
                y1,x1,y2,x2 = -1,1,-2,2
            else:
                y1,x1,y2,x2 = -args[0], args[0], -args[0]-answer, args[0]+answer
            ty, tx = self.pos[0]+y2, self.pos[1]+x2
            if ty > 0 and tx < 9 :
                validation = False if (matrice[ty][tx] != []) else True
            else:
                validation = False
        elif direction.upper() == "LU" :
            if len(args)==0:
                y1,x1,y2,x2 = -1,-1,-2,-2
            else:
                y1,x1,y2,x2 = -args[0], -args[0], -args[0]-answer, -args[0]-answer
            ty, tx = self.pos[0]+y2, self.pos[1]+x2
            if ty > 0 and tx > 0 :
                validation = False if (matrice[ty][tx] != []) else True
            else:
                validation = False
        elif direction.upper() == "RD" :
            if len(args)==0:
                y1,x1,y2,x2 = 1,1,2,2
            else:
                y1,x1,y2,x2 = args[0] ,args[0], args[0]+answer, args[0]+answer
            ty, tx = self.pos[0]+y2, self.pos[1]+x2
            if ty < 9 and tx < 9:
                validation = False if (matrice[ty][tx] != []) else True
            else:
                validation = False
        elif direction.upper() == "LD" :
            if len(args)==0:
                y1,x1,y2,x2 = 1,-1,2,-2
            else:
                y1,x1,y2,x2 = args[0], -args[0], args[0]+answer, -args[0]-answer
            ty, tx = self.pos[0]+y2, self.pos[1]+x2
            if ty < 9 and tx > 0 :
                validation = False if (matrice[ty][tx] != []) else True
            else:
                validation = False
        if validation == False:
            input("you can move here")
            return matrice,self.symbol.lower(),point,False
        #---------------- Make the eat----------------
        # land on (y2, x2), clear the origin and the victim at (y1, x1)
        matrice[self.pos[0]+y2] [self.pos[1]+x2] = matrice[self.pos[0]] [self.pos[1]]
        matrice[self.pos[0]] [self.pos[1]] = []
        matrice[self.pos[0]+y1] [self.pos[1]+x1] = []
        self.pos[0] = self.pos[0] + y2
        self.pos[1] = self.pos[1] + x2
        point += 5
        # chain: if this tab can capture again from its new square, recurse
        if self.isDame == False :
            targt = self.target(matrice,self.symbol)
        else:
            targt = self.targetDame(matrice,self.symbol.lower())
        if len(targt)>0:
            for element in targt:
                if len(element) ==2:
                    # plain-tab target: (pos, direction)
                    if element[0] == self.pos[0] and element[1] == self.pos[1] :
                        return self.eat(targt[1],matrice,point)
                else:
                    # dame target: (pos, direction, distance)
                    if element[0] == self.pos[0] and element[1] == self.pos[1] :
                        return self.eat(targt[1],matrice,point,targt[0][2])
        else:
            return matrice,self.counter(self.symbol),point,True
    def targetDame(self,matrice,player):
        """Find the nearest capture available to this dame along any diagonal.

        Arguments:
            matrice {list} -- 2-D board of Tab objects; empty squares are []
            player {string} -- this player's symbol, "n" or "b"

        Returns:
            tuple or list -- (self.pos, direction, distance) for the closest
            enemy piece that has a free square behind it and a clear approach
            path (checked via dameValidation), or an empty list when no
            capture exists within 6 squares.
        """
        targets, y, x = [], self.pos[0], self.pos[1]
        # scan outward ring by ring; the first hit in scan order wins
        for new in range(1,7):
            if matrice[self.pos[0]] [self.pos[1]] != []:
                # down-right
                if (y + new) < 8 and (x + new) < 8:
                    if matrice[y + new] [x + new] != []:
                        if matrice[y + new][x + new].symbol.lower() == self.counter(player) :
                            if matrice[y + new + 1][x + new + 1] == []:
                                if self.dameValidation(matrice,1,1,new) :
                                    return (self.pos,"RD",new)
                # up-right
                if ( y - new )> 1 and (x + new) < 8 :
                    if matrice[y - new][x + new] != []:
                        if matrice[y-new][x + new].symbol.lower() == self.counter(player) :
                            if matrice[y - new - 1][x + new + 1] == []:
                                if self.dameValidation(matrice,-1,1,new) :
                                    return (self.pos,"RU",new)
                # up-left
                if (y - new) > 1 and (x - new ) > 1 :
                    if matrice[y - new][x - new] != []:
                        if matrice[y - new][x - new].symbol.lower() == self.counter(player) :
                            if matrice[y - new - 1][x - new - 1] == []:
                                if self.dameValidation(matrice,-1,-1,new) :
                                    return (self.pos,"LU",new)
                # down-left
                if (y + new) < 8 and (x-new )> 1 :
                    if matrice[y + new][x - new] != []:
                        if matrice[y + new][ x- new].symbol.lower() == self.counter(player ):
                            if matrice[y + new + 1][x - new - 1] == []:
                                if self.dameValidation(matrice,1,-1,new) :
                                    return (self.pos,"LD",new)
        return targets
| {"/modulotabs.py": ["/exceptions.py"], "/mainGame.py": ["/modulotabs.py", "/exceptions.py"]} |
52,458 | MexTermin/damesgame2 | refs/heads/master | /mainGame.py | from board import *
from modulotabs import *
from exceptions import *
import sys, os
tablet = Board()
tablet.makeMatriz()
# g = Tab()
# g.symbol="n"
tablet.teamsGenerate("b", 1, 1)
tablet.teamsGenerate("n", 2, 6)
# f = Tab()
# f.symbol = "N"
# f.isDame = True
# tablet.matrice[1][7] = f
# tablet.matrice[1][7].pos = [1,7]
# #------------------------------------
# f = Tab()
# f.symbol = "b"
# tablet.matrice[3][5] = f
# tablet.matrice[3][5].pos = [3,5]
# f = Tab()
# f.symbol = "b"
# tablet.matrice[5][3] = f
# tablet.matrice[5][3].pos = [5,3]
# f = Tab()
# f.symbol = "b"
# tablet.matrice[7][3] = f
# tablet.matrice[7][3].pos = [7,3]
# f = Tab()
# f.symbol = "b"
# tablet.matrice[8][7] = f
# tablet.matrice[8][7].pos = [8,7]
tablet
def restart_program():
    """Replace the current process with a fresh run of this script.

    Never returns: os.execl swaps the running interpreter image for a new
    one, so any cleanup (saving data, closing files) must happen before
    this is called.
    """
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def start():
    """Run one interactive turn of the game, recursing for the next turn.

    Checks for a winner, prints the board, forces a capture when one is
    available (chosen interactively if several), otherwise asks the player
    for a move like "3A RU".  Recurses into start() after every outcome.
    NOTE(review): turns are implemented by recursion, so very long games
    will grow the call stack — confirm this is acceptable.
    """
    tablet.clearWindows()
    # win detection: a team with no plain tabs ("n"/"b") and no dames ("N"/"B") loses
    if len(tablet.position(tablet.matrice)["n"]) == 0 and len(tablet.position(tablet.matrice)["N"]) == 0:
        tablet.clearWindows()
        input(" Congratulation player *B* you have won the game ")
        return
    if len(tablet.position(tablet.matrice)["b"]) == 0 and len(tablet.position(tablet.matrice)["B"]) == 0:
        tablet.clearWindows()
        input(" Congratulation player *N* you have won the game ")
        return
    if tablet.turn == "n":
        poin = tablet.pteam1
    else:
        poin = tablet.pteam2
    print("Is turn of the team -", tablet.turn, "- you have {} points".format(poin))
    print(tablet.view())
    # -------------------------------------------------------------------------------
    # collect every capture available to the side whose turn it is
    searching = ["n", "N"] if (tablet.turn == "n") else ["b", "B"]
    pos = tablet.position(tablet.matrice)
    killer = []
    # -------------------------------------------------make dame-----------------------------------------------
    tablet.makeDame()
    # ------------------------------------------------Multi-target------------------------------------------------
    for element in searching:
        for tabs in pos[element]:
            # upper-case symbols are dames and use the long-range scan
            if element.isupper():
                if len(tabs.targetDame(tablet.matrice, tablet.turn)) > 0:
                    killer.append(tabs.targetDame(tablet.matrice, tablet.turn))
            elif len(tabs.target(tablet.matrice, tablet.turn)) > 0:
                killer.append(tabs.target(tablet.matrice, tablet.turn))
    # ----------------------------------------------Single eat(obligatory)-----------------------------1------------------------------
    if len(killer) == 1:
        input("You should eat whit the tab, " + str(killer[0]))
        # 3-element targets are dame captures carrying the victim distance
        if len(killer[0]) == 3:
            tablet.matrice, tablet.turn, poin,verification = tablet.matrice[killer[0][0][0]][killer[0][0][1]].eat(
                killer[0][1], tablet.matrice, poin, killer[0][2])
            if verification == False:
                start()
        else:
            tablet.matrice, tablet.turn, poin,verification = tablet.matrice[killer[0][0][0]][killer[0][0][1]].eat(
                killer[0][1], tablet.matrice, poin)
            if verification == False:
                start()
        # turn already flipped inside eat(), so credit the capturing team
        if tablet.turn == "n":
            tablet.pteam2 = poin
        else:
            tablet.pteam1 = poin
        tablet.makeDame()
        start()
    # ---------------------------------------------------------MultiEat---------------------------------------------------------------------
    elif len(killer) > 1:
        # print the options with the column translated for display, then
        # translate back (translate is presumably its own inverse — confirm)
        for i in range(len(killer)):
            view = killer[i]
            view[0][1] = tablet.translate(view[0][1])
            print("*"+str(i)+"*:"+str(view)+" ", end="")
            view[0][1] = tablet.translate(view[0][1])
        try:
            answer = int(input(" Select one target to eat: "))
            if answer < 0 or answer > len(killer):
                raise Exception
        except:
            tablet.clearWindows()
            input("You should type a number between 0 and {}: ".format( len(killer)-1 ))
            start()
        if len(killer[answer])==3:
            tablet.matrice, tablet.turn, poin,verification = tablet.matrice[killer[answer][0][0]][killer[answer][0][1]].eat(
                killer[answer][1], tablet.matrice, poin, killer[answer][2])
            if verification == False:
                start()
        else:
            tablet.matrice, tablet.turn, poin,verification = tablet.matrice[killer[answer][0][0]][killer[answer][0][1]].eat(
                killer[answer][1], tablet.matrice, poin)
            if verification == False:
                start()
        # --addated point to each team--
        if tablet.turn == "n":
            tablet.pteam2 = poin
        else:
            tablet.pteam1 = poin
        start()
    # ------------------------------------------make the move--------------------------------------------------------------------------
    types = input(
        "Type de tab and its directions, example '3A RU' ").split(" ")
    try:
        # ----------------------------------Controller the steps------------------------------------
        tab = (int(types[0][0]), int(tablet.translate(types[0][1])))
        direction = str(types[1])
        if direction.upper() != "RD" and direction.upper() != "LD":
            if direction.upper() != "LU" and direction.upper() != "RU":
                tablet.clearWindows()
                input("You should typing correctly direction")
                start()
        tablet.turn = tablet.matrice[tab[0]][tab[1]].move( direction, tablet.turn, tablet.matrice)
    # ------------------------------------Controller the exceptions-----------------------------
    except InvalidMove as e:
        tablet.clearWindows()
        input(str(e))
    except InvalidTab as e:
        tablet.clearWindows()
        input(str(e))
    except InvalidRange as e:
        tablet.clearWindows()
        input(str(e))
    except:
        tablet.clearWindows()
        input("Type a correctly tab")
    # # -------------- verify is there's a new dame---------
    tablet.makeDame()
    start()
start()
# Replay prompt after the game loop returns: restart the whole process via
# exec, or exit on request.
while True:
    # NOTE(review): "wan" is a user-facing typo but part of runtime output
    answer = input("Type 'Yes' if you wan play again, else type 'No': ")
    if answer.lower() == "yes":
        restart_program()
    elif answer.lower() == "no":
        exit()
| {"/modulotabs.py": ["/exceptions.py"], "/mainGame.py": ["/modulotabs.py", "/exceptions.py"]} |
52,459 | Air-Zhuang/yangzhuchang | refs/heads/master | /test.py | from test2 import A
# Scratch experiment: equivalent ways to resolve class A dynamically —
# direct instantiation, globals() lookup, and eval of a built string.
# a=A()
# print(a.x)
# a=globals()["A"]()
# print(a.x)
# NOTE(review): eval on a constructed string is safe here only because the
# input is a literal; the same pattern over user input (see app/api/v1)
# is a code-injection risk.
print(eval("A"+".x"))
52,460 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/config/setting.py | DEBUG=True
# Remote site hosting the original scraped images.
ORI_URL_DOMAIN="http://tpzy5.com"
# Domain for locally mirrored images; the Chinese placeholder text says
# "use ori_url for now" — no local mirror is configured yet.
LOCAL_URL_DOMAIN="先用orl_url的"
52,461 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/api/v1/cate.py | from flask import jsonify
from app.libs.error_code import parameter_exception
from app.libs.redprint import Redprint
from app.models.cate import Cate
from app.validators.forms import CateListForm
from app.models.base import db
from app.models.aiyouwu import Aiyouwu
from app.models.bololi import Bololi
from app.models.girlt import Girlt
from app.models.legbaby import Legbaby
from app.models.missleg import Missleg
from app.models.slady import Slady
from app.models.tgod import Tgod
from app.models.toutiao import Toutiao
from app.models.tuigirl import Tuigirl
from app.models.ugirls import Ugirls
api=Redprint('cate')
@api.route('',methods=['GET'])
def get_cate():
    """Return the name of every category (site) known to the app."""
    rows = Cate.query.filter_by().all()
    # hide('id') strips the id column before the name is read
    names = [row.hide('id').cate_name for row in rows]
    return jsonify({"value": names})
@api.route('/list',methods=['GET'])
def get_cate_list():
    """Return the distinct enabled (status == 1) gallery titles of one model.

    Query args (validated by CateListForm): kind -- name of the model class.
    """
    # SECURITY FIX: the original passed the user-supplied `kind` field
    # straight into eval(), allowing arbitrary code execution.  Resolve the
    # class through an explicit whitelist of the imported models instead;
    # an unknown kind raises KeyError inside parameter_exception, just as
    # eval's NameError did before.
    models = {m.__name__: m for m in (Aiyouwu, Bololi, Girlt, Legbaby, Missleg,
                                      Slady, Tgod, Toutiao, Tuigirl, Ugirls)}
    resp = {"value": []}
    form = CateListForm().validate_for_api()
    with parameter_exception():
        model = models[form.kind.data]
        catelist = db.session.query(model.title).filter(model.status == 1).distinct().all()
    # each row is a 1-tuple; flatten to a plain list of titles
    resp["value"] = [row[0] for row in catelist]
    return jsonify(resp)
| {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,462 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/api/v1/pic.py | from flask import jsonify
from app.libs.redprint import Redprint
from app.validators.forms import GetPicForm
from app.libs.error_code import parameter_exception
from app.models.base import db
from app.models.aiyouwu import Aiyouwu
from app.models.bololi import Bololi
from app.models.girlt import Girlt
from app.models.legbaby import Legbaby
from app.models.missleg import Missleg
from app.models.slady import Slady
from app.models.tgod import Tgod
from app.models.toutiao import Toutiao
from app.models.tuigirl import Tuigirl
from app.models.ugirls import Ugirls
api=Redprint('pic')
@api.route('/<kind>',methods=['GET'])
def get_pic(kind):
    """Return all picture rows of model ``kind`` whose title matches the form.

    ``kind`` comes straight from the URL path, so it must never reach eval().
    """
    resp = {"value": []}
    form = GetPicForm().validate_for_api()
    with parameter_exception():
        # SECURITY FIX: the original spliced both the URL segment and the
        # submitted title into an eval() string -- remote code execution plus
        # query injection.  Look the model up in an explicit whitelist and
        # bind the title as a keyword argument so SQLAlchemy escapes it.
        models = {cls.__name__: cls for cls in (Aiyouwu, Bololi, Girlt,
                                                Legbaby, Missleg, Slady, Tgod,
                                                Toutiao, Tuigirl, Ugirls)}
        model = models[kind]  # unknown kind -> KeyError, handled by the guard
        resp["value"] = model.query.filter_by(title=form.title.data).all()
    return jsonify(resp)
| {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,463 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/api/v1/domain.py | from flask import jsonify,current_app
from app.libs.redprint import Redprint
api=Redprint('domain')
@api.route('',methods=['GET'])
def get_domain():
    """Expose the configured origin and local URL domains to clients."""
    config = current_app.config
    payload = {
        "ori_url": config["ORI_URL_DOMAIN"],
        "local_url": config["LOCAL_URL_DOMAIN"],
    }
    return jsonify({"value": payload})
| {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,464 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/models/slady.py | from sqlalchemy import Column, String, Integer, orm
from app.models.base import Base
class Slady(Base):
    """Picture-set record scraped from the 'Slady' source site."""
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Title of the picture set; several rows may share one title.
    title = Column(String(255), nullable=False)
    # Original (remote) and locally mirrored image URLs.
    ori_url = Column(String(255))
    local_url = Column(String(255))
    @orm.reconstructor
    def __init__(self):
        # Runs each time SQLAlchemy loads an instance from the DB.
        # `fields` presumably drives serialization in Base -- confirm
        # against Base's implementation.
        self.fields = ['id', 'title', 'ori_url', 'local_url']
| {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,465 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/models/cate.py | from sqlalchemy import Column, String, Integer, orm
from app.models.base import Base
class Cate(Base):
    """Category (producer/company) lookup table."""
    id = Column(Integer, primary_key=True)
    # Display name of the category.
    cate_name=Column(String(255))
    @orm.reconstructor
    def __init__(self):
        # Runs on DB load; `fields` presumably drives serialization in
        # Base -- confirm against Base's implementation.
        self.fields = ['id', 'cate_name']
| {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,466 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/config/secure.py | SQLALCHEMY_DATABASE_URI='mysql+cymysql://root:123456@127.0.0.1:3306/yangzhuchang'
SECRET_KEY='xxxxxx'
TOKEN_EXPIRATION=30*24*3600 | {"/app/api/v1/cate.py": ["/app/models/cate.py", "/app/models/slady.py"], "/app/api/v1/pic.py": ["/app/models/slady.py"]} |
52,467 | Air-Zhuang/yangzhuchang | refs/heads/master | /app/api/v1/__init__.py | from flask import Blueprint
from app.api.v1 import user,client,token,cate,pic,domain
def create_blueprint_v1():
    """Assemble the v1 blueprint by registering every API redprint on it.

    url_prefix defaults to '/<redprint name>' inside register(), so none is
    passed explicitly.
    """
    blueprint = Blueprint('v1', __name__)
    for module in (user, client, token, cate, pic, domain):
        module.api.register(blueprint)
    return blueprint
52,492 | majgaard/DIANA-2.0 | refs/heads/master | /view.py | import numpy as np
import math
# View class
# Holds the current viewing parameters
# and can build a view transformation
# matrix [VTM] based on the parameters.
# Carl-Philip Majgaard
# CS 251
# Spring 2016
class View:
    """Holds the current viewing parameters and builds the view
    transformation matrix (VTM) that maps data space to screen space."""
    def __init__(self):
        # View reference point, view plane normal, view-up vector and the
        # u axis, all stored as 1x3 row matrices.
        self.vrp = np.matrix([0.5, 0.5, 1])
        self.vpn = np.matrix([0, 0, -1])
        self.vup = np.matrix([0, 1, 0])
        self.u = np.matrix([-1, 0, 0])
        # View-volume extent, screen size (pixels) and screen offset.
        self.extent = np.matrix([1, 1, 1])
        self.screen = np.matrix([600, 600])
        self.offset = np.matrix([20, 20])
    def build(self):
        """Build and return the 4x4 VTM from the current parameters.

        Side effect: re-orthonormalizes vpn/vup/u and writes them back.
        """
        vtm = np.identity(4, float)
        # Step 1: translate the view reference point to the origin.
        t1 = np.matrix( [[1, 0, 0, -self.vrp[0, 0]],
                        [0, 1, 0, -self.vrp[0, 1]],
                        [0, 0, 1, -self.vrp[0, 2]],
                        [0, 0, 0, 1] ] )
        vtm = t1 * vtm
        # Step 2: derive an orthonormal frame via cross products.
        tu = np.cross(self.vup, self.vpn)
        tvup = np.cross(self.vpn, tu)
        tvpn = self.vpn.copy()
        tu = self.normalize(tu)
        tvup = self.normalize(tvup)
        tvpn = self.normalize(tvpn)
        # Persist the normalized frame back onto the view.
        self.vpn = tvpn.copy()
        self.vup = tvup.copy()
        self.u = tu.copy()
        # Step 3: rotate the view axes to align with x/y/z.
        r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],
                    [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],
                    [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],
                    [ 0.0, 0.0, 0.0, 1.0 ]] )
        vtm = r1 * vtm
        # Step 4: translate so the view volume is centered on the origin.
        t2 = np.matrix( [[1, 0, 0, 0.5 * self.extent[0,0]],
                         [0, 1, 0, 0.5 * self.extent[0,1]],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]] )
        vtm = t2 * vtm
        # Step 5: scale to screen size; negative signs flip the axes so the
        # image is not mirrored in Tk's top-left-origin coordinates.
        sx = (-self.screen[0,0])/self.extent[0,0]
        sy = (-self.screen[0,1])/self.extent[0,1]
        sz = (1.0 / self.extent[0,2])
        s1 = np.matrix( [[sx, 0, 0, 0],
                         [0, sy, 0, 0],
                         [0, 0, sz, 0],
                         [0, 0, 0, 1]] )
        vtm = s1 * vtm
        # Step 6: translate into the visible screen area plus margin offset.
        t3 = np.matrix( [[1, 0, 0, self.screen[0,0] + self.offset[0,0]],
                         [0, 1, 0, self.screen[0,1] + self.offset[0,1]],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]] )
        vtm = t3 * vtm
        return vtm
    def normalize(self, vector):
        """Return the unit-length version of `vector` (not in place)."""
        length = np.linalg.norm(vector)
        vector = vector/length
        return vector
    def rotateVRC(self, vupa, ua):
        """Rotate the view reference coordinates about the center of the
        view volume: `vupa` radians around the up axis, `ua` around u."""
        # Center of the view volume along the view plane normal.
        point = self.vrp + self.vpn * self.extent[0,2]*0.5
        # Translate rotation center to origin ...
        t1 = np.matrix( [[1, 0, 0, -point[0,0]],
                         [0, 1, 0, -point[0,1]],
                         [0, 0, 1, -point[0,2]],
                         [0, 0, 0, 1]] )
        # ... express the view frame as a rotation matrix ...
        rxyz = np.matrix( [[self.u[0,0], self.u[0,1], self.u[0,2], 0],
                           [self.vup[0,0], self.vup[0,1], self.vup[0,2], 0],
                           [self.vpn[0,0], self.vpn[0,1], self.vpn[0,2], 0],
                           [0,0,0,1]])
        # ... rotate about y (up) and x (u) in the view's own frame ...
        r1 = np.matrix( [[math.cos(vupa), 0, math.sin(vupa), 0],
                        [0,1,0,0],
                        [-math.sin(vupa), 0, math.cos(vupa), 0],
                        [0,0,0,1]])
        r2 = np.matrix( [[1,0,0,0],
                        [0,math.cos(ua),-math.sin(ua),0],
                        [0, math.sin(ua), math.cos(ua), 0],
                        [0,0,0,1]])
        # ... and translate back.
        t2 = np.matrix( [[1, 0, 0, point[0,0]],
                         [0, 1, 0, point[0,1]],
                         [0, 0, 1, point[0,2]],
                         [0, 0, 0, 1]] )
        # Rows: vrp as a point (w=1), then u/vup/vpn as directions (w=0).
        tvrc = np.matrix( [[self.vrp[0,0], self.vrp[0,1], self.vrp[0,2], 1],
                           [self.u[0,0],self.u[0,1],self.u[0,2],0],
                           [self.vup[0,0],self.vup[0,1],self.vup[0,2],0],
                           [self.vpn[0,0],self.vpn[0,1],self.vpn[0,2],0]])
        tvrc = (t2*rxyz.T*r2*r1*rxyz*t1*tvrc.T).T
        # Unpack the rotated frame and re-normalize the direction vectors.
        self.vrp = tvrc[0, range(0,3)]
        self.u = tvrc[1, range(0,3)]
        self.vup = tvrc[2, range(0,3)]
        self.vpn = tvrc[3, range(0,3)]
        self.vrp = self.vrp
        self.u = self.normalize(self.u)
        self.vup = self.normalize(self.vup)
        self.vpn = self.normalize(self.vpn)
    def clone(self):
        """Return an independent deep copy of this View."""
        v = View()
        v.vrp = self.vrp.copy()
        v.vpn = self.vpn.copy()
        v.vup = self.vup.copy()
        v.u = self.u.copy()
        v.extent = self.extent.copy()
        v.screen = self.screen.copy()
        v.offset = self.offset.copy()
        return v
if __name__ == "__main__":
    # Smoke test: constructing a View and building its VTM must not raise.
    demo_view = View()
    demo_view.build()
| {"/analysis.py": ["/data.py", "/pcadata.py"]} |
52,493 | majgaard/DIANA-2.0 | refs/heads/master | /pcadata.py | import numpy as np
import copy
from data import Data
import csv
class PCAData(Data):
    """Data subclass that wraps the result of a PCA projection.

    NOTE: Python 2 source (backtick repr, print statements).
    """
    def __init__(self, ogDataHeaders, projectedData, eigenValues, eigenVectors, ogDataMeans):
        Data.__init__(self)
        # PCA artifacts kept alongside the projected matrix.
        self.eigenValues = eigenValues
        self.eigenVectors = eigenVectors
        self.meanDataValues = ogDataMeans
        self.projectedHeaders = ogDataHeaders
        self.matrix = projectedData
        # Synthetic headers "P0", "P1", ... for the projected columns
        # (`i` is Python 2 backtick-repr of the index).
        self.rawHeaders = ["P"+`i` for i in range(len(ogDataHeaders))]
        self.rawTypes = ["numeric" for i in range(len(ogDataHeaders))]
        # NOTE(review): header2raw is keyed by the ORIGINAL headers while
        # header2matrix below is keyed by the synthetic "P#" names -- looks
        # inconsistent with Data, confirm which lookups callers rely on.
        for idx, i in enumerate(ogDataHeaders):
            self.header2raw[i] = idx
        self.rawPoints = projectedData.tolist()
        self.rawPointsCopy = projectedData.tolist()
        self.headersNumeric = ogDataHeaders
        for idx, i in enumerate(self.rawHeaders):
            self.header2matrix[i] = idx
        print(self.projectedHeaders)
    def get_eigenvalues(self):
        """Return a defensive matrix copy of the eigenvalues."""
        return np.matrix(self.eigenValues.copy())
    def get_eigenvectors(self):
        """Return a defensive matrix copy of the eigenvectors."""
        return np.matrix(self.eigenVectors.copy())
    def get_data_means(self):
        """Return a copy of the per-column means of the original data."""
        return self.meanDataValues.copy()
    def get_data_headers(self):
        """Return a shallow copy of the original data headers."""
        return copy.copy(self.projectedHeaders)
    def toFile(self, filename="dataDump.csv", headers=None):
        """Dump the projected data (all "P#" columns) to a CSV file.

        `headers` is accepted for interface parity with Data.toFile but is
        ignored here -- the projected headers are always written.
        """
        with open(filename, "wb") as csvfile:
            cwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
            cwriter.writerow(self.rawHeaders)
            cwriter.writerow(self.rawTypes)
            print self.rawTypes
            print "yikes"
            dat = self.getData(self.rawHeaders).tolist()
            for row in dat:
                cwriter.writerow(row)
| {"/analysis.py": ["/data.py", "/pcadata.py"]} |
52,494 | majgaard/DIANA-2.0 | refs/heads/master | /data.py | import csv
import os
import numpy as np
import copy
#import analysis as an
# Data API
# Carl-Philip Majgaard
# CS 251
# Spring 2016
class Data:
    """CSV-backed data container: raw string rows plus a numpy matrix of
    the numeric (and enum-coded) columns.

    NOTE: Python 2 source (print statements, dict.has_key, 'rU' mode).
    """
    def __init__(self,filename = None):
        #Set a bunch of fields
        self.rawHeaders = []          # column names, stripped
        self.rawTypes = []            # per-column type strings
        self.header2raw = {}          # header -> raw column index
        self.rawPoints = []           # rows as lists of strings
        self.rawPointsCopy = []       # deep copy; enum codes written here
        self.headersNumeric = []      # headers of numeric/enum columns
        self.matrix = np.matrix([])   # numeric data as a numpy matrix
        self.header2matrix = {}       # header -> matrix column index
        self.enumDict = {}            # header -> {enum string -> int code}
        if filename != None:
            self.read(filename)
    # Read a file into our fields
    def read(self, filename):
        """Parse `filename` (CSV: header row, type row, then data rows)
        into the raw fields and build the numeric matrix."""
        #Setting my working directory. Commented out for submission
        #os.chdir("/Users/CarlPhilipMajgaard/Desktop/CS251/Project5/")
        # 'rU' = Python 2 universal-newline mode (removed in Python 3.11).
        with open(filename, 'rU') as f: #Open file
            reader = csv.reader(f)
            for row in reader: #for each row
                if "#" not in row[0]: #if not a comment
                    self.rawPoints.append(row) #add to raw points
        self.rawHeaders = [i.strip() for i in list(self.rawPoints.pop(0))]#add headers by pop
        self.rawTypes = [i.strip() for i in list(self.rawPoints.pop(0))] #add types by pop
        # Drop rows containing the dataset's sentinel/missing values.
        # NOTE(review): popping from self.rawPoints while enumerating it
        # shifts later rows left, so the row sliding into position idx is
        # never inspected -- consecutive sentinel rows can survive.  Left
        # as-is in this doc-only pass; fix by building a filtered list.
        for idx, row in enumerate(self.rawPoints):
            for element in row:
                if element == '-9999' or element == '38110':
                    print "Popping row: ", self.rawPoints.pop(idx)
                    break
        for idx, header in enumerate(self.rawHeaders):
            self.header2raw[header] = idx #fill dictionary
        self.rawPointsCopy = copy.deepcopy(self.rawPoints)
        # Collect numeric/enum columns; enum values are recoded in place
        # inside rawPointsCopy by processEnum.
        for idx, header in enumerate(self.rawHeaders):
            if self.rawTypes[idx].lower() == 'enum': #If enum
                self.headersNumeric.append(header) #append the header
                self.processEnum(idx, header) #call for enum processing
            elif self.rawTypes[idx].lower() == 'numeric':
                self.headersNumeric.append(header) #append the header
        matrixList = [] #will be turned into matrix
        for row in self.rawPointsCopy:
            rowList = []
            for header in self.headersNumeric: #add only numeric columns
                rowList.append(float(row[self.header2raw[header]])) #add it
            matrixList.append(rowList) #append row to list
        self.matrix = np.matrix(matrixList) #make matrix
        for idx, header in enumerate(self.headersNumeric): #make new dict
            self.header2matrix[header] = idx #fill it
    #Processes columns with enums
    def processEnum(self, column, header):
        """Replace enum strings in `column` of rawPointsCopy with integer
        codes assigned in first-seen order; codes stored in enumDict."""
        self.enumDict[header] = {} #dict of dicts - for multiple enum cols
        for row in self.rawPointsCopy:
            # has_key is Python 2 only; `in` would be the portable form.
            if not self.enumDict[header].has_key(row[column]): #if new key
                self.enumDict[header][row[column]] = len(self.enumDict[header]) #make the key
                row[column] = self.enumDict[header][row[column]] #edit the cell
            else:
                row[column] = self.enumDict[header][row[column]] #edit the cell
    ## Accessors for matrix data ##
    def getHeaders(self):
        """Return the headers of the numeric/enum columns."""
        return self.headersNumeric
    def getHeader(self, header):
        """Return the matrix column index for `header`."""
        return self.header2matrix[header]
    def getNumColumns(self):
        """Return the number of numeric/enum columns."""
        return len(self.getHeaders())
    def getRow(self, row):
        """Return matrix row `row` as a 1xN matrix."""
        return self.matrix[row,:]
    def getValue(self, column, row):
        """Return the matrix value at (`row`, header `column`)."""
        return self.matrix[row,self.header2matrix[column]]
    def getData(self, columns, min=None, max=None): #gets range
        """Return the sub-matrix for `columns`, optionally rows [min, max).

        NOTE(review): `min`/`max` shadow the builtins; kept because callers
        may pass them by keyword.
        """
        colList = []
        for col in columns:
            if self.header2matrix.has_key(col):
                colList.append(self.header2matrix[col])
        if min != None and max != None:
            step1 = self.matrix[range(min,max),:] #some matric work.
            step2 = step1[:, colList] #must be in 2 steps, else numpy throws a fit
        else:
            step2 = self.matrix[:, colList]
        return step2
    ## End accessors for matrix data ##
    ## Accessors for raw data ##
    def getRawHeaders(self):
        """Return all raw column headers."""
        return self.rawHeaders
    def getRawTypes(self):
        """Return all raw column type strings."""
        return self.rawTypes
    def getTypes(self, headers):
        """Return the type strings for the given `headers`, in order."""
        types = []
        for head in headers:
            for idx, header in enumerate(self.rawHeaders):
                if head == header:
                    types.append(self.rawTypes[idx])
        return types
    def getRawNumColumns(self):
        """Return the number of raw columns."""
        return len(self.rawPoints[0])
    def getRawNumRows(self):
        """Return the number of raw data rows."""
        return len(self.rawPoints)
    def getRawRow(self, row):
        """Return raw row `row` as a list of strings."""
        return self.rawPoints[row]
    def getRawValue(self, column, row):
        """Return the raw string at (`row`, header `column`)."""
        return self.rawPoints[row][self.header2raw[column]]
    def textDump(self):
        """Print headers, types and every raw row to stdout."""
        print(self.getRawHeaders())
        print(self.getRawTypes())
        for row in self.rawPoints:
            print(row)
    def toFile(self, filename="dataDump.csv", headers=None):
        """Write the numeric data (optionally only `headers`) as CSV.

        "wb" mode is the Python 2 csv idiom; use newline='' under Python 3.
        """
        with open(filename, "wb") as csvfile:
            cwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
            if headers != None:
                cwriter.writerow(headers)
                cwriter.writerow(self.getTypes(headers))
                dat = self.getData(headers).tolist()
                for row in dat:
                    cwriter.writerow(row)
            else:
                cwriter.writerow(self.headersNumeric)
                cwriter.writerow(self.getTypes(self.headersNumeric))
                dat = self.getData(self.headersNumeric).tolist()
                for row in dat:
                    cwriter.writerow(row)
    def addColumn(self, header, data):
        """Append a numeric column `data` (column vector matching the
        matrix's row count) under `header`; silently no-op on mismatch."""
        if data.shape[0] == self.matrix.shape[0]:
            self.headersNumeric.append(header)
            self.rawTypes.append("numeric")
            self.rawHeaders.append(header)
            self.header2matrix[header] = self.matrix.shape[1]
            # Rebuild the matrix with one extra column on the right.
            a = np.matrix(np.zeros((data.shape[0], self.matrix.shape[1]+1)))
            a[:,-1] = data
            a[:,:-1] = self.matrix
            self.matrix = a
## End accessors for raw data ##
#testing with supplied testdata1.csv
if __name__ == "__main__":
    # BUG FIX: this block uses `an.kmeans` but the module-level
    # `import analysis as an` is commented out above, so running the file
    # directly raised NameError.  Import it locally for the smoke test only,
    # keeping the import out of normal module loading.
    import analysis as an
    d = Data("AustraliaCoast.csv")
    codebook, codes, errors = an.kmeans(d, ["Latitude"], 6)
    d.addColumn("ClusterIds", codes)
    d.toFile()
    #show that we can get a value
    # print(d.getValue("thing2", 2))
    #
    # #show that we can get all data for certain columns
    # print(d.getData(["thing2", "thing3"]))
    #
    # print("SEP")
    # print(d.getData(["thing2", "thing3"], 1, 5))
| {"/analysis.py": ["/data.py", "/pcadata.py"]} |
52,495 | majgaard/DIANA-2.0 | refs/heads/master | /engine.py | # TK Skeleton
# Modified by Carl-Philip Majgaard
# CS 251
# Spring 2016
import Tkinter as tk
import tkFileDialog
import tkFont as tkf
import math
import subprocess
import random
from view import *
from data import *
import analysis
from scipy import stats
import copy
#Dialog Class to be inherited from
class Dialog(tk.Toplevel):
    """Generic modal Tk dialog; subclasses override body/validate/apply.

    The __init__ ordering (transient -> body -> buttonbox -> grab_set ->
    wait_window) is the standard modal-dialog protocol and is
    order-sensitive.
    """
    def __init__(self, parent, title = None):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)  # keep dialog on top of its parent
        if title:
            self.title(title)
        self.parent = parent
        self.result = None  # set by subclass validate()/apply()
        body = tk.Frame(self)
        self.initial_focus = self.body(body)  # subclass builds widgets
        body.pack(padx=5, pady=5)
        self.buttonbox()
        self.grab_set()  # make the dialog modal
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Position the dialog slightly offset from the parent window.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)  # block until the dialog is destroyed
    #
    # construction hooks
    def body(self, master):
        # create dialog body. return widget that should have
        # initial focus. this method should be overridden
        pass
    def buttonbox(self):
        # add standard button box. override if you don't want the
        # standard buttons
        box = tk.Frame(self)
        w = tk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        w = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    #
    # standard button semantics
    def ok(self, event=None):
        """Validate; on success apply the result and close the dialog."""
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        self.apply()
        print "Cancelling"
        self.cancel()
        print "Canceled"
    def cancel(self, event=None):
        """Close the dialog and return focus to the parent window."""
        print "Cancelling"
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
    #
    # command hooks
    def validate(self):
        # Subclasses return truthy to accept the input.
        return 1 # override
    def apply(self):
        pass # override
class LinRegDialog(Dialog):
    """Dialog that picks independent/dependent variables for regression.

    Re-implements Dialog.__init__ (rather than calling it) because body()
    needs the extra `data` argument.
    """
    def __init__(self, parent, data, title = 'Choose Variables'):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        body = tk.Frame(self)
        self.initial_focus = self.body(body, data)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)
    def body(self, master, data):
        """Build two single-select listboxes of the numeric headers."""
        w = tk.Label(master, text="Independent Variable")
        w.pack()
        self.l0 = tk.Listbox(master, selectmode=tk.SINGLE, exportselection=0)
        for item in data.headersNumeric:
            self.l0.insert(tk.END, item)
        self.l0.config(height=6)
        self.l0.pack()
        self.l0.selection_set(0)
        self.l0.activate(0)
        w = tk.Label(master, text="Dependent Variable")
        w.pack()
        self.l1 = tk.Listbox(master, selectmode=tk.SINGLE, exportselection=0)
        for item in data.headersNumeric:
            self.l1.insert(tk.END, item)
        self.l1.config(height=6)
        self.l1.pack()
        self.l1.selection_set(0)
        self.l1.activate(0)
    def validate(self):
        # result = (independent header, dependent header)
        self.result = self.l0.get(tk.ACTIVE), self.l1.get(tk.ACTIVE)
        return 1
class NameDialog(Dialog):
    """Dialog asking the user to name a PCA analysis.

    Re-implements Dialog.__init__ because body() takes `length` (used for
    the default name "PCA <length>").
    """
    def __init__(self, parent, length, title = 'Name this Analysis'):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        body = tk.Frame(self)
        self.initial_focus = self.body(body, length)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)
    def body(self, master, length):
        """Build the name entry, pre-filled with a default name."""
        w = tk.Label(master, text="Name:")
        w.pack()
        self.name = tk.Entry(master)
        self.name.pack()
        string = "PCA %d" % (length)
        self.name.insert(0, string)
    def validate(self):
        # Accept any non-empty name.
        self.result = self.name.get()
        if self.result != "":
            return 1
        else:
            return 0
class ColDialog(Dialog):
    """Dialog selecting one or more columns plus a 'Normalize' checkbox."""
    def __init__(self, parent, headers, title = 'Choose Columns'):
        self.headers = headers
        Dialog.__init__(self, parent, title=title)
    def body(self, master):
        headers = self.headers
        w = tk.Label(master, text="Choose Columns:")
        w.pack()
        self.colBox = tk.Listbox(master, selectmode=tk.EXTENDED)
        self.colBox.pack()
        for i in headers:
            self.colBox.insert(tk.END, i)
        # Checkbox value is the string "True"/"False", not a bool.
        self.var = tk.StringVar()
        c = tk.Checkbutton(
            master, text="Normalize", variable=self.var,
            onvalue="True", offvalue="False"
        )
        c.pack()
        c.select()  # normalize by default
    def validate(self):
        # result = (tuple of selected indices, "True"/"False")
        # NOTE(review): len(result) is always 2, so this always accepts --
        # presumably len(result[0]) was intended; confirm with callers.
        self.result = (self.colBox.curselection(), self.var.get())
        if len(self.result) > 0:
            return 1
        else:
            return 0
class clusterDialog(Dialog):
    """Dialog selecting columns and a cluster count for k-means.

    (Name is lowercase contrary to PEP 8; kept because callers use it.)
    """
    def __init__(self, parent, headers, title = 'Choose Columns'):
        self.headers = headers
        Dialog.__init__(self, parent, title=title)
    def body(self, master):
        headers = self.headers
        w = tk.Label(master, text="Choose Columns:")
        w.pack()
        self.colBox = tk.Listbox(master, selectmode=tk.EXTENDED)
        self.colBox.pack()
        w = tk.Label(master, text="Number of Clusters:")
        w.pack()
        self.clusterBox = tk.Entry(master)
        self.clusterBox.pack()
        for i in headers:
            self.colBox.insert(tk.END, i)
    def validate(self):
        # Accept only when columns are selected and the count is an integer.
        self.result = list(self.colBox.curselection())
        self.clusters = self.clusterBox.get()
        if len(self.result) > 0 and self.clusters.isdigit():
            return 1
        else:
            return 0
class EigenDialog(Dialog):
    """Read-only dialog showing a 2D array (eigenvalues/vectors) as a grid."""
    def __init__(self, parent, array, title = 'Eigen Info'):
        self.array = array
        Dialog.__init__(self, parent, title=title)
    def body(self, master):
        array = self.array
        # One Label per cell; floats get 4 decimal places.
        for idx, i in enumerate(array):
            for idx2, j in enumerate(i):
                if type(j) is float:
                    e = tk.Label(master, text = "%.4f" % j)
                else:
                    e = tk.Label(master, text = j)
                e.grid(row=idx, column = idx2, sticky = tk.NSEW)
    def cancel(self, event=None):
        """Tear down the grid before destroying (overrides Dialog.cancel)."""
        print "Cancelling"
        # put focus back to the parent window
        for i in self.winfo_children():
            i.grid_forget()
        self.parent.focus_set()
        self.destroy()
# create a class to build and manage the display
class DisplayApp:
    def __init__(self, width, height):
        """Build the main window: menus, controls, canvas, bindings, and
        initial application state, then prompt for a data file."""
        # create a tk object, which is the root window
        self.root = tk.Tk()
        # width and height of the window
        self.initDx = width
        self.initDy = height
        self.dx = 2            # default point radius in pixels
        self.size = None       # per-point size column (numpy), or None
        self.color = None      # per-point color column (numpy), or None
        #Discrete colors
        self.colors = ["#ACFFCC","#FD1F74","#20FF57","#5F60E0","#BBF14E",
        "#C64288","#73B302","#F98DD9","#BACC0F","#A4B9FE","#1CA03A","#FE6340",
        "#27F1CF","#AD5407","#93CBF6","#578616","#E8CAFD","#87F898","#FA726D",
        "#42E39C","#ED8798","#11885D","#F2B656","#4F73A4","#EAFDD4","#A05551",
        "#CAD5F6","#47899A","#FBF2E6","#96749C"]
        # set up the geometry for the window
        self.root.geometry( "%dx%d+50+30" % (self.initDx, self.initDy) )
        # set the title of the window
        self.root.title("D.I.A.N.A. 2.0") # give it a sick name
        # set the maximum size of the window for resizing
        self.root.maxsize( 1600, 900 )
        # setup the menus
        self.buildMenus()
        # build the controls
        self.buildControls()
        # build the Canvas
        self.buildCanvas()
        # bring the window to the front
        self.root.lift()
        # - do idle events here to get actual canvas size
        self.root.update_idletasks()
        # now we can ask the size of the canvas
        print self.canvas.winfo_geometry()
        # set up the key bindings
        self.setBindings()
        # set up the application state
        self.objects = [] # list of data objects that will be drawn in the canvas
        self.data = None # will hold the raw data someday.
        self.baseClick = None # used to keep track of mouse movement
        self.view = View()
        # Homogeneous endpoints of the three axes (x, y, z unit segments).
        self.endpoints = np.matrix( [[0,1,0,0,0,0],
                                     [0,0,0,1,0,0],
                                     [0,0,0,0,0,1],
                                     [1,1,1,1,1,1]] )
        self.axes = []
        self.regLines = []
        self.regEndpoints = None   # 4x2 homogeneous regression endpoints
        self.clearData()
        self.spacePoints = None    # Nx4 homogeneous data points, or None
        self.scalefactor = 1
        self.pcaList = []
        self.pcacount = -1
        self.clusterCount = -1
        # Immediately ask the user for a data file.
        self.handleOpen()
#Builds Axes
    def buildAxes(self):
        """Create canvas lines for the x (red), y (green) and z (blue)
        axes, projected through the current view."""
        self.axes = []
        vtm = self.view.build()
        tend = vtm * self.endpoints
        self.axes.append(self.canvas.create_line(tend[0,0], tend[1,0],
                            tend[0,1], tend[1,1], fill='red'))
        self.axes.append(self.canvas.create_line(tend[0,2], tend[1,2],
                            tend[0,3], tend[1,3], fill='green'))
        self.axes.append(self.canvas.create_line(tend[0,4], tend[1,4],
                            tend[0,5], tend[1,5], fill='blue'))
#Updates Axes and points
    def updateAxes(self):
        """Re-project the axis endpoints through the current view, move the
        existing axis lines, then refresh any regression lines."""
        vtm = self.view.build()
        tend = vtm * self.endpoints
        # Axis i uses endpoint columns 2i and 2i+1.
        for idx, line in enumerate(self.axes):
            self.canvas.coords(line, tend[0,idx*2], tend[1,idx*2],
                                tend[0,idx*2+1], tend[1,idx*2+1])
        self.updateFits()
def updateFits(self):
if self.regEndpoints != None:
vtm = self.view.build()
tend = vtm * self.regEndpoints
for i, line in enumerate(self.regLines):
self.canvas.coords(line, tend[0,i*2], tend[1,i*2],
tend[0,i*2+1], tend[1,i*2+1])
def colorFix(self):
colorD = self.colorOp.get()
if colorD != "Ignore":
self.color = analysis.normalizeColumnsTogether([colorD], self.data)
colorDict = {}
print self.color
print np.unique(np.asarray(self.color)).size
if np.unique(np.asarray(self.color)).size <= 30:
colCount = 0
for i, point in enumerate(self.objects):
if str(self.color[i,0]) not in colorDict:
colorDict[str(self.color[i,0])] = self.colors[colCount]
colCount += 1
if self.spacePoints != None:
if len(self.objects) > 0:
for i, point in enumerate(self.objects):
self.canvas.itemconfigure(point, fill=colorDict[str(self.color[i,0])])
def updatePoints(self):
if self.spacePoints != None:
vtm = self.view.build()
tpoints = self.spacePoints * vtm.T
l = tpoints[:,-2].tolist()
sortz = [item for sublist in l for item in sublist]
indices = sorted(range(len(sortz)), key=lambda k: sortz[k])
if len(self.objects) > 0:
if self.size == None:
dx = self.dx
for i in indices:
self.canvas.coords(self.objects[i], tpoints[i,0]-dx, tpoints[i,1]-dx, tpoints[i,0]+dx, tpoints[i,1]+dx)
else:
for i in indices:
dx = self.size[i,0]*10 +2
self.canvas.coords(self.objects[i], tpoints[i,0]-dx, tpoints[i,1]-dx, tpoints[i,0]+dx, tpoints[i,1]+dx)
#Adds points from file chosen in init
def buildPoints(self, headers):
self.clearData()
print(len(self.axes))
points = analysis.normalizeColumnsSeparately(headers[0:3], self.data)
if headers[3] != "Ignore":
self.size = analysis.normalizeColumnsSeparately([headers[3]], self.data)
if headers[4] != "Ignore":
self.color = analysis.normalizeColumnsTogether([headers[4]], self.data)
vtm = self.view.build()
self.spacePoints = np.ones((points.shape[0], 4))
if headers[2] == "Ignore":
self.spacePoints[:,:-2] = points
self.spacePoints[:,-2] = np.zeros((points.shape[0]))
else:
self.spacePoints[:,:-1] = points
tend = self.spacePoints * vtm.T
l = tend[:,-2].tolist()
sortz = [item for sublist in l for item in sublist]
indices = sorted(range(len(sortz)), key=lambda k: sortz[k])
self.objects = [None] * len(sortz)
if self.size != None and self.color != None:
for i in indices:
dx = self.size[i,0]*10 +2
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects[i] = pt
elif self.size != None:
for i in indices:
dx = self.size[i,0]*10 +2
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill='black', outline='')
self.objects[i] = pt
elif self.color != None:
for i in indices:
dx = self.dx
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects[i] = pt
else:
for i in indices:
dx = self.dx
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill="black", outline='')
self.objects[i] = pt
#self.updatePoints()
#self.updateAxes()
#Adds points from file chosen in init
def buildPCA(self, headers, data):
self.clearData()
print(len(self.axes))
points = analysis.normalizeColumnsSeparately(headers[0:3], data)
if len(data.headersNumeric) > 3:
self.size = analysis.normalizeColumnsSeparately([headers[3]], data)
if len(data.headersNumeric) > 4:
self.color = analysis.normalizeColumnsTogether([headers[4]], data)
vtm = self.view.build()
self.spacePoints = np.ones((points.shape[0], 4))
if len(data.headersNumeric) < 3:
self.spacePoints[:,:-2] = points
self.spacePoints[:,-2] = np.zeros((points.shape[0]))
else:
self.spacePoints[:,:-1] = points
tend = self.spacePoints * vtm.T
l = tend[:,-2].tolist()
sortz = [item for sublist in l for item in sublist]
indices = sorted(range(len(sortz)), key=lambda k: sortz[k])
self.objects = [None] * len(sortz)
if self.size != None and self.color != None:
for i in indices:
dx = self.size[i,0]*10 +2
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects[i] = pt
elif self.size != None:
for i in indices:
dx = self.size[i,0]*10 +2
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill='black', outline='')
self.objects[i] = pt
elif self.color != None:
for i in indices:
dx = self.dx
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects[i] = pt
else:
for i in indices:
dx = self.dx
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill="black", outline='')
self.objects[i] = pt
#self.updatePoints()
#self.updateAxes()
def buildLinearRegression(self, variables):
normalized = analysis.normalizeColumnsSeparately(variables, self.data)
points = np.ones((normalized.shape[0], 4))
points[:,-2] = np.zeros((normalized.shape[0]))
points[:,:-2] = normalized
self.spacePoints = points
vtm = self.view.build()
tend = self.spacePoints * vtm.T
if self.size != None and self.color != None:
for i in range(0,tend.shape[0]):
dx = self.size[i,0]*10 +2
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects.append(pt)
elif self.size != None:
for i in range(0,tend.shape[0]):
dx = self.size[i,0]*10 +2
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill='black', outline='')
self.objects.append(pt)
elif self.color != None:
for i in range(0,tend.shape[0]):
dx = self.dx
mycolor = '#%02x%02x%02x' % analysis.pseudocolor(self.color[i,0])
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill=mycolor, outline='')
self.objects.append(pt)
else:
for i in range(0,tend.shape[0]):
dx = self.dx
pt = self.canvas.create_oval( tend[i,0]-dx, tend[i,1]-dx, tend[i,0]+dx, tend[i,1]+dx,
fill="black", outline='')
self.objects.append(pt)
ind = self.data.getData([variables[0]]).tolist()
dep = self.data.getData([variables[1]]).tolist()
independent = [val for sublist in ind for val in sublist]
dependent = [val for sublist in dep for val in sublist]
m, b, r_value, p_value, std_err = stats.linregress(independent,dependent)
ri = analysis.dataRange([variables[0]], self.data)
rd = analysis.dataRange([variables[1]], self.data)
xmin, xmax = ri[0][0], ri[0][1]
ymin, ymax = rd[0][0], rd[0][1]
y0 = ((xmin * m + b) - ymin)/(ymax - ymin)
y1 = ((xmax * m + b) - ymin)/(ymax - ymin)
self.regEndpoints = np.matrix([[0,1],
[y0, y1],
[0,0],
[1,1]])
tend = vtm * self.regEndpoints
print tend
self.regLines.append(self.canvas.create_line(tend[0,0], tend[1,0],
tend[0,1], tend[1,1], fill="blue"))
output = ("Regression slope: " , "%.3f" % float(m) ,
" Intercept: " , "%.3f" % float(b) , " R-Value: " , "%.3f" % float(r_value * r_value))
print output
self.regLabel.config(text=output)
#Resets view to standard xy plane
def resetView(self):
self.view = View()
self.updateAxes()
if self.spacePoints != None:
self.updatePoints()
    def buildMenus(self):
        """Create the File / Command / Analysis menu bar and wire each
        entry to its handler."""
        # create a new menu
        menu = tk.Menu(self.root)
        # set the root menu to our new menu
        self.root.config(menu = menu)
        # create a variable to hold the individual menus
        menulist = []
        # create a file menu
        filemenu = tk.Menu( menu )
        menu.add_cascade( label = "File", menu = filemenu )
        menulist.append(filemenu)
        # create another menu for kicks
        cmdmenu = tk.Menu( menu )
        menu.add_cascade( label = "Command", menu = cmdmenu )
        menulist.append(cmdmenu)
        anamenu = tk.Menu( menu )
        menu.add_cascade( label = "Analysis", menu = anamenu )
        menulist.append(anamenu)
        # menu text for the elements
        # (\xE2\x8C\x98 is the UTF-8 command-key glyph)
        # the first sublist is the set of items for the file menu
        # the second sublist is the set of items for the option menu
        menutext = [ [ 'Open File', 'Clear \xE2\x8C\x98-N', 'Quit \xE2\x8C\x98-Q' ],
                     [ 'Reset View', 'Capture View' ],
                     ['Linear Regression', 'PCA', 'K-Means Clustering'] ]
        # menu callback functions (note that some are left blank,
        # so that you can add functions there if you want).
        # the first sublist is the set of callback functions for the file menu
        # the second sublist is the set of callback functions for the option menu
        menucmd = [ [self.handleOpen, self.clearData, self.handleQuit],
                    [self.resetView, self.captureView],
                    [self.handleLinearRegression, self.handlePCA, self.handleCluster] ]
        # build the menu elements and callbacks ('-' means a separator)
        for i in range( len( menulist ) ):
            for j in range( len( menutext[i]) ):
                if menutext[i][j] != '-':
                    menulist[i].add_command( label = menutext[i][j], command=menucmd[i][j] )
                else:
                    menulist[i].add_separator()
# create the canvas object
    def buildCanvas(self):
        """Create the drawing canvas filling the remaining window space."""
        self.canvas = tk.Canvas( self.root, width=self.initDx, height=self.initDy )
        self.canvas.pack( expand=tk.YES, fill=tk.BOTH )
        return
# build a frame and put controls in it
    def buildControls(self):
        """Build the right-hand control panel (plot / PCA buttons and PCA
        list) and the bottom status bar."""
        ### Control ###
        # make a control frame on the right
        self.rightcntlframe = tk.Frame(self.root)
        self.rightcntlframe.pack(side=tk.RIGHT, padx=2, pady=2, fill=tk.Y)
        # make a separator frame
        self.sep = tk.Frame( self.root, height=self.initDy, width=2, bd=1, relief=tk.SUNKEN )
        self.sep.pack( side=tk.RIGHT, padx = 2, pady = 2, fill=tk.Y)
        # use a label to set the size of the right panel
        label = tk.Label( self.rightcntlframe, text="Control Panel", width=20 )
        label.pack( side=tk.TOP, pady=10 )
        plotButton = tk.Button(self.rightcntlframe, text="Plot Data",
                               command=self.handlePlotData)
        plotButton.pack(side=tk.TOP)
        # Status bar at the bottom: general status plus regression output.
        self.statusBar = tk.Frame(self.root, bg='white')
        self.statusBar.pack(side=tk.BOTTOM, padx=2, pady=2, fill=tk.Y)
        self.statusLabel = tk.Label( self.statusBar, text="Hello", height=1)
        self.statusLabel.pack( side=tk.TOP, pady=0 )
        self.regLabel = tk.Label( self.statusBar, text=" ", height=1)
        self.regLabel.pack( side=tk.BOTTOM, pady=0 )
        # PCA management controls (packed bottom-up).
        delPCAButton = tk.Button(self.rightcntlframe, text="Delete Analysis",
                                 command=self.handlePCADel)
        delPCAButton.pack(side=tk.BOTTOM, pady = 0)
        dispPCAButton = tk.Button(self.rightcntlframe, text="Display Info",
                                  command=self.handlePCAinfo)
        dispPCAButton.pack(side=tk.BOTTOM, pady = 0)
        plotPCAButton = tk.Button(self.rightcntlframe, text="Plot Analysis",
                                  command=self.handlePCAPlot)
        plotPCAButton.pack(side=tk.BOTTOM, pady = 0)
        self.pcaBox = tk.Listbox(self.rightcntlframe, selectmode=tk.BROWSE)
        self.pcaBox.pack(side=tk.BOTTOM, pady = 5)
        b = tk.Label(self.rightcntlframe, text="PCA Analyses", width=20)
        b.pack(side = tk.BOTTOM, pady=0)
    def setBindings(self):
        """Attach mouse and keyboard event handlers to the canvas and root.

        Button 1 pans, button 2 (or Control-button-1) scales, and
        Command-button-1 rotates; Command-q/o/n are app-level shortcuts.
        """
        # bind mouse motions to the canvas
        self.canvas.bind( '<Motion>', self.motion)
        self.canvas.bind( '<Button-1>', self.handleMouseButton1 )
        # Control/Command modifiers emulate extra mouse buttons on trackpads
        self.canvas.bind( '<Control-Button-1>', self.handleMouseButton2 )
        self.canvas.bind( '<Command-Button-1>', self.handleMouseButton3)
        self.canvas.bind( '<Command-B1-Motion>', self.handleMouseButton3Motion)
        self.canvas.bind( '<Button-2>', self.handleMouseButton2 )
        self.canvas.bind( '<B1-Motion>', self.handleMouseButton1Motion )
        self.canvas.bind( '<B2-Motion>', self.handleMouseButton2Motion )
        self.canvas.bind( '<Control-B1-Motion>', self.handleMouseButton2Motion )
        # application shortcuts: quit, open a data file, clear the plot
        self.root.bind( '<Command-q>', self.handleQuit )
        self.root.bind( '<Command-o>', self.handleOpen )
        self.root.bind( '<Command-n>', self.clearData ) # To clear data
def handleQuit(self, event=None):
print 'Terminating'
self.root.destroy()
    def handleOpen(self, event=None):
        """Prompt for a CSV data file and load it into self.data.

        Only selections whose filename contains ".csv" are loaded.  The
        axis selectors are then rebuilt; note the try block runs even when
        the dialog was cancelled, so a missing-data AttributeError from
        buildOptions is reported rather than raised.
        """
        fn = tkFileDialog.askopenfilename(parent=self.root,
                title='Choose a data file',
                initialdir='.')
        if(fn != '' and ".csv" in fn):
            self.data = Data(fn)
        try:
            self.buildOptions()
            print("Success")
        except AttributeError:
            # no usable data loaded (e.g. dialog cancelled)
            print("No Data yet!")
        return
def buildOptions(self):
try:
self.xlabel.pack_forget()
self.xDrop.pack_forget()
self.ylabel.pack_forget()
self.yDrop.pack_forget()
self.zlabel.pack_forget()
self.zDrop.pack_forget()
self.slabel.pack_forget()
self.sizeDrop.pack_forget()
self.clabel.pack_forget()
self.colorDrop.pack_forget()
self.cbutton.pack_forget()
except:
print("Whoops")
self.xAxisOp = tk.StringVar(self.root)
self.yAxisOp = tk.StringVar(self.root)
self.zAxisOp = tk.StringVar(self.root)
self.sizeOp = tk.StringVar(self.root)
self.colorOp = tk.StringVar(self.root)
options = self.data.headersNumeric
optionals = ["Ignore"] + options
self.xAxisOp.set(options[0])
self.yAxisOp.set(options[0])
self.zAxisOp.set(optionals[0])
self.sizeOp.set(optionals[0])
self.colorOp.set(optionals[0])
self.xDrop = apply(tk.OptionMenu, (self.rightcntlframe, self.xAxisOp) + tuple(options))
self.yDrop = apply(tk.OptionMenu, (self.rightcntlframe, self.yAxisOp) + tuple(options))
self.zDrop = apply(tk.OptionMenu, (self.rightcntlframe, self.zAxisOp) + tuple(optionals))
self.sizeDrop = apply(tk.OptionMenu, (self.rightcntlframe, self.sizeOp) + tuple(optionals))
self.colorDrop = apply(tk.OptionMenu, (self.rightcntlframe, self.colorOp) + tuple(optionals))
self.xlabel = tk.Label( self.rightcntlframe, text="X-Axis Data", width=20 )
self.xlabel.pack( side=tk.TOP, pady=5 )
self.xDrop.pack(side=tk.TOP)
self.ylabel = tk.Label( self.rightcntlframe, text="Y-Axis Data", width=20 )
self.ylabel.pack( side=tk.TOP, pady=5 )
self.yDrop.pack(side=tk.TOP)
self.zlabel = tk.Label( self.rightcntlframe, text="Z-Axis Data", width=20 )
self.zlabel.pack( side=tk.TOP, pady=5 )
self.zDrop.pack(side=tk.TOP)
self.slabel = tk.Label( self.rightcntlframe, text="Size Data", width=20 )
self.slabel.pack( side=tk.TOP, pady=5 )
self.sizeDrop.pack(side=tk.TOP)
self.clabel = tk.Label( self.rightcntlframe, text="Color Data", width=20 )
self.clabel.pack( side=tk.TOP, pady=5 )
self.colorDrop.pack(side=tk.TOP)
self.cbutton = tk.Button(self.rightcntlframe, text="Discrete Colors",
command=self.colorFix)
self.cbutton.pack(side = tk.TOP)
def handlePlotData(self, event=None):
if self.data == None:
self.handleOpen()
else:
self.color = None
self.size = None
self.buildPoints(self.handleChooseAxes())
def handleChooseAxes(self):
headersForPoints = []
headersForPoints.append(self.xAxisOp.get())
headersForPoints.append(self.yAxisOp.get())
headersForPoints.append(self.zAxisOp.get())
headersForPoints.append(self.sizeOp.get())
headersForPoints.append(self.colorOp.get())
return headersForPoints
    def handleCluster(self):
        """Ask the user for columns and a cluster count, run k-means, and
        store the resulting cluster codes as a new "Clusters N" column.
        """
        d = clusterDialog(self.root, self.data.getHeaders())
        if d.result != None and int(d.clusters) > 0:
            print d.result
            # translate the dialog's column indexes into header names
            headers = []
            for index in d.result:
                headers.append(self.data.headersNumeric[index])
            codebook, codes, errors = analysis.kmeans(self.data, headers, int(d.clusters))
            print codes
            self.clusterCount += 1
            print self.clusterCount
            # persist the assignment so it can be plotted like any column
            self.data.addColumn("Clusters %d" % (self.clusterCount,), codes)
            self.buildOptions()
    def handlePCA(self):
        """Run PCA over user-chosen columns and register it in the listbox.

        Loads a data file first if none is open.  The analysis is appended
        to self.pcaList, labelled via a NameDialog, and written to file.
        """
        if self.data == None:
            self.handleOpen()
        else:
            self.pcacount += 1
            d = ColDialog(self.root, self.data.getHeaders())
            if len(d.result) > 0:
                w = NameDialog(self.root, self.pcacount)
                if w.result != None:
                    # d.result[0]: chosen column indexes; d.result[1]: normalize flag
                    headers = []
                    for i in d.result[0]:
                        headers.append(self.data.headersNumeric[i])
                        print self.data.headersNumeric[i]
                    self.pcaList.append(analysis.pca(self.data, headers, normalize = d.result[1]))
                    self.pcaBox.insert(tk.END, w.result)
                    self.pcaList[len(self.pcaList)-1].toFile()
def handlePCAPlot(self):
if len(self.pcaList) > 0:
index = self.pcaBox.index(tk.ACTIVE)
print(self.pcaList[index].get_eigenvectors())
print(self.pcaList[index].get_eigenvalues())
self.buildPCA(self.pcaList[index].getHeaders(), self.pcaList[index])
def handlePCADel(self):
index = self.pcaBox.index(tk.ACTIVE)
self.pcaBox.delete(index)
if len(self.pcaList > 0):
del self.pcaList[index]
    def handlePCAinfo(self):
        """Show the selected analysis' eigen-decomposition in a dialog.

        Builds a table whose first row is ["E-Vec", "E-Val", headers...]
        and whose remaining rows are [raw header, eigenvalue, vector...].
        """
        if len(self.pcaList) > 0:
            index = self.pcaBox.index(tk.ACTIVE)
            vecs = self.pcaList[index].get_eigenvectors().tolist()
            vals = self.pcaList[index].get_eigenvalues().tolist()[0]
            rheaders = self.pcaList[index].getRawHeaders()
            # copy so the inserts below don't mutate the analysis' own list
            headers = copy.copy(self.pcaList[index].getHeaders())
            headers.insert(0,"E-Val")
            headers.insert(0,"E-Vec")
            array = []
            array.append(headers)
            for rheader,val,vec in zip(rheaders,vals,vecs):
                # prepend the row label and eigenvalue to each eigenvector
                vec.insert(0,val)
                vec.insert(0,rheader)
                array.append(vec)
            e = EigenDialog(self.root, array)
#Nice little clearData method. It does what it is supposed to. Good method!
def clearData(self, event=None):
print 'clearing data'
self.objects = []
self.axes = []
self.canvas.delete("all");
self.buildAxes()
def captureView(self, event=None):
f = tkFileDialog.asksaveasfile(mode='w', defaultextension=".eps")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
text2save = f.name
self.canvas.postscript(file=text2save)
    def motion(self, event):
        """Mouse-over handler: show the hovered point's raw values in the
        status bar, or blank the bar when no point is under the cursor.
        """
        x, y = event.x, event.y
        for idx, point in enumerate(self.objects):
            coords = self.canvas.coords(point)
            # hit-test the cursor against the object's bounding box
            if coords[0]<= x and x <= coords[2] and coords[1]<= y and y <= coords[3]:
                output = ""
                for header in self.handleChooseAxes():
                    if header != "Ignore":
                        output += header + ": "
                        output += self.data.getRawValue(header, idx)
                        output += " "
                self.statusLabel.config(text=output)
                break
        else:
            # for/else: loop finished without a hit, so clear the status text
            output = ""
            self.statusLabel.config(text=output)
def handleButton1(self):
print 'handling command button:', self.colorOption.get()
for obj in self.objects:
self.canvas.itemconfig(obj, fill="black" )
    def handleLinearRegression(self):
        """Ask for regression columns, then reset the view and plot the fit."""
        d = LinRegDialog(self.root, self.data)
        if(d.result != None):
            # start from a clean plot and default view before drawing the fit
            self.clearData()
            self.regLines = []
            self.resetView()
            self.updateAxes()
            #print(d.result)
            self.buildLinearRegression(d.result)
def handleMouseButton1(self, event):
print 'handle mouse button 1: %d %d' % (event.x, event.y)
self.baseClick = (event.x, event.y)
print(self.view.vrp)
def handleMouseButton2(self, event):
self.baseClick = (event.x, event.y)
self.ogExtent = self.view.extent.copy()
print 'handle mouse button 2: %d %d' % (event.x, event.y)
    # This is called if the first mouse button is being moved
    def handleMouseButton1Motion(self, event):
        """Pan the view: translate the view reference point by the drag
        distance expressed in view-extent units along u and vup.
        """
        # calculate the difference
        diff = ( event.x - self.baseClick[0], event.y - self.baseClick[1] )
        #print 'handle button1 motion %d %d' % (diff[0], diff[1])
        # normalize the pixel motion by the screen size ...
        screendiff = (float(diff[0])/self.view.screen[0,0],
                      float(diff[1])/self.view.screen[0,1])
        # ... then scale it into view-space units
        multdiff = (screendiff[0]*self.view.extent[0,0],
                    screendiff[1]*self.view.extent[0,1])
        # move the view reference point along the view's u and vup axes
        self.view.vrp = self.view.vrp + (multdiff[0]*self.view.u) + (multdiff[1]*self.view.vup)
        print(self.view.vrp)
        self.updateAxes()
        self.updatePoints()
        # re-anchor so the next motion event is measured incrementally
        self.baseClick = (event.x, event.y)
# This is called if the second button of a real mouse has been pressed
# and the mouse is moving. Or if the control key is held down while
# a person moves their finger on the track pad.
def handleMouseButton2Motion(self, event):
print 'handle button 2 motion'
diff = event.y - self.baseClick[1]
if diff > 400:
diff = 400
elif diff < -400:
diff = -400
if diff < 0:
scalefac = (((diff + 400) * 0.9)/400) + 0.1
elif diff > 0:
scalefac = (((diff)* 3.0)/400) + 1
self.view.extent = self.ogExtent * scalefac
self.updateAxes()
self.updatePoints()
def handleMouseButton3(self, event):
self.baseClick = (event.x, event.y)
self.ogView = self.view.clone()
    def handleMouseButton3Motion(self,event):
        """Rotate the view while dragging with button 3.

        Drag distance maps to angle at pi radians per 400 pixels; each
        event re-applies the rotation to the snapshot from button press so
        rotation does not accumulate numerically.
        """
        diff = (event.x - self.baseClick[0], event.y - self.baseClick[1])
        dx = diff[0] * math.pi/400
        dy = diff[1] * math.pi/400
        self.view = self.ogView.clone()
        self.view.rotateVRC(-dx, dy)
        self.updateAxes()
        self.updatePoints()
        # re-anchor so the next event rotates from the current state
        self.baseClick = (event.x, event.y)
        self.ogView = self.view.clone()
def main(self):
print 'Entering main loop'
self.root.mainloop()
if __name__ == "__main__":
    # Launch the visualization app at 850x720 and start the event loop.
    app = DisplayApp(850, 720)
    app.main()
| {"/analysis.py": ["/data.py", "/pcadata.py"]} |
52,496 | majgaard/DIANA-2.0 | refs/heads/master | /analysis.py | from data import *
import colorsys
from scipy import stats as sp
import numpy as np
import scipy.cluster.vq as vq
import random
import pcadata as pcadata
# Analysis methods
# Carl-Philip Majgaard
# CS 251
# Spring 2016
#get the range of a column
def dataRange(columns, data):
    """Return [[min, max], ...] for each named column of data.matrix."""
    indexes = [data.getHeader(name) for name in columns]
    col_mins = data.matrix.min(axis=0)
    col_maxs = data.matrix.max(axis=0)
    # pick out only the requested columns, preserving request order
    return [[col_mins.item(i), col_maxs.item(i)] for i in indexes]
def mean(columns, data, matrix = None):
    """Return a 1xN matrix of column means.

    If *matrix* is supplied, every column of it is averaged; otherwise the
    named *columns* of data.matrix are averaged.

    Bug fix: the original tested ``matrix != None``, which numpy evaluates
    element-wise and therefore raises "truth value is ambiguous" for any
    matrix with more than one entry; an identity test is required.
    """
    if matrix is not None:
        means = matrix.mean(axis=0)
        return np.matrix([means.item(col) for col in range(matrix.shape[1])])
    # resolve header names to column indexes, then pick out those means
    colNum = [data.getHeader(col) for col in columns]
    means = data.matrix.mean(axis=0)
    return np.matrix([means.item(col) for col in colNum])
def stdev(columns, data):
    """Return a list of the (population) standard deviation of each named column."""
    devs = data.matrix.std(axis=0)
    return [devs.item(data.getHeader(name)) for name in columns]
def mode(columns, data):
    """Return a list with the modal (most common) value of each named column.

    Bug fix: this module imports ``from scipy import stats as sp`` but the
    original body called ``stats.mode`` -- a name never defined here, which
    only worked if ``from data import *`` happened to leak it.
    """
    colNum = [data.getHeader(col) for col in columns]
    modes = sp.mode(data.matrix)   # per-column mode over axis 0
    return [modes[0].item(col) for col in colNum]
def translateColumns(data):
    """Shift every column of *data* so that its minimum becomes zero."""
    def shift_to_zero(col):
        return col - col.min()
    # apply the shift independently down each column (axis 0)
    return np.apply_along_axis(shift_to_zero, 0, data)
def normalizeColumnsSeparately(columns, data):
    """Scale each requested column independently into the range [0, 1]."""
    a = data.getData(columns)
    def to_unit(col):
        lo = col.min()
        hi = col.max()
        return (col - lo) / (hi - lo)
    return np.apply_along_axis(to_unit, 0, a)
def normalizeColumnsTogether(columns, data):
    """Scale the requested columns jointly so the global min/max map to 0/1."""
    a = data.getData(columns)
    lo = a.min()
    hi = a.max()
    return (a - lo) / (hi - lo)
#Use to generate color val
def pseudocolor(val):
    """Map val in [0, 1] onto a 0-255 color triple, returned as (g, r, b)."""
    hue = (float(val) / 1) * 120           # 0 -> red end, 1 -> green end
    # convert hsv color (hue, 1, 1) to its rgb equivalent
    r, g, b = colorsys.hsv_to_rgb(hue / 360, 1., 1.)
    # scale the unit floats up to channel values
    return (g * 255, r * 255, b * 255)
def linear_regression(data, ind, dep):
    """Ordinary least squares regression of *dep* on the *ind* columns.

    Parameters:
      data - object supplying getData(headers)
      ind  - list of independent-variable header names
      dep  - single dependent-variable header name

    Returns (b, sse, r2, t, p): coefficients (last entry is the
    intercept), mean squared error matrix, R^2, t-statistics and
    two-tailed p-values per coefficient.
    """
    pre_a = data.getData(ind)
    y = data.getData([dep])
    # append a column of ones so the last coefficient is the intercept
    a = np.ones((pre_a.shape[0], pre_a.shape[1]+1))
    a[:,:-1] = pre_a
    AAinv = np.linalg.inv(np.dot(a.T, a))   # (A^T A)^-1, used for std errors
    x = np.linalg.lstsq(a,y)
    b = x[0]                                # least-squares coefficients
    n = y.shape[0]
    c = b.shape[0]
    df_e = n - c                            # error degrees of freedom
    df_r = c - 1                            # regression degrees of freedom
    error = y - np.dot(a, b)                # residuals
    sse = np.dot(error.T, error) / df_e
    stderr = np.sqrt( np.diagonal(sse[0,0] * AAinv))
    t = b.T / stderr
    # two-tailed p-value from the t distribution
    p = 2*(1- sp.t.cdf(abs(t), df_e))
    r2 = 1 - error.var() / y.var()
    return b, sse, r2, t, p
def pca(data, headers, normalize = "True"):
    """Principal component analysis over the named columns.

    Returns a pcadata.PCAData holding the projected data, eigenvalues,
    eigenvectors and the column means used for centering.

    NOTE(review): *normalize* is compared against the string "True", so a
    boolean True argument silently skips normalization -- confirm what
    callers pass before changing this.
    """
    if normalize == "True":
        A = normalizeColumnsSeparately(headers, data)
    else:
        A = data.getData(headers)
    m = mean(headers, data, matrix = A)
    D = A - m                       # mean-center the data
    # SVD of the centered data yields the principal directions in V
    U, S, V = np.linalg.svd(D, full_matrices=0)
    eigenval = (S*S)/(len(A)-1)     # singular values -> variances
    vectors = V
    projection = ((V * D.T).T)      # project the data onto the new basis
    return pcadata.PCAData(headers, projection, eigenval, vectors, m)
def kmeans_numpy(d, headers, K, whiten=True):
    """Cluster the named columns of *d* into K groups using scipy's kmeans.

    Returns (codebook, codes, error): the centroids, the per-row cluster
    assignments, and the per-row quantization error.
    """
    A = d.getData(headers)
    W = vq.whiten(A)                       # scale each feature to unit variance
    codebook, bookerror = vq.kmeans(W, K)  # fit K centroids
    codes, error = vq.vq(W, codebook)      # assign every row to a centroid
    return codebook, codes, error
def kmeans_init(d, K, categories= ""):
    """Produce initial cluster means for the data matrix *d*.

    With *categories* given, the mean of each labelled group becomes a
    cluster mean; otherwise K points are drawn uniformly at random inside
    the data's per-column bounding box.

    Bug fix: the original allocated with ``dtype = np.float``; that alias
    was removed in NumPy 1.24, so the plain builtin ``float`` is used.
    """
    if categories != "":
        cats, labels = np.unique( np.asarray( categories.T ), return_inverse = True )
        means = np.matrix( np.zeros( (len(cats), d.shape[1]) ) )
        for i in range(len(cats)):
            means[i,:] = np.mean( d[labels==i, :], axis=0)
        return means
    dims = d[0].size
    means = np.matrix(np.zeros((K, dims), dtype=float))
    maxes = d.max(0)
    mins = d.min(0)
    # draw column-by-column so each coordinate stays inside its own range
    for i in range(dims):
        for j in range(K):
            means[j, i] = random.uniform(mins[0,i], maxes[0,i])
    return means
def kmeans_algorithm(A, means):
    """Run Lloyd's k-means iterations on data matrix A from *means*.

    Returns (means, codes, errors): converged means, per-row cluster
    indexes, and per-row distances to the assigned mean.

    Bug fix: empty clusters were reseeded with
    ``A[random.randint(0, A.shape[0])]``; random.randint is inclusive of
    both endpoints, so roughly one draw in N+1 indexed past the end of A.
    random.randrange excludes the upper bound.
    """
    MIN_CHANGE = 1e-7        # convergence threshold on total squared movement
    MAX_ITERATIONS = 100
    K = means.shape[0]
    N = A.shape[0]
    for i in range(MAX_ITERATIONS):
        # assign every row to its nearest current mean
        codes, errors = kmeans_classify( A, means )
        # accumulate each cluster's member sum and count
        newmeans = np.zeros_like( means )
        counts = np.zeros( (K, 1) )
        for j in range(N):
            newmeans[codes[j,0],:] += A[j,:]
            counts[codes[j,0],0] += 1.0
        # finish the averages, reseeding any empty cluster with a random row
        for j in range(K):
            if counts[j,0] > 0.0:
                newmeans[j,:] /= counts[j, 0]
            else:
                newmeans[j,:] = A[random.randrange(A.shape[0]), :]
        # stop when the means have essentially stopped moving
        diff = np.sum(np.square(means - newmeans))
        means = newmeans
        if diff < MIN_CHANGE:
            break
    # final assignment against the converged means
    codes, errors = kmeans_classify( A, means )
    return (means, codes, errors)
def kmeans_classify(data, means):
    """Assign each row of *data* to its nearest mean (Euclidean distance).

    Returns (idxs, dist): column matrices holding, per row, the index of
    the closest mean and the distance to it.

    Fixes: the original round-tripped through ``copy.copy`` plus a sort to
    find the minimum, but this module never imports ``copy`` (it only
    worked if ``from data import *`` leaked the name); a direct min() scan
    removes the dependency and the O(K log K) sort.  The removed
    ``np.int`` / ``np.float`` dtype aliases are replaced by the builtins.
    """
    n = data.shape[0]
    idxs = np.matrix(np.zeros((n, 1), dtype=int))
    dist = np.matrix(np.zeros((n, 1), dtype=float))
    for i in range(n):
        pt = data[i]
        dists = [np.linalg.norm(means[j] - pt) for j in range(means.shape[0])]
        # first index of the minimum distance (same tie-break as the original)
        best = min(range(len(dists)), key=dists.__getitem__)
        idxs[i, 0] = best
        dist[i, 0] = dists[best]
    return (idxs, dist)
def kmeans(d, headers, K, whiten=True, categories = ''):
    """Cluster the named columns of *d* into K groups with our own k-means.

    Returns (codebook, codes, errors) from kmeans_algorithm; *whiten*
    controls whether the features are variance-normalized first, and
    *categories* optionally seeds the initial means from labels.
    """
    A = d.getData(headers)
    # normalize features to unit variance unless the caller opted out
    W = vq.whiten(A) if whiten else A
    codebook = kmeans_init(W, K, categories)
    codebook, codes, errors = kmeans_algorithm(W, codebook)
    return codebook, codes, errors
if __name__ == "__main__":
    # Smoke test: run PCA over every numeric column of the sample file.
    filenames = ['pcatest.csv']
    for name in filenames:
        d = Data(name)
        pca(d, d.getHeaders())
| {"/analysis.py": ["/data.py", "/pcadata.py"]} |
52,497 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/apps.py | from django.apps import AppConfig
class AppArmadoConfig(AppConfig):
    """Django app configuration registering the app as 'app_Armado'."""
    name = 'app_Armado'
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,498 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0005_remove_solicitud_estado.py | # Generated by Django 2.1.3 on 2019-11-10 04:49
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the 'estado' field from Solicitud.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0004_auto_20191110_0057'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='solicitud',
            name='estado',
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,499 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0002_auto_20191030_1112.py | # Generated by Django 2.2.6 on 2019-10-30 14:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: adds the custom 'tecnico' permission to Solicitud.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='solicitud',
            options={'permissions': (('tecnico', 'Es tecnico'),)},
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,500 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/urls.py | from django.urls import path
from . import views
from .views import info, form, inicio, RegistroUsuario, exito, login_exito, login_salida, listar_solicitud, listarproductos, listarpedidos, listar_solicitud_tecnico
from django.contrib import admin
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from rest_framework import routers
from app_Armado.quickstart import viewss
# Wire up our API using automatic URL routing.
router = routers.DefaultRouter()
router.register(r'users', viewss.UserViewSet)
router.register(r'products', viewss.ProductoViewSet)
urlpatterns = [
    # NOTE(review): the route name 'post_list ' ends with a space -- any
    # {% url %} lookup must include it; confirm before renaming.
    path('', views.post_list, name='post_list '),
    path('Armafox', inicio, name='Armafox'),
    path('informacion', info, name='informacion'),
    path('RegistroExitoso', exito, name='exito'),
    # 'solicitud' and 'tecnico' both route to login_exito, which branches
    # on the logged-in user's permissions internally.
    path('solicitud',login_required(login_exito) , name='solicitud'),
    path('tecnico',login_required(login_exito) , name='tecnico'),
    path('listar_solicitud',login_required(listar_solicitud) , name='listar_solicitud'),
    path('listar_soli_tecnico',login_required(listar_solicitud_tecnico) , name='listar_soli_tecnico'),
    path('listar_producto',login_required(listarproductos),name="listar_producto"),
    path('listar_pedidos',login_required(listarpedidos),name="listarpedidos"),
    path('logged_out',login_salida , name='logged_out'),
    url(r'form', RegistroUsuario.as_view(), name="form"),
    # Additionally, we include login URLs for the browsable API.
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,501 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0001_initial.py | # Generated by Django 2.2.6 on 2019-10-30 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Solicitud table.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Solicitud',
            fields=[
                ('id_servicio', models.IntegerField(primary_key=True, serialize=False)),
                ('rut_cliente', models.CharField(max_length=12)),
                ('Nombre_completo', models.CharField(max_length=100)),
                ('fecha_de_visita', models.DateField()),
                ('hora_de_visita', models.CharField(max_length=5)),
                ('direccion', models.CharField(max_length=100)),
                ('preosupuesto', models.CharField(max_length=7)),
                ('descripcion_pc_a_solicitar', models.CharField(max_length=50)),
                ('desea_induccion', models.CharField(max_length=2)),
                ('numero_celular', models.CharField(max_length=8)),
            ],
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,502 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0007_pedido_codigo_producto.py | # Generated by Django 2.1.3 on 2019-11-10 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Pedido.codigo_producto with a one-off default.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0006_auto_20191110_0215'),
    ]
    operations = [
        migrations.AddField(
            model_name='pedido',
            name='codigo_producto',
            field=models.CharField(default=4, max_length=4),
            preserve_default=False,
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,503 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0003_auto_20191109_2259.py | # Generated by Django 2.1.3 on 2019-11-10 01:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: creates Pedido and Producto, moves the 'tecnico'
    permission off Solicitud, and adds a Solicitud.estado field.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0002_auto_20191030_1112'),
    ]
    operations = [
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('id_pedido', models.IntegerField(primary_key=True, serialize=False)),
                ('rut_cliente', models.CharField(max_length=12)),
                ('nombre_cliente', models.CharField(max_length=50)),
                ('rut_visitante', models.CharField(max_length=12)),
                ('nombre_visitante', models.CharField(max_length=100)),
                ('estado_pedido', models.CharField(max_length=2)),
                ('numero_celular', models.CharField(max_length=8)),
                ('fecha_de_despacho', models.DateField()),
            ],
            options={
                'permissions': (('tecnico', 'Es tecnico'),),
            },
        ),
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('id_producto', models.TextField(max_length=12, primary_key=True, serialize=False)),
                ('procesador', models.TextField(max_length=22)),
                ('placa', models.TextField(max_length=22)),
                ('memoria_ram', models.TextField(max_length=22)),
                ('tarjeta_video', models.TextField(max_length=22)),
                ('disco_duro', models.TextField(max_length=22)),
                ('modelo_gabinete', models.TextField(max_length=20)),
                ('precio', models.TextField(max_length=20)),
            ],
        ),
        migrations.AlterModelOptions(
            name='solicitud',
            options={},
        ),
        migrations.AddField(
            model_name='solicitud',
            name='estado',
            field=models.CharField(default=2, max_length=2),
            preserve_default=False,
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,504 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0008_auto_20191110_1123.py | # Generated by Django 2.1.3 on 2019-11-10 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widens Solicitud.preosupuesto to max_length=9.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0007_pedido_codigo_producto'),
    ]
    operations = [
        migrations.AlterField(
            model_name='solicitud',
            name='preosupuesto',
            field=models.CharField(max_length=9),
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,505 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/models.py | from django.db import models
from django.utils.translation import ugettext as _
# Create your models here.
class Solicitud(models.Model):
    """A client's request for a PC-build home visit."""
    id_servicio = models.IntegerField(primary_key=True)
    rut_cliente= models.CharField(max_length=12)   # client's Chilean RUT
    Nombre_completo = models.CharField(max_length=100)
    # NOTE(review): dates/times are stored as free text, so no validation
    # or chronological ordering is possible -- confirm before converting.
    fecha_de_visita = models.CharField(max_length=10)
    hora_de_visita = models.CharField(max_length=5)
    direccion= models.CharField(max_length=100)
    # NOTE(review): field name is a typo for "presupuesto" (budget);
    # renaming requires a migration plus form/template updates.
    preosupuesto = models.CharField(max_length=9)
    descripcion_pc_a_solicitar= models.CharField(max_length=50)
    desea_induccion= models.CharField(max_length=2)    # yes/no flag as text
    numero_celular = models.CharField(max_length=8)
    def __str__(self):
        # identify the request by the client's RUT in admin listings
        return self.rut_cliente
# Model filled in via the admin and listed for the technician.
class Producto(models.Model):
    """A pre-assembled PC configuration offered for sale."""
    id_producto= models.TextField(max_length=12,primary_key=True)
    procesador = models.TextField(max_length=22)
    placa = models.TextField(max_length=22)
    memoria_ram= models.TextField(max_length=22)
    tarjeta_video= models.TextField(max_length=22)
    disco_duro = models.TextField(max_length=22)
    modelo_gabinete = models.TextField(max_length=20)
    precio = models.TextField(max_length=20)
    imagen = models.ImageField(upload_to="Productos",null=True)
    def __str__(self):
        # identify the product by its CPU in admin listings
        return self.procesador
# Model the technician fills in to place an order.
class Pedido(models.Model):
    """An order created by a technician after a client visit."""
    id_pedido = models.IntegerField(primary_key=True)
    # presumably references Producto.id_producto as text -- confirm
    codigo_producto = models.CharField(max_length=4)
    rut_cliente= models.CharField(max_length=12)
    nombre_cliente = models.CharField(max_length=50)
    rut_visitante= models.CharField(max_length=12)
    nombre_visitante = models.CharField(max_length=100)
    estado_pedido= models.CharField(max_length=2)
    numero_celular = models.CharField(max_length=8)
    fecha_de_despacho = models.CharField(max_length=10)   # free-text date
    def __str__(self):
        return self.rut_cliente
    class Meta:
        # custom permission used by login_exito to route technicians
        permissions = (
            ('tecnico',_('Es tecnico')),
        )
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,506 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/quickstart/serializers.py | from django.contrib.auth.models import User
from app_Armado.models import Producto
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic User fields over the API."""
    class Meta:
        model = User
        fields = ('url', 'username', 'email', 'groups')
class ProductoSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing the Producto fields used by the API."""
    class Meta:
        model = Producto
        fields = ('url', 'id_producto', 'procesador' , 'placa' , 'memoria_ram' , 'tarjeta_video' , 'disco_duro', 'modelo_gabinete', 'precio')
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,507 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0004_auto_20191110_0057.py | # Generated by Django 2.1.3 on 2019-11-10 03:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: converts Solicitud.fecha_de_visita to a CharField.

    NOTE(review): generated by makemigrations -- do not edit by hand once
    applied to a database.
    """
    dependencies = [
        ('app_Armado', '0003_auto_20191109_2259'),
    ]
    operations = [
        migrations.AlterField(
            model_name='solicitud',
            name='fecha_de_visita',
            field=models.CharField(max_length=10),
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,508 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/tests.py | from django.test import TestCase
# Create your tests here.
import unittest
class Pruebas(unittest.TestCase):
    """Placeholder test case with no real assertions yet."""
    def test(self):
        pass
if __name__ == "__main__":
    unittest.main()
import unittest
def doblar(a):
    """Return *a* doubled (numbers add, sequences repeat)."""
    return a * 2
def sumar(a, b):
    """Return the sum (or concatenation) of *a* and *b*."""
    return a + b
def es_par(a):
    """Return 1 when *a* is even, 0 when odd."""
    return 0 if a % 2 else 1
class PruebasFunciones(unittest.TestCase):
    """Unit tests for the doblar / sumar / es_par helpers above."""
    def test_doblar(self):
        self.assertEqual(doblar(10), 20)
        self.assertEqual(doblar('Ab'), 'AbAb')
    def test_sumar(self):
        self.assertEqual(sumar(-15, 15), 0)
        self.assertEqual(sumar('Ab' ,'cd'), 'Abcd')
    def test_es_par(self):
        # es_par returns 0/1, which compare equal to False/True
        self.assertEqual(es_par(11), False)
        self.assertEqual(es_par(68), True)
if __name__ == '__main__':
    unittest.main()
import unittest
class PruebasMetodosCadenas(unittest.TestCase):
    """Demonstration tests exercising built-in str methods."""
    def test_upper(self):
        self.assertEqual('hola'.upper(), 'HOLA')
    def test_isupper(self):
        self.assertTrue('HOLA'.isupper())
        self.assertFalse('Hola'.isupper())
    def test_split(self):
        s = 'Hola mundo'
        self.assertEqual(s.split(), ['Hola', 'mundo'])
if __name__ == '__main__':
    unittest.main()
import unittest
class PruebaTestFixture(unittest.TestCase):
    """Demonstrates setUp/tearDown fixtures around a single test."""
    def setUp(self):
        # runs before each test method
        print("Preparando el contexto")
        self.numeros = [1, 2, 3, 4, 5]
    def test(self):
        print("Realizando una prueba")
        r = [doblar(n) for n in self.numeros]
        self.assertEqual(r, [2, 4, 6, 8, 10])
    def tearDown(self):
        # runs after each test method
        print("Destruyendo el contexto")
        del(self.numeros)
if __name__ == '__main__':
    unittest.main()
52,509 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/quickstart/viewss.py |
from django.contrib.auth.models import User
from app_Armado.models import Producto
from rest_framework import viewsets
from app_Armado.quickstart.serializers import UserSerializer, ProductoSerializer
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    Newest accounts (by date_joined) are listed first.
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class ProductoViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Producto records to be viewed or edited.
    """
    queryset = Producto.objects.all()
    serializer_class = ProductoSerializer
52,510 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/views.py | from django.shortcuts import render
from .models import Solicitud, Producto, Pedido
from django.views.generic import CreateView
from django.contrib.auth.models import User
from app_Armado.forms import RegistroForm
from django.urls import reverse_lazy
# Create your views here.
def post_list(request):
    """Render the default landing template."""
    context = {}
    return render(request, 'plantillas/Armafox.html', context)
def inicio(request):
    """Render the Armafox home page."""
    template_name = 'plantillas/Armafox.html'
    return render(request, template_name)
def info(request):
    """Render the Armafox wiki/information page."""
    template_name = 'plantillas/informacion.html'
    return render(request, template_name)
def form(request):
    """Render the user-registration form template."""
    template_name = 'plantillas/form.html'
    return render(request, template_name)
# User registration view.
class RegistroUsuario(CreateView):
    """CreateView that registers a new User through RegistroForm and
    redirects to the 'exito' page on success."""
    model = User
    template_name = "plantillas/form.html"
    form_class = RegistroForm
    success_url = reverse_lazy('exito')
# Render the "registration successful" confirmation page.
def exito(request):
    """Show the registration-success template."""
    return render(request, 'registration/RegistroExitoso.html')
# Post-login page, depending on the type of user (technician vs client).
def _guardar_pedido(request):
    """Build and save a Pedido from the technician's POSTed form fields."""
    pedido = Pedido(
        id_pedido=request.POST.get("id_pedido", ""),
        codigo_producto=request.POST.get("codigo_producto", ""),
        rut_cliente=request.POST.get("rut_cliente", ""),
        nombre_cliente=request.POST.get("nombre_cliente", ""),
        rut_visitante=request.POST.get("rut_visitante", ""),
        nombre_visitante=request.POST.get("nombre_visitante", ""),
        estado_pedido=request.POST.get("estado_pedido", ""),
        numero_celular=request.POST.get("numero_celular", ""),
        fecha_de_despacho=request.POST.get("fecha_de_despacho", ""),
    )
    pedido.save()
def _guardar_solicitud(request):
    """Build and save a Solicitud from the client's POSTed form fields."""
    solicitud = Solicitud(
        id_servicio=request.POST.get("id_servicio", ""),
        rut_cliente=request.POST.get("rut_cliente", ""),
        Nombre_completo=request.POST.get("Nombre_completo", ""),
        fecha_de_visita=request.POST.get("fecha_de_visita", ""),
        hora_de_visita=request.POST.get("hora_de_visita", ""),
        direccion=request.POST.get("direccion", ""),
        preosupuesto=request.POST.get("preosupuesto", ""),
        descripcion_pc_a_solicitar=request.POST.get("descripcion_pc_a_solicitar", ""),
        desea_induccion=request.POST.get("desea_induccion", ""),
        numero_celular=request.POST.get("numero_celular", ""),
    )
    solicitud.save()
def login_exito(request):
    """Post-login landing view.

    Technicians (permission 'app_Armado.tecnico') get the technician page
    and their POSTs create Pedido rows; everyone else gets the client page
    and their POSTs create Solicitud rows.

    Fixes vs. the original: the unused ``Pedido.objects.all()`` /
    ``Solicitud.objects.all()`` querysets (never evaluated, immediately
    shadowed by the model instance) are dropped, and the two near-identical
    field-harvesting blocks are extracted into helpers.
    NOTE(review): a POST still re-renders the template instead of
    redirecting (no Post/Redirect/Get) -- confirm before changing that.
    """
    if request.user.has_perm('app_Armado.tecnico'):
        if request.POST:
            _guardar_pedido(request)
        return render(request, 'plantillas/tecnico.html')
    if request.POST:
        _guardar_solicitud(request)
    return render(request, 'plantillas/solicitud.html')
# Logout confirmation view.
def login_salida(request):
    """Render the logged-out confirmation page."""
    template_name = 'registration/logged_out.html'
    return render(request, template_name)
# Client-facing solicitud listing.
def listar_solicitud(request):
    """List every Solicitud for the client-facing template."""
    solicitudes = Solicitud.objects.all()
    return render(request, 'plantillas/listar_solicitud.html', {'solicitud': solicitudes})
# Technician-facing solicitud listing.
def listar_solicitud_tecnico(request):
    """List every Solicitud for the technician template."""
    solicitudes = Solicitud.objects.all()
    return render(request, 'plantillas/listar_soli_tecnico.html', {'solicitud': solicitudes})
# Product catalogue listing.
def listarproductos(request):
    """List every Producto."""
    productos = Producto.objects.all()
    return render(request, 'plantillas/listar_producto.html', {'productos': productos})
# Order (pedido) listing.
def listarpedidos(request):
    """List every Pedido."""
    pedidos = Pedido.objects.all()
    return render(request, 'plantillas/listar_pedido.html', {'pedidos': pedidos})
52,511 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/admin.py | from django.contrib import admin
from .models import Solicitud , Producto, Pedido
# Register the app's models with the Django admin site.
for _model in (Solicitud, Producto, Pedido):
    admin.site.register(_model)
52,512 | jonathanavalosescobar/mysite | refs/heads/master | /app_Armado/migrations/0009_producto_imagen.py | # Generated by Django 2.1.3 on 2019-11-28 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.3): adds the 'imagen' field to Producto.

    NOTE: do not hand-edit migrations that have already been applied.
    """
    dependencies = [
        ('app_Armado', '0008_auto_20191110_1123'),
    ]
    operations = [
        migrations.AddField(
            model_name='producto',
            name='imagen',
            # null=True so existing rows need no image; files upload to 'Productos/'.
            field=models.ImageField(null=True, upload_to='Productos'),
        ),
    ]
| {"/app_Armado/urls.py": ["/app_Armado/views.py"], "/app_Armado/quickstart/serializers.py": ["/app_Armado/models.py"], "/app_Armado/quickstart/viewss.py": ["/app_Armado/models.py", "/app_Armado/quickstart/serializers.py"], "/app_Armado/views.py": ["/app_Armado/models.py"], "/app_Armado/admin.py": ["/app_Armado/models.py"]} |
52,527 | McTuTuM/x_or_o1 | refs/heads/master | /Window.py | from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QGraphicsScene, QGraphicsView, QWidget
from x_or_o import QtWidgets, Ui_MainWindow
class Window(Ui_MainWindow, QtWidgets.QMainWindow):
    """Main application window built from the generated Ui_MainWindow.

    Fixes over the original:
    * the clicked-signal lambda called ``self.grapView(self)``, passing
      ``self`` as an extra positional argument and raising ``TypeError``
      on every press; the bound method is now connected directly;
    * ``button_click`` was never invoked, so the signal was never wired;
      it is now hooked up during ``__init__``;
    * the QGraphicsView was a local variable, so it was garbage-collected
      (and its window closed) as soon as ``grapView`` returned; it is now
      kept alive on the instance.
    """

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.setupUi(self)
        self.view = None  # holds the last opened QGraphicsView so it survives
        self.button_click()

    def button_click(self):
        """Wire the push button to open the graphics view."""
        # Connect the bound method itself; PyQt drops the extra
        # 'checked' argument for callables that take no parameters.
        self.pushButton.clicked.connect(self.grapView)

    def grapView(self):
        """Open a small QGraphicsView window showing the text 'Hi'."""
        scene = QGraphicsScene()
        scene.addText('Hi')
        # Store on the instance so the view is not garbage-collected.
        self.view = QGraphicsView(scene)
        self.view.show()
| {"/main.py": ["/Window.py"]} |
52,528 | McTuTuM/x_or_o1 | refs/heads/master | /main.py | from Window import Window
import sys
from PyQt5 import QtWidgets
if __name__ == "__main__":
    # Standard PyQt bootstrap: one QApplication, one top-level window.
    application = QtWidgets.QApplication(sys.argv)
    main_window = Window()
    main_window.show()
    # Hand control to the Qt event loop; its exit code becomes ours.
    sys.exit(application.exec_())
52,529 | McTuTuM/x_or_o1 | refs/heads/master | /general.py | import pygame
import sys
from pygame.color import THECOLORS
import math
from x_or_o import Ui_MainWindow
from PyQt5 import QtWidgets
# BUG FIX: the original line was `pygame.init` — a bare reference that
# never called the function, so pygame modules were never initialized.
pygame.init()
class General():
    """Tic-tac-toe game: board geometry, event loop and win detection.

    Board geometry and game state live in class attributes shared by all
    instances and by the Box cells.
    """
    def __init__(self):
        # NOTE(review): General does not inherit from Ui_MainWindow; this
        # base-__init__ call only works if that class tolerates it — verify.
        Ui_MainWindow.__init__(self)
        # Last computed drawing coordinates (circle center / cross lines).
        self.loc = None
        self.loc1start = None
        self.loc1end = None
        self.loc2start = None
        self.loc2end = None
        self.button_event()
    # Shared game state / geometry (class-level, mutated via General.*).
    game_over = False
    height = 300
    width = 300
    grid = 3
    # Line thickness derived from board width and grid size.
    size = math.ceil((width * 0.02) / ((grid) / 2))
    screen = pygame.display.set_mode((width, height))
    def location1(self, event, i, j):
        """Map a mouse click to cell (i, j) (1-based).

        Returns (circle center, cross line 1 start/end, cross line 2
        start/end) for that cell, or None when the click is outside it.
        """
        if (
            (i - 1) / self.grid * self.width < event.pos[0] < i / self.grid * self.width and
            (j - 1) / self.grid * self.height < event.pos[1] < j / self.grid * self.height
        ):
            # Marks are inset 2/30 of a cell from each edge.
            start_line_x = (i - 1)/ self.grid * self.width + 2 / 30 * (1 / self.grid * self.width)
            end_line_x = start_line_x + 26 / 30 * (1 / self.grid *self.width)
            start_line_y = (j - 1)/ self.grid * self.height + 2 / 30 * (1 / self.grid * self.height)
            end_line_y = start_line_y + 26 / 30 * (1 / self.grid * self.height)
            loc1start = (start_line_x, start_line_y)
            loc1end = (end_line_x, end_line_y)
            loc2start = (end_line_x, start_line_y)
            loc2end = (start_line_x, end_line_y)
            # Cell center: used as the circle's midpoint.
            loc = (
                (i - 1)/ self.grid * self.width + 1 / 2 * (1 / self.grid * self.width),
                (j - 1)/ self.grid * self.height + 1 / 2 * (1 / self.grid * self.height)
            )
            return loc, loc1start, loc1end, loc2start, loc2end
    def start(self):
        """Run one game: draw the grid, build cells, pump pygame events."""
        General.field()
        arr = []
        General.game_over = False
        Box.x = True
        for i in range(General.grid):
            arr2 = []
            for j in range(self.grid):
                arr2.append(Box())
            arr.append(arr2)
        while not General.game_over:
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Find the cell the click landed in (1-based indices).
                    for i in range(self.grid):
                        for j in range(self.grid):
                            data = self.location1(event, i + 1, j + 1)
                            if data is not None:
                                arr[i][j].check(*data)
                                self.win_or_lose(arr)
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
            pygame.display.flip()
        # NOTE(review): restarts a new game via recursion; the stack grows
        # by one frame per completed game.
        self.start()
    def win_or_lose(self, arr):
        """Check rows, columns and diagonals; print the result and set
        General.game_over on a win or a full board (memory: 1=o, 2=x)."""
        s = 0
        # Rows.
        for i in arr:
            if all(tuple(j.memory == 1 for j in i )):
                print('win o')
                General.game_over = True
            elif all(tuple(j.memory == 2 for j in i )):
                print('win x')
                General.game_over = True
        # Columns.
        for j in range(General.grid):
            for i in range(General.grid):
                if all(tuple(arr[i] [j].memory == 1 for i in range(General.grid))):
                    print('win o')
                    General.game_over = True
                elif all(tuple(arr[i] [j].memory == 2 for i in range(General.grid))):
                    print('win x')
                    General.game_over = True
        # Main diagonal.
        if all(arr[j][j].memory == 1 for j in range(General.grid)):
            print('o win')
            General.game_over = True
        elif all(arr[j][j].memory == 2 for j in range(General.grid)):
            print('x win')
            General.game_over = True
        # Anti-diagonal.
        if all(arr[j][General.grid - 1 - j].memory == 1 for j in range(General.grid)):
            print('o win')
            General.game_over = True
        elif all(arr[j][General.grid - 1 - j].memory == 2 for j in range(General.grid)):
            print('x win')
            General.game_over = True
        # Draw detection: count claimed cells.
        for i in range(General.grid):
            for j in range(General.grid):
                if arr[i] [j].memory > 0 :
                    s += 1
        if s == self.grid ** 2:
            print("nothink")
            General.game_over = True
    def button_event(self):
        """Placeholder hook called from __init__; intentionally empty."""
        pass
    @staticmethod
    def field():
        """Clear the shared surface and draw the grid lines."""
        General.screen.fill(THECOLORS['white'])
        for i in range(1, General.grid):
            pygame.draw.line(General.screen, THECOLORS['black'], ((i / General.grid * General.width, 0)),((i / General.grid * General.width, General.height)), General.size)
            pygame.draw.line(General.screen, THECOLORS['black'], ((0, i / General.grid * General.height)),((General.width, i / General.grid * General.height)), General.size)
class Box():
    """One board cell: draws and remembers the mark it holds."""
    def __init__(self):
        # True once this cell has been claimed; further clicks rejected.
        self.incorrect = False
        # 0 = empty, 1 = circle ("o"), 2 = cross ("x").
        self.memory = 0
    # Class-wide turn flag shared by every cell; toggled on each move.
    x = True
    def x_or_o(self, loc, loc1start, loc1end, loc2start, loc2end):
        """Draw the current player's mark in this cell and record it."""
        Box.x = not Box.x
        if Box.x:
            self.figure_1(loc)
            self.memory = 1
        else:
            self.figure_2(loc1start, loc1end, loc2start, loc2end)
            self.memory = 2
    def check(self, loc, loc1start, loc1end, loc2start, loc2end):
        """Place a mark only if the cell is still free."""
        if not self.incorrect:
            self.x_or_o(loc, loc1start, loc1end, loc2start, loc2end)
            self.incorrect = True
        else:
            print("---")
    def figure_1(self, loc):
        """Draw a circle ("o") centered at loc on the shared surface."""
        pygame.draw.circle(General.screen, THECOLORS["black"], loc, round(1 / (2.5 * General.grid) * min(General.height, General.width)), General.size)
    def figure_2(self, loc1start, loc1end, loc2start, loc2end):
        """Draw a cross ("x") as two diagonal line segments."""
        pygame.draw.line(General.screen, THECOLORS["black"], loc1start, loc1end, General.size)
        pygame.draw.line(General.screen, THECOLORS["black"], loc2start, loc2end, General.size)
if __name__ == "__main__":
    # Create the game object and enter its event loop.
    game = General()
    game.start()
52,558 | tina315/school-manage | refs/heads/master | /src/test_assert.py | # -*- coding:utf-8 -*-
# @Time : 2020/11/29 17:27
# @Author : xiangxuan
import pytest
# Equality: 3 + 4 is exactly 7 (passes).
def test_add():
    assert 3 + 4 == 7
# Inequality: 49 != 50 (passes).
def test_add2():
    assert 27 + 22 != 50
# Less-than-or-equal: 49 <= 50 (passes).
# NOTE(review): the original comment claimed "greater-or-equal".
def test_add3():
    assert 27 + 22 <= 50
# Greater-than-or-equal: 49 >= 50 is False, so this test fails when
# run — presumably a deliberate assertion demo.
def test_add4():
    assert 27 + 22 >= 50
# Less-than: 49 < 50 (passes).
def test_add5():
    assert 27 + 22 < 50
# Greater-than: 49 > 50 is False, so this test also fails when run.
def test_add6():
    assert 27 + 22 > 50
def test_in():
    """Substring containment: "he" occurs in "hello world"."""
    haystack = "hello world"
    needle = "he"
    assert needle in haystack
# Negative membership check.
def test_not_in():
    """Substring absence: "hi" does not occur in "hello world"."""
    haystack = "hello world"
    needle = "hi"
    assert needle not in haystack
# Allow running this file directly; "-s" disables pytest's output capture.
if __name__ == '__main__':
    pytest.main(["-s"])
| {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,559 | tina315/school-manage | refs/heads/master | /src/test_para.py | # -*- coding:utf-8 -*-
# @Time : 2020/11/29 17:25
# @Author : xiangxuan
import pytest
def value():
    """Parameter sets for the parametrized test below: (a, b) pairs."""
    pairs = [
        (2, 2),
        (5, 3),
        (4, 4)
    ]
    return pairs
# Runs once per (a, b) pair from value(); the (5, 3) case fails, but the
# remaining cases still run — the point of parametrization.
@pytest.mark.parametrize("a, b", value())
def test_01(a, b):
    print(a)
    print(b)
    assert a == b
def test_02():
"""
这里会有一个坑,想法是测三种场景,实际第二种场景报错后不会运行第三种情况,建议使用上面的参数化方式。
"""
x = [
(2, 2),
(5, 3),
(4, 4)
]
for a, b in x:
assert a == b
# pytest.param lets an individual case carry marks, e.g. an expected failure.
@pytest.mark.parametrize("a, b", [(2, 2), (5, 3), (4, 4), pytest.param(42, 42, marks=pytest.mark.xfail)])
def test(a, b):
    print(a)
    print(b)
    assert a == b
# Allow running this file directly; "-s" disables pytest's output capture.
if __name__ == '__main__':
    pytest.main(['-s'])
| {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,560 | tina315/school-manage | refs/heads/master | /src/__init__.py | #-*- coding:utf-8 -*-
# @Time : 2020/11/29 16:34
# @Author : xiangxuan | {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,561 | tina315/school-manage | refs/heads/master | /test_data/__init__.py | #-*- coding:utf-8 -*-
# @Time : 2020/11/30 7:19
# @Author : xiangxuan | {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,562 | tina315/school-manage | refs/heads/master | /config/__init__.py | #-*- coding:utf-8 -*-
# @Time : 2020/11/30 7:18
# @Author : xiangxuan | {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,563 | tina315/school-manage | refs/heads/master | /src/test_setup_teardown.py | # -*- coding:utf-8 -*-
# @Time : 2020/11/29 16:34
# @Author : xiangxuan
import pytest
def setup_module(module):
    """
    Module-level setup: runs once, before any test in this module
    executes.  Note that it is a plain module-level function.
    """
    print("#### setup before module")
def teardown_module(module):
    """
    Module-level teardown: runs once, after every test in this module
    has finished.  Note that it is a plain module-level function.
    """
    print("#### teardown after module")
def setup_function(function):
    """
    Function-level setup: runs before each top-level test function.
    """
    print("#### setup_function")
def teardown_function(function):
    """
    Function-level teardown: runs after each top-level test function.
    """
    print("#### teardown_function")
def test_case001():
    # Trivial passing assertion plus a progress marker on stdout.
    assert 1 == 1
    print("#### test_case001")
def test_case002():
    # Trivial passing assertion plus a progress marker on stdout.
    assert 1 == 1
    print("#### test_case002")
# NOTE: when writing cases as a test class, do not define __init__.
class TestSohu(object):
    @classmethod
    def setup_class(cls):
        """
        Class-level setup: runs once before any test in TestSohu.
        Note that it is a @classmethod.
        """
        print("~~~~ setup before class TestSohu")
    @classmethod
    def teardown_class(cls):
        """
        Class-level teardown: runs once after every test in TestSohu
        has finished.  Note that it is a @classmethod.
        """
        print("~~~~ teardown after class TestSohu")
    def setup_method(self, method):
        """
        Method-level setup: runs before each test method in TestSohu.
        """
        print("~~~~ setup before each method")
    def teardown_method(self, method):
        """
        Method-level teardown: runs after each test method in TestSohu.
        """
        print("~~~~ teardown after each method")
    def test_login(self):
        assert True == True
        print("~~~~ test_login")
    def test_logout(self):
        assert True == True
        print("~~~~ test_logout")
# Allow running this file directly; "-s" disables pytest's output capture.
if __name__ == '__main__':
    pytest.main(['-s'])
| {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,564 | tina315/school-manage | refs/heads/master | /locallib/add_tags.py | #-*- coding:utf-8 -*-
# @Time : 2020/11/30 7:29
# @Author : xiangxuan
import time
import allure
def add_tag(tag_name):
    """Record the creation of one tag as an allure report step.

    :param tag_name: name of the tag being created
    :return: None
    """
    # NOTE(review): the original computed time.asctime(...) into a local
    # that was never used (never interpolated into the step text), so the
    # dead assignment is dropped here.
    with allure.step("根据时间创建一个标签,标签名字是:" + tag_name):
        allure.attach("添加结果:成功")
| {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,565 | tina315/school-manage | refs/heads/master | /main.py | # test jenkins auto build
import time
import os
import pytest
# Log the wall-clock time of this run (handy in Jenkins console output).
started_at = time.asctime( time.localtime(time.time()) )
print(started_at)

# Build the pytest argument vector: no output capture, an optional mark
# filter, and an allure results directory.
pytest_args = ["-s"]
case_mark = 'auto'
if case_mark:
    pytest_args.append("-m=%s" % case_mark)
pytest_args.append("--alluredir")
pytest_args.append("report")
print(pytest_args)
pytest.main(pytest_args)
print(os.path.abspath(__file__))
52,566 | tina315/school-manage | refs/heads/master | /src/test_01.py | # -*- coding: utf-8 -*-
# @Time : 2020/11/27 06:40
# @Author : hexiangxuan
# @File : test_case.py
import allure
import pytest
from locallib.add_tags import add_tag
@allure.epic('自动化用例')
@allure.feature('大模块')
@allure.story('小模块')
@pytest.mark.auto
class TestOne:
def setup_class(self):
pass
@allure.title("获取标签数据信息")
@allure.severity("critical")
@pytest.mark.critical
def test_case_01(self):
"""
author:xiangxuan
describe: 获取标签数据信息
"""
add_tag('网购达人')
assert 0 == 0
@allure.title("新增标签")
@allure.severity("normal")
@pytest.mark.normal
@allure.issue('https://www.baidu.com', '百度首页')
def test_case_02(self):
"""
作者:xiangxuan
描述:新增一个标签
"""
assert 0 == 0
if __name__ == '__main__':
pytest.main(['-s', '-q', '--alluredir', './report/xml'])
| {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,567 | tina315/school-manage | refs/heads/master | /conftest.py | #-*- coding:utf-8 -*-
# @Time : 2020/11/14 7:52
# @Author : xiangxuan | {"/src/test_01.py": ["/locallib/add_tags.py"]} |
52,579 | apache/yetus | refs/heads/main | /releasedocmaker/src/main/python/releasedocmaker/getversions.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Handle versions in JIRA """
import copy
import http.client
import json
import logging
import re
import sys
import urllib.error
from .utils import get_jira
class ReleaseVersion:
    '''A comparable release-version string (e.g. "0.13.0" or "3.0.0 alpha1").

    The string is split on spaces and dots; each component is parsed as an
    int, tolerating a single-character prefix (so "rc2" -> 2).  Components
    that cannot be parsed at all are stored as -1 and force a plain string
    comparison when encountered during ordering.
    '''

    def __init__(self, version=None):
        self.rawversion = version
        # "1.2.0 beta1" -> ['1', '2', '0', 'beta1']
        self.rawcomponents = re.split('[ \\.]', version)
        self.intcomponents = []
        for value in self.rawcomponents:
            try:
                self.intcomponents.append(int(value))
            except ValueError:
                try:
                    # Tolerate a one-character prefix, e.g. 'v1' or 'b2'.
                    self.intcomponents.append(int(value[1:]))
                except ValueError:
                    # Marker: fall back to raw string comparison.
                    self.intcomponents.append(-1)

    def __repr__(self):
        return f"ReleaseVersion ('{str(self)}')"

    def __str__(self):
        return self.rawversion

    def __lt__(self, cmpver):
        '''Numeric, component-wise "less than".

        Accepts another ReleaseVersion, or an int/str which is wrapped
        first.  Missing trailing components compare as 0, so
        "1.2" < "1.2.1".

        BUG FIX over the original: the comparison no longer mutates its
        right-hand operand (the old code appended padding entries to
        ``cmpver``'s internal component lists as a side effect of every
        unequal-length comparison).
        '''
        if isinstance(cmpver, (int, str)):
            cmpver = ReleaseVersion(cmpver)
        # Shortcut: identical raw strings are never strictly less-than.
        if self.rawversion == cmpver.rawversion:
            return False
        # Zero-pad local copies so both sides have the same length.
        width = max(len(self.intcomponents), len(cmpver.intcomponents))
        left = self.intcomponents + [0] * (width - len(self.intcomponents))
        right = cmpver.intcomponents + [0] * (width - len(cmpver.intcomponents))
        for lval, rval in zip(left, right):
            if lval == -1 or rval == -1:
                # Non-numeric component: compare the raw strings instead.
                return self.rawversion < cmpver.rawversion
            if lval < rval:
                return True
            if lval > rval:
                return False
        return False
class GetVersions:  # pylint: disable=too-few-public-methods
    """ List of version strings """
    def __init__(self, versions, projects, jira_base_url):
        """Compute the server-known versions spanning the user's range.

        :param versions: version strings supplied by the user
        :param projects: JIRA project keys to query
        :param jira_base_url: base URL of the JIRA server

        Queries every project's version list and keeps each server-known
        version falling inclusively between the lowest and highest
        user-supplied version (ReleaseVersion ordering).  Any HTTP or
        network failure exits the process with status 1.
        """
        self.userversions = sorted(versions, key=ReleaseVersion)
        logging.info("Looking for %s through %s", self.userversions[0],
                     self.userversions[-1])
        serverversions = set()
        for project in projects:
            url = f"{jira_base_url}/rest/api/2/project/{project.upper()}/versions"
            try:
                resp = get_jira(url)
            except (urllib.error.HTTPError, urllib.error.URLError,
                    http.client.BadStatusLine):
                # get_jira is expected to have logged the failure already.
                sys.exit(1)
            datum = json.loads(resp.read())
            for data in datum:
                serverversions.add(data['name'])
        serverversions = sorted(serverversions, key=ReleaseVersion)
        # Merge user + server versions so the user's endpoints are always
        # present, then slice out the inclusive range between them.
        combolist = serverversions + self.userversions
        comboset = set(combolist)
        combolist = sorted(comboset, key=ReleaseVersion)
        start_index = combolist.index(self.userversions[0])
        end_index = combolist.index(self.userversions[-1])
        self.versions = []
        for candidate in combolist[start_index:end_index+1]:
            # Only versions the server actually knows about are kept.
            if candidate in serverversions:
                self.versions.append(candidate)
                logging.info('Adding %s to the list', candidate)
    def getlist(self):
        """ Get the list of versions """
        return self.versions
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,580 | apache/yetus | refs/heads/main | /shelldocs/src/main/python/shelldocs.py | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" process bash scripts and generate documentation from them """
# Do this immediately to prevent compiled forms
import logging
import os
import pathlib
import re
import sys
from argparse import ArgumentParser
sys.dont_write_bytecode = True
ASFLICENSE = '''
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
'''
class ShellFunction:  # pylint: disable=too-many-instance-attributes
    """a shell function parsed from shelldoc '## @...' annotations"""
    def __init__(self, filename='Unknown'):
        '''Initializer'''
        self.audience = ''
        self.description = []
        self.filename = filename
        self.linenum = 0
        self.name = ''
        self.params = []
        self.replacebool = False
        # raw text of the @replaceable annotation ('yes'/'no' expected)
        self.replacerawtext = ''
        self.replacetext = 'Not Replaceable'
        # lines of the @return annotation ('returnt' avoids a name clash)
        self.returnt = []
        self.stability = ''
    def __lt__(self, other):
        '''Sort order: Public before Private, Stable first within an
        audience, replaceable first within a stability, then by name.'''
        if self.audience == other.audience:
            if self.stability == other.stability:
                if self.replacebool == other.replacebool:
                    return self.name < other.name
                if self.replacebool:
                    return True
            else:
                if self.stability == "Stable":
                    return True
        else:
            if self.audience == "Public":
                return True
        return False
    def header(self):
        '''get the audience/stability/replaceability group header'''
        return f"{self.audience}/{self.stability}/{self.replacetext}"
    def getdocpage(self):
        '''render this function as a markdown documentation section'''
        params = " ".join(self.params)
        usage = f"{self.name} {params}"
        description = "\n".join(self.description)
        if not self.returnt:
            returntext = 'Nothing'
        else:
            returntext = "\n".join(self.returnt)
        return (f"### `{self.name}`\n\n"
                "* Synopsis\n\n"
                f"```\n{usage}\n"
                "```\n\n"
                "* Description\n\n"
                f"{description}\n\n"
                "* Returns\n\n"
                f"{returntext}\n\n"
                "| Classification | Level |\n"
                "| :--- | :--- |\n"
                f"| Audience | {self.audience} |\n"
                f"| Stability | {self.stability} |\n"
                f"| Replaceable | {self.replacebool} |\n\n")
    def isprivateandnotreplaceable(self):
        ''' is this function Private and not replaceable? '''
        return self.audience == "Private" and not self.replacebool
    def lint(self):
        '''Log an error for every missing or invalid annotation value.'''
        validvalues = {
            "audience": ("Public", "Private"),
            "stability": ("Stable", "Evolving"),
            "replacerawtext": ("yes", "no"),
        }
        for attribute, attrvalues in validvalues.items():
            value = getattr(self, attribute)
            # A missing @replaceable is tolerated; the others are required.
            if (not value or value == '') and attribute != 'replacerawtext':
                logging.error("%s:%u:ERROR: function %s has no @%s",
                              self.filename, self.linenum, self.name,
                              attribute.lower())
            elif value not in attrvalues:
                if attribute == 'replacerawtext' and value == '':
                    continue
                validvalue = "|".join(v.lower() for v in attrvalues)
                logging.error(
                    "%s:%d:ERROR: function %s has invalid value (%s) for @%s (%s)",
                    self.filename, self.linenum, self.name, value.lower(),
                    attribute.lower().replace('rawtext', 'able'), validvalue)
    def __str__(self):
        '''Generate a string for this function'''
        return f"{{{self.name} {self.audience} {self.stability} {self.replacebool}}}"
class ProcessFile:
    ''' shell file processor: extracts ShellFunction objects from a script '''
    # Matches a shell function definition of the form "name() {".
    FUNCTIONRE = re.compile(r"^(\w+) *\(\) *{")
    def __init__(self, filename=None, skipsuperprivate=False):
        self.filename = filename
        self.functions = []
        # When True, drop functions that are Private and not Replaceable.
        self.skipsuperprivate = skipsuperprivate
    def isignored(self):
        """Checks for the presence of the marker(SHELLDOC-IGNORE) to ignore the file.
        Marker needs to be in a line of its own and can not
        be an inline comment.
        A leading '#' and white-spaces(leading or trailing)
        are trimmed before checking equality.
        Comparison is case sensitive and the comment must be in
        UPPERCASE.
        """
        with open(self.filename) as input_file: #pylint: disable=unspecified-encoding
            for line in input_file:
                if line.startswith(
                        "#") and line[1:].strip() == "SHELLDOC-IGNORE":
                    return True
        return False
    @staticmethod
    def _docstrip(key, dstr):
        '''remove the "## @<key> " prefix and surrounding whitespace'''
        dstr = re.sub(f"^## @{key} ", "", dstr)
        dstr = dstr.strip()
        return dstr
    def _process_description(self, funcdef, text=None):
        '''append a @description line (None/empty text resets the list)'''
        if not text:
            funcdef.description = []
            return
        funcdef.description.append(self._docstrip('description', text))
    def _process_audience(self, funcdef, text=None):
        '''set the audience of the function'''
        if not text:
            return
        funcdef.audience = self._docstrip('audience', text)
        # Normalize to "Public"/"Private" style capitalization.
        funcdef.audience = funcdef.audience.capitalize()
    def _process_stability(self, funcdef, text=None):
        '''set the stability of the function'''
        if not text:
            return
        funcdef.stability = self._docstrip('stability', text)
        funcdef.stability = funcdef.stability.capitalize()
    def _process_replaceable(self, funcdef, text=None):
        '''set the replacement state'''
        if not text:
            return
        funcdef.replacerawtext = self._docstrip("replaceable", text)
        if funcdef.replacerawtext in ['yes', 'Yes', 'true', 'True']:
            funcdef.replacebool = True
        else:
            funcdef.replacebool = False
        # Human-readable form used in the group header.
        if funcdef.replacebool:
            funcdef.replacetext = 'Replaceable'
        else:
            funcdef.replacetext = 'Not Replaceable'
    def _process_param(self, funcdef, text=None):
        '''add a parameter (None/empty text resets the list)'''
        if not text:
            funcdef.params = []
            return
        funcdef.params.append(self._docstrip('param', text))
    def _process_return(self, funcdef, text=None):
        '''add a return value (None/empty text resets the list)'''
        if not text:
            funcdef.returnt = []
            return
        funcdef.returnt.append(self._docstrip('return', text))
    @staticmethod
    def _process_function(funcdef, text=None, linenum=1):
        '''set the name of the function from its definition line'''
        # Handles both "name() {" and "function name" styles.
        if ProcessFile.FUNCTIONRE.match(text):
            definition = ProcessFile.FUNCTIONRE.match(text).groups()[0]
        else:
            definition = text.split()[1]
        funcdef.name = definition.replace("(", "").replace(")", "")
        funcdef.linenum = linenum
    def process_file(self):
        """ stuff all of the functions into an array """
        self.functions = []
        # Maps each shelldoc annotation prefix to its handler method name;
        # dispatched below via getattr.
        mapping = {
            '## @description': '_process_description',
            '## @audience': '_process_audience',
            '## @stability': '_process_stability',
            '## @replaceable': '_process_replaceable',
            '## @param': '_process_param',
            '## @return': '_process_return',
        }
        if self.isignored():
            return
        try:
            with open(self.filename, "r") as shellcode: #pylint: disable=unspecified-encoding
                # if the file contains a comment containing
                # only "SHELLDOC-IGNORE" then skip that file
                funcdef = ShellFunction(self.filename)
                linenum = 0
                for line in shellcode:
                    linenum = linenum + 1
                    # Annotation lines accumulate state on the pending funcdef.
                    for text, method in mapping.items():
                        if line.startswith(text):
                            getattr(self, method)(funcdef, text=line)
                    # A definition line finalizes the pending funcdef.
                    if line.startswith(
                            'function') or ProcessFile.FUNCTIONRE.match(line):
                        self._process_function(funcdef,
                                               text=line,
                                               linenum=linenum)
                        if self.skipsuperprivate and funcdef.isprivateandnotreplaceable(
                        ):
                            pass
                        else:
                            self.functions.append(funcdef)
                        # Start collecting annotations for the next function.
                        funcdef = ShellFunction(self.filename)
        except OSError as err:
            logging.error("ERROR: Failed to read from file: %s. Skipping.",
                          err.filename)
            self.functions = []
class MarkdownReport:
    ''' generate a markdown report from a list of ShellFunction-like objects '''
    def __init__(self, functions, filename=None):
        # filename: output path; parent directories are created on write.
        self.filename = filename
        self.filepath = pathlib.Path(self.filename)
        # Pre-sort so identical headers group together; None when empty.
        if functions:
            self.functions = sorted(functions)
        else:
            self.functions = None
    def write_tableofcontents(self, fhout):
        '''build a table of contents'''
        header = None
        for function in self.functions:
            # Emit a group header each time the audience/stability/
            # replaceability triple changes (list is pre-sorted).
            if header != function.header():
                header = function.header()
                fhout.write(f"  * {header}\n")
            # Escape underscores so markdown does not italicize the name.
            markdownsafename = function.name.replace("_", r"\_")
            fhout.write(f"    * [{markdownsafename}](#{function.name})\n")
    def write_output(self):
        """ write the markdown file """
        self.filepath.parent.mkdir(parents=True, exist_ok=True)
        with open(self.filename, "w", encoding='utf-8') as outfile:
            outfile.write(ASFLICENSE)
            self.write_tableofcontents(outfile)
            outfile.write("\n------\n\n")
            # Start as [] (never equal to any header string) so the first
            # group always prints its heading.
            header = []
            for function in self.functions:
                if header != function.header():
                    header = function.header()
                    outfile.write(f"## {header}\n")
                outfile.write(function.getdocpage())
def process_input(inputlist, skipprnorep):
    """Run ProcessFile over every input path and collect the functions.

    :param inputlist: files and/or directories to scan; directories are
                      walked recursively and only names ending in 'sh'
                      are parsed
    :param skipprnorep: skip functions that are Private and not Replaceable
    :return: sorted list of ShellFunction objects; exits(1) when the scan
             produced no functions at all
    """
    def call_process_file(filename, skipsuperprivate):
        '''parse one shell file and return its documented functions'''
        fileprocessor = ProcessFile(filename=filename,
                                    skipsuperprivate=skipsuperprivate)
        fileprocessor.process_file()
        return fileprocessor.functions

    allfuncs = []
    for inputname in inputlist:
        if pathlib.Path(inputname).is_dir():
            for dirpath, _dirnames, filenames in os.walk(inputname):
                for fname in filenames:
                    if fname.endswith('sh'):
                        allfuncs = allfuncs + call_process_file(
                            filename=pathlib.Path(dirpath).joinpath(fname),
                            skipsuperprivate=skipprnorep)
        else:
            allfuncs = allfuncs + call_process_file(
                filename=inputname, skipsuperprivate=skipprnorep)
    # BUG FIX: allfuncs is always a list, so the original "is None" check
    # could never fire; an empty result now triggers the intended error
    # message and non-zero exit.
    if not allfuncs:
        logging.error("ERROR: no functions found.")
        sys.exit(1)
    return sorted(allfuncs)
def getversion():
    """Return the shelldocs version string.

    Looks for a VERSION file next to (or one level above) this module,
    then falls back to parsing ``.mvn/maven.config`` at the project root,
    and finally returns 'Unknown'.
    """
    basepath = pathlib.Path(__file__).parent.resolve()
    for versionfile in [
            basepath.resolve().joinpath('VERSION'),
            basepath.parent.resolve().joinpath('VERSION')
    ]:
        if versionfile.exists():
            # BUG FIX: this open() lacked an explicit encoding, unlike
            # every other open() in this module; made consistent.
            with open(versionfile, encoding='utf-8') as ver_file:
                version = ver_file.read()
            # NOTE(review): unlike the maven branch below, this value is
            # not strip()ed and may carry a trailing newline — preserved
            # for backwards compatibility.
            return version
    # Fall back to "revision=<version>" in the maven config, four levels up.
    mvnversion = basepath.parent.parent.parent.parent.resolve().joinpath(
        '.mvn', 'maven.config')
    if mvnversion.exists():
        with open(mvnversion, encoding='utf-8') as ver_file:
            return ver_file.read().split('=')[1].strip()
    return 'Unknown'
def process_arguments(args=None):
    '''Parse the command line.

    :param args: optional argument list to parse instead of sys.argv[1:]
                 (new optional parameter — the original no-argument call
                 still works; it also makes the parser unit-testable)
    :return: parsed options namespace; exits for --version or usage errors
    '''
    parser = ArgumentParser(
        prog='shelldocs',
        epilog="You can mark a file to be ignored by shelldocs by adding"
        " 'SHELLDOC-IGNORE' as comment in its own line. " +
        "--input may be given multiple times.")
    parser.add_argument("-o",
                        "--output",
                        dest="outfile",
                        action="store",
                        type=str,
                        help="file to create",
                        metavar="OUTFILE")
    parser.add_argument("-i",
                        "--input",
                        dest="infile",
                        action="append",
                        type=str,
                        help="file to read",
                        metavar="INFILE")
    parser.add_argument("--skipprnorep",
                        dest="skipprnorep",
                        action="store_true",
                        help="Skip Private & Not Replaceable")
    parser.add_argument("--lint",
                        dest="lint",
                        action="store_true",
                        help="Enable lint mode")
    parser.add_argument(
        "-V",
        "--version",
        dest="release_version",
        action="store_true",
        default=False,
        help="display version information for shelldocs and exit.")
    options = parser.parse_args(args)
    if options.release_version:
        print(getversion())
        sys.exit(0)
    if options.infile is None:
        parser.error("At least one input file needs to be supplied")
    elif options.outfile is None and not options.lint:
        # BUG FIX: the original tested "options.lint is None", but a
        # store_true option defaults to False, never None, so this usage
        # error could never fire.
        parser.error(
            "At least one of output file and lint mode needs to be specified")
    return options
def main():
    '''main entry point: parse args, scan inputs, lint and/or report'''
    logging.basicConfig(format='%(message)s')
    opts = process_arguments()
    functions = process_input(opts.infile, opts.skipprnorep)
    if opts.lint:
        for shellfunc in functions:
            shellfunc.lint()
    if opts.outfile:
        report = MarkdownReport(functions, filename=opts.outfile)
        report.write_output()
if __name__ == "__main__":
    main()
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,581 | apache/yetus | refs/heads/main | /releasedocmaker/src/main/python/releasedocmaker/__init__.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generate releasenotes based upon JIRA """
import errno
import http.client
import json
import logging
import os
import pathlib
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from glob import glob
from argparse import ArgumentParser
from time import gmtime, strftime, sleep
sys.dont_write_bytecode = True
# pylint: disable=wrong-import-position
from .getversions import GetVersions, ReleaseVersion
from .jira import (Jira, JiraIter, Linter, RELEASE_VERSION, SORTTYPE,
SORTORDER, BACKWARD_INCOMPATIBLE_LABEL, NUM_RETRIES)
from .utils import get_jira, to_unicode, sanitize_text, processrelnote, Outputs
# pylint: enable=wrong-import-position
# These are done in order of preference as to which one seems to be
# more up-to-date at any given point in time. And yes, it is
# ironic that packaging is usually the last one to be
# correct.
EXTENSION = '.md'
ASF_LICENSE = '''
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
'''
def indexbuilder(title, asf_license, format_string):
    """Write an index file for later conversion using mvn site.

    format_string receives four values per document:
    (doc name, version, DOCNAME-without-spaces, version).
    """
    releases = sorted(glob("*[0-9]*.[0-9]*"), reverse=True, key=ReleaseVersion)
    with open("index" + EXTENSION, "w", encoding='utf-8') as out:
        if asf_license is True:
            out.write(ASF_LICENSE)
        for release in releases:
            out.write(f"* {title} v{release}\n")
            for doc in ("Changelog", "Release Notes"):
                out.write(format_string %
                          (doc, release, doc.upper().replace(" ", ""), release))
def buildprettyindex(title, asf_license):
    """Write an index file for later conversion using middleman"""
    # Pretty URLs drop the .html suffix; the four %s slots are filled by
    # indexbuilder with (doc name, version, DOCNAME, version).
    indexbuilder(title, asf_license, " * [%s](%s/%s.%s)\n")
def buildindex(title, asf_license):
    """Write an index file for later conversion using mvn site"""
    # mvn site serves rendered pages, hence the explicit .html suffix;
    # the four %s slots are filled by indexbuilder with
    # (doc name, version, DOCNAME, version).
    indexbuilder(title, asf_license, " * [%s](%s/%s.%s.html)\n")
def buildreadme(title, asf_license):
    """Write an index file for Github using README.md"""
    releases = sorted(glob("[0-9]*.[0-9]*"), reverse=True, key=ReleaseVersion)
    with open("README.md", "w", encoding='utf-8') as out:
        if asf_license is True:
            out.write(ASF_LICENSE)
        for release in releases:
            out.write(f"* {title} v{release}\n")
            for doc in ("Changelog", "Release Notes"):
                out.write(
                    f"  * [{doc}]({release}/{doc.upper().replace(' ', '')}.{release}{EXTENSION})\n"
                )
def getversion():
    """Return releasedocmaker's version string.

    Looks for a VERSION file beside this package, then at the package's
    grandparent, and finally parses revision= from .mvn/maven.config in
    the source tree.  Returns 'Unknown' when nothing matches.
    """
    here = pathlib.Path(__file__).parent.resolve()
    candidates = (here.joinpath('VERSION'),
                  here.parent.parent.joinpath('VERSION'))
    for candidate in candidates:
        if candidate.exists():
            return candidate.read_text(encoding='utf-8')
    maven_config = here.parent.parent.parent.parent.parent.joinpath(
        '.mvn', 'maven.config')
    if maven_config.exists():
        # file content is 'revision=X.Y.Z'; keep only the value
        return maven_config.read_text(encoding='utf-8').split('=')[1].strip()
    return 'Unknown'
def parse_args():  # pylint: disable=too-many-branches
    """Parse command-line arguments and validate option combinations.

    Returns the parsed options.  Exits 0 for -V, exits non-zero via
    parser.error() for invalid combinations (missing version/project,
    multiple output dirs, multi-version runs without a layout flag).
    """
    parser = ArgumentParser(
        prog='releasedocmaker',
        epilog="--project and --version may be given multiple times.")
    parser.add_argument("--dirversions",
                        dest="versiondirs",
                        action="store_true",
                        default=False,
                        help="Put files in versioned directories")
    parser.add_argument("--empty",
                        dest="empty",
                        action="store_true",
                        default=False,
                        help="Create empty files when no issues")
    parser.add_argument(
        "--extension",
        dest="extension",
        default=EXTENSION,
        type=str,
        help="Set the file extension of created Markdown files")
    parser.add_argument("--fileversions",
                        dest="versionfiles",
                        action="store_true",
                        default=False,
                        help="Write files with embedded versions")
    parser.add_argument("-i",
                        "--index",
                        dest="index",
                        action="store_true",
                        default=False,
                        help="build an index file")
    parser.add_argument("-l",
                        "--license",
                        dest="license",
                        action="store_true",
                        default=False,
                        help="Add an ASF license")
    parser.add_argument("-p",
                        "--project",
                        dest="projects",
                        action="append",
                        type=str,
                        help="projects in JIRA to include in releasenotes",
                        metavar="PROJECT")
    parser.add_argument("--prettyindex",
                        dest="prettyindex",
                        action="store_true",
                        default=False,
                        help="build an index file with pretty URLs")
    parser.add_argument("-r",
                        "--range",
                        dest="range",
                        action="store_true",
                        default=False,
                        help="Given versions are a range")
    parser.add_argument(
        "--sortorder",
        dest="sortorder",
        metavar="TYPE",
        default=SORTORDER,
        # dec is supported for backward compatibility
        choices=["asc", "dec", "desc", "newer", "older"],
        help=f"Sorting order for sort type (default: {SORTORDER})")
    parser.add_argument("--sorttype",
                        dest="sorttype",
                        metavar="TYPE",
                        default=SORTTYPE,
                        choices=["resolutiondate", "issueid"],
                        help=f"Sorting type for issues (default: {SORTTYPE})")
    parser.add_argument(
        "-t",
        "--projecttitle",
        dest="title",
        type=str,
        help="Title to use for the project (default is Apache PROJECT)")
    parser.add_argument("-u",
                        "--usetoday",
                        dest="usetoday",
                        action="store_true",
                        default=False,
                        help="use current date for unreleased versions")
    parser.add_argument("-v",
                        "--version",
                        dest="versions",
                        action="append",
                        type=str,
                        help="versions in JIRA to include in releasenotes",
                        metavar="VERSION")
    parser.add_argument(
        "-V",
        dest="release_version",
        action="store_true",
        default=False,
        help="display version information for releasedocmaker and exit.")
    parser.add_argument(
        "-O",
        "--outputdir",
        dest="output_directory",
        action="append",
        type=str,
        help="specify output directory to put release docs to.")
    # BUG FIX: this option previously used action="append" together with a
    # *string* default; argparse's append action cannot append to a str, so
    # any use of -B/--baseurl raised an AttributeError.  The rest of the
    # program (e.g. main() and generate_changelog_line_md()) treats
    # base_url as a single string, so a plain store is the correct action.
    parser.add_argument("-B",
                        "--baseurl",
                        dest="base_url",
                        action="store",
                        type=str,
                        default='https://issues.apache.org/jira',
                        help="specify base URL of the JIRA instance.")
    parser.add_argument(
        "--retries",
        dest="retries",
        action="append",
        type=int,
        help="Specify how many times to retry connection for each URL.")
    parser.add_argument(
        "--skip-credits",
        dest="skip_credits",
        action="store_true",
        default=False,
        help=
        "While creating release notes skip the 'reporter' and 'contributor' columns"
    )
    parser.add_argument(
        "-X",
        "--incompatiblelabel",
        dest="incompatible_label",
        default="backward-incompatible",
        type=str,
        help="Specify the label to indicate backward incompatibility.")
    Linter.add_parser_options(parser)
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    # Handle the version string right away and exit
    if options.release_version:
        logging.info(getversion())
        sys.exit(0)
    # Validate options
    if not options.release_version:
        if options.versions is None:
            parser.error("At least one version needs to be supplied")
        if options.projects is None:
            parser.error("At least one project needs to be supplied")
        if options.base_url is None:
            parser.error("Base URL must be defined")
        if options.output_directory is not None:
            if len(options.output_directory) > 1:
                parser.error("Only one output directory should be given")
            else:
                options.output_directory = options.output_directory[0]
        if options.range or len(options.versions) > 1:
            if not options.versiondirs and not options.versionfiles:
                parser.error(
                    "Multiple versions require either --fileversions or --dirversions"
                )
    return options
def generate_changelog_line_md(base_url, jira):
    '''Render one changelog bullet in Markdown for the given jira object.'''
    issue = sanitize_text(jira.get_id())
    priority = sanitize_text(jira.get_priority())
    summary = sanitize_text(jira.get_summary())
    return (f'* [{issue}]({base_url}/browse/{issue})'
            f' | *{priority}* | **{summary}**\n')
def main():  # pylint: disable=too-many-statements, too-many-branches, too-many-locals
    """ hey, it's main """
    # These module-level knobs are shared with the jira module and with the
    # index builders; command-line options may override them below.
    global BACKWARD_INCOMPATIBLE_LABEL #pylint: disable=global-statement
    global SORTTYPE #pylint: disable=global-statement
    global SORTORDER #pylint: disable=global-statement
    global NUM_RETRIES #pylint: disable=global-statement
    global EXTENSION #pylint: disable=global-statement
    logging.basicConfig(format='%(message)s', level=logging.DEBUG)
    options = parse_args()
    if options.output_directory is not None:
        # Create the output directory if it does not exist.
        try:
            outputpath = pathlib.Path(options.output_directory).resolve()
            outputpath.mkdir(parents=True, exist_ok=True)
        except OSError as exc:
            logging.error("Unable to create output directory %s: %s, %s",
                          options.output_directory, exc.errno, exc.strerror)
            sys.exit(1)
        # All output paths below are relative, so work inside the directory.
        os.chdir(options.output_directory)
    if options.incompatible_label is not None:
        BACKWARD_INCOMPATIBLE_LABEL = options.incompatible_label
    if options.extension is not None:
        EXTENSION = options.extension
    projects = options.projects
    if options.range is True:
        # --range: expand the two given endpoints into the full version list
        # by querying JIRA.
        versions = GetVersions(options.versions, projects,
                               options.base_url).getlist()
    else:
        versions = [ReleaseVersion(v) for v in options.versions]
    versions = sorted(versions)
    SORTTYPE = options.sorttype
    SORTORDER = options.sortorder
    if options.title is None:
        title = projects[0]
    else:
        title = options.title
    if options.retries is not None:
        NUM_RETRIES = options.retries[0]
    haderrors = False
    # Generate RELEASENOTES and CHANGELOG documents for each version.
    for version in versions:
        vstr = str(version)
        linter = Linter(vstr, options)
        jlist = sorted(JiraIter(options.base_url, vstr, projects))
        if not jlist and not options.empty:
            logging.warning(
                "There is no issue which has the specified version: %s",
                version)
            continue
        # Release date: from JIRA if known, else today (with --usetoday),
        # else an explicit "Unreleased" marker.
        if vstr in RELEASE_VERSION:
            reldate = RELEASE_VERSION[vstr]
        elif options.usetoday:
            reldate = strftime("%Y-%m-%d", gmtime())
        else:
            reldate = f"Unreleased (as of {strftime('%Y-%m-%d', gmtime())})"
        if not os.path.exists(vstr) and options.versiondirs:
            os.mkdir(vstr)
        # Choose the output layout from the four combinations of
        # --dirversions (version subdirectory) and --fileversions
        # (version embedded in the filename).
        if options.versionfiles and options.versiondirs:
            reloutputs = Outputs(
                "%(ver)s/RELEASENOTES.%(ver)s%(ext)s",
                "%(ver)s/RELEASENOTES.%(key)s.%(ver)s%(ext)s", [], {
                    "ver": version,
                    "date": reldate,
                    "title": title,
                    "ext": EXTENSION
                })
            choutputs = Outputs("%(ver)s/CHANGELOG.%(ver)s%(ext)s",
                                "%(ver)s/CHANGELOG.%(key)s.%(ver)s%(ext)s", [],
                                {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        elif options.versiondirs:
            reloutputs = Outputs("%(ver)s/RELEASENOTES%(ext)s",
                                 "%(ver)s/RELEASENOTES.%(key)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("%(ver)s/CHANGELOG%(ext)s",
                                "%(ver)s/CHANGELOG.%(key)s%(ext)s", [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        elif options.versionfiles:
            reloutputs = Outputs("RELEASENOTES.%(ver)s%(ext)s",
                                 "RELEASENOTES.%(key)s.%(ver)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("CHANGELOG.%(ver)s%(ext)s",
                                "CHANGELOG.%(key)s.%(ver)s%(ext)s", [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        else:
            reloutputs = Outputs("RELEASENOTES%(ext)s",
                                 "RELEASENOTES.%(key)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("CHANGELOG%(ext)s", "CHANGELOG.%(key)s%(ext)s",
                                [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        if options.license is True:
            reloutputs.write_all(ASF_LICENSE)
            choutputs.write_all(ASF_LICENSE)
        relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
                  'These release notes cover new developer and user-facing ' \
                  'incompatibilities, important issues, features, and major improvements.\n\n'
        chhead = '# %(title)s Changelog\n\n' \
                 '## Release %(ver)s - %(date)s\n'\
                 '\n'
        reloutputs.write_all(relhead)
        choutputs.write_all(chhead)
        # Bucket each issue into exactly one changelog section; the first
        # matching category wins (incompatible beats important beats type).
        incompatlist = []
        importantlist = []
        buglist = []
        improvementlist = []
        newfeaturelist = []
        subtasklist = []
        tasklist = []
        testlist = []
        otherlist = []
        for jira in jlist:
            if jira.get_incompatible_change():
                incompatlist.append(jira)
            elif jira.get_important():
                importantlist.append(jira)
            elif jira.get_type() == "Bug":
                buglist.append(jira)
            elif jira.get_type() == "Improvement":
                improvementlist.append(jira)
            elif jira.get_type() == "New Feature":
                newfeaturelist.append(jira)
            elif jira.get_type() == "Sub-task":
                subtasklist.append(jira)
            elif jira.get_type() == "Task":
                tasklist.append(jira)
            elif jira.get_type() == "Test":
                testlist.append(jira)
            else:
                otherlist.append(jira)
            line = generate_changelog_line_md(options.base_url, jira)
            # Release notes only list issues that have a note or are
            # flagged incompatible/important.
            if jira.get_release_note() or \
               jira.get_incompatible_change() or jira.get_important():
                reloutputs.write_key_raw(jira.get_project(), "\n---\n\n")
                reloutputs.write_key_raw(jira.get_project(), line)
                if not jira.get_release_note():
                    line = '\n**WARNING: No release note provided for this change.**\n\n'
                else:
                    line = f'\n{processrelnote(jira.get_release_note())}\n\n'
                reloutputs.write_key_raw(jira.get_project(), line)
            linter.lint(jira)
        # In lint mode, discard this version's output on error and keep
        # going so all versions get checked; exit status is set at the end.
        if linter.enabled:
            if linter.had_errors():
                logging.error(linter.message())
                haderrors = True
                if os.path.exists(vstr):
                    shutil.rmtree(vstr)
                continue
        reloutputs.write_all("\n\n")
        reloutputs.close()
        if options.skip_credits:
            change_header21 = "| JIRA | Summary | Priority | " + \
                              "Component |\n"
            change_header22 = "|:---- |:---- | :--- |:---- |\n"
        else:
            change_header21 = "| JIRA | Summary | Priority | " + \
                              "Component | Reporter | Contributor |\n"
            change_header22 = "|:---- |:---- | :--- |:---- |:---- |:---- |\n"
        # Emit one Markdown table per non-empty category.
        if incompatlist:
            choutputs.write_all("### INCOMPATIBLE CHANGES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(incompatlist, options.skip_credits,
                                 options.base_url)
        if importantlist:
            choutputs.write_all("\n\n### IMPORTANT ISSUES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(importantlist, options.skip_credits,
                                 options.base_url)
        if newfeaturelist:
            choutputs.write_all("\n\n### NEW FEATURES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(newfeaturelist, options.skip_credits,
                                 options.base_url)
        if improvementlist:
            choutputs.write_all("\n\n### IMPROVEMENTS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(improvementlist, options.skip_credits,
                                 options.base_url)
        if buglist:
            choutputs.write_all("\n\n### BUG FIXES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(buglist, options.skip_credits,
                                 options.base_url)
        if testlist:
            choutputs.write_all("\n\n### TESTS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(testlist, options.skip_credits,
                                 options.base_url)
        if subtasklist:
            choutputs.write_all("\n\n### SUB-TASKS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(subtasklist, options.skip_credits,
                                 options.base_url)
        # Tasks and unrecognized types share the OTHER section.
        if tasklist or otherlist:
            choutputs.write_all("\n\n### OTHER:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(otherlist, options.skip_credits,
                                 options.base_url)
            choutputs.write_list(tasklist, options.skip_credits,
                                 options.base_url)
        choutputs.write_all("\n\n")
        choutputs.close()
    # Optional index/README generation across all emitted versions.
    if options.index:
        buildindex(title, options.license)
        buildreadme(title, options.license)
    if options.prettyindex:
        buildprettyindex(title, options.license)
    if haderrors is True:
        sys.exit(1)
if __name__ == "__main__":
    main()
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,582 | apache/yetus | refs/heads/main | /precommit/src/main/shell/plugins.d/detsecrets_parse.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' helper app for detect-secrets to take the json and make it colon delimited '''
import json
import logging
import pathlib
import sys
# Known/ignored secret hashes loaded from the optional second argument.
hashdict = []
INPUTFILE = sys.argv[1]
INPUTPATH = pathlib.Path(INPUTFILE).resolve()
if len(sys.argv) == 3:
    HASHFILE = sys.argv[2]
    HASHPATH = pathlib.Path(HASHFILE).resolve()
    if HASHPATH.exists():
        with open(HASHPATH, encoding='utf-8') as filein:
            while True:
                line = filein.readline()
                if not line:
                    break
                # '#' lines are comments in the ignore file
                if line.startswith('#'):
                    continue
                hashdict.append(line.strip())
if not INPUTPATH.exists() or not INPUTPATH.is_file():
    logging.error('%s does not exist or is not a file.', INPUTPATH)
    sys.exit(1)
with open(INPUTFILE, encoding='utf-8') as filein:
    rawdata = filein.read()
jsondata = json.loads(rawdata)
# Walk the detect-secrets report, sorted by file name for stable output.
for filename, results in sorted(jsondata['results'].items(),
                                key=lambda x: x[0]):
    for result in results:
        linenumber = result['line_number']
        resulttype = result['type']
        hashsecret = result['hashed_secret']
        if hashsecret in hashdict:
            continue
        # BUG FIX: emit the actual file name in the colon-delimited
        # file:line:hash:type record; the previous code printed the
        # literal string '(unknown)' even though filename is available.
        print(f'{filename}:{linenumber}:{hashsecret}:{resulttype}')
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,583 | apache/yetus | refs/heads/main | /releasedocmaker/src/main/python/releasedocmaker/jira.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Handle JIRA Issues """
import http.client
import json
import logging
import re
import sys
import urllib.parse
import urllib.error
import time
try:
import dateutil.parser
except ImportError:
logging.error(
("This script requires python-dateutil module to be installed. "
"You can install it using:\n\t pip install python-dateutil"))
sys.exit(1)
from .utils import get_jira, to_unicode, sanitize_text
RELEASE_VERSION = {}
SORTTYPE = 'resolutiondate'
SORTORDER = 'older'
NUM_RETRIES = 5
# label to be used to mark an issue as Incompatible change.
BACKWARD_INCOMPATIBLE_LABEL = 'backward-incompatible'
class Jira:
    """A single JIRA issue.

    Wraps one item from a JIRA REST search response.  'parent' is the
    JiraIter that owns the server's field-name -> field-id map; derived
    values (release note, incompatible/important flags) are computed
    lazily and cached on the instance.
    """
    def __init__(self, data, parent):
        self.key = data['key']
        self.fields = data['fields']
        self.parent = parent
        # lazily-computed caches; None means "not computed yet"
        self.notes = None
        self.incompat = None
        self.reviewed = None
        self.important = None
    def get_id(self):
        """ get the Issue ID """
        return to_unicode(self.key)
    def get_description(self):
        """ get the description """
        return to_unicode(self.fields['description'])
    def get_release_note(self):
        """Return the release note text, computing and caching it once.

        Falls back to the description for incompatible/important issues
        and to the empty string otherwise."""
        if self.notes is None:
            # .get(): the 'Release Note' custom field may not exist on
            # every JIRA server; avoid a KeyError in that case.
            field = self.parent.field_id_map.get('Release Note')
            if field and field in self.fields:
                self.notes = to_unicode(self.fields[field])
            elif self.get_incompatible_change() or self.get_important():
                self.notes = self.get_description()
            else:
                self.notes = ""
        return self.notes
    def get_priority(self):
        """ Get the priority """
        ret = ""
        pri = self.fields['priority']
        if pri is not None:
            ret = pri['name']
        return to_unicode(ret)
    def get_assignee(self):
        """ Get the assignee """
        ret = ""
        mid = self.fields['assignee']
        if mid is not None:
            ret = mid['displayName']
        return to_unicode(ret)
    def get_components(self):
        """ Get the component(s) as a comma-separated string """
        if self.fields['components']:
            return ", ".join(
                [comp['name'] for comp in self.fields['components']])
        return ""
    def get_summary(self):
        """ Get the summary """
        return self.fields['summary']
    def get_type(self):
        """ Get the Issue type """
        ret = ""
        mid = self.fields['issuetype']
        if mid is not None:
            ret = mid['name']
        return to_unicode(ret)
    def get_reporter(self):
        """ Get the issue reporter """
        ret = ""
        mid = self.fields['reporter']
        if mid is not None:
            ret = mid['displayName']
        return to_unicode(ret)
    def get_project(self):
        """ get the project """
        ret = ""
        mid = self.fields['project']
        if mid is not None:
            ret = mid['key']
        return to_unicode(ret)
    def __lt__(self, other):
        """Order issues per the module-level SORTTYPE/SORTORDER settings."""
        if SORTTYPE == 'issueid':
            # compare by issue name-number, e.g. 'YETUS-123'
            selfsplit = self.get_id().split('-')
            othersplit = other.get_id().split('-')
            # BUG FIX: only fall back to the numeric comparison when the
            # project prefixes are equal; the old code also compared the
            # numbers whenever self's prefix merely sorted >= other's,
            # producing an inconsistent order across projects.
            if selfsplit[0] == othersplit[0]:
                result = int(selfsplit[1]) < int(othersplit[1])
            else:
                result = selfsplit[0] < othersplit[0]
            # dec is supported for backward compatibility
            if SORTORDER in ['dec', 'desc']:
                result = not result
        elif SORTTYPE == 'resolutiondate':
            dts = dateutil.parser.parse(self.fields['resolutiondate'])
            dto = dateutil.parser.parse(other.fields['resolutiondate'])
            result = dts < dto
            if SORTORDER == 'newer':
                result = not result
        else:
            # previously this fell through to an UnboundLocalError
            raise ValueError(f"Unknown sort type: {SORTTYPE}")
        return result
    def get_incompatible_change(self):
        """Return (and cache) whether this issue is an incompatible change."""
        if self.incompat is None:
            # .get(): 'Hadoop Flags' is a custom field that may not be
            # defined on this JIRA server at all.
            field = self.parent.field_id_map.get('Hadoop Flags')
            self.reviewed = False
            self.incompat = False
            if field and field in self.fields:
                if self.fields[field]:
                    for flag in self.fields[field]:
                        if flag['value'] == "Incompatible change":
                            self.incompat = True
                        if flag['value'] == "Reviewed":
                            self.reviewed = True
            else:
                # Custom field 'Hadoop Flags' is not defined,
                # search for 'backward-incompatible' label
                field = self.parent.field_id_map.get('Labels')
                if field and field in self.fields and self.fields[field]:
                    if BACKWARD_INCOMPATIBLE_LABEL in self.fields[field]:
                        self.incompat = True
                        self.reviewed = True
        return self.incompat
    def get_important(self):
        """Return (and cache) whether this issue carries the Important flag."""
        if self.important is None:
            # .get(): 'Flags' may not be defined on this JIRA server.
            field = self.parent.field_id_map.get('Flags')
            self.important = False
            if field and field in self.fields:
                if self.fields[field]:
                    for flag in self.fields[field]:
                        if flag['value'] == "Important":
                            self.important = True
        return self.important
class JiraIter:
    """An Iterator of JIRAs"""
    # All issues are fetched eagerly in __init__; iteration just walks the
    # cached list.  collect_jiras() also populates the module-level
    # RELEASE_VERSION map as a side effect.
    @staticmethod
    def collect_fields(jira_base_url):
        """send a query to JIRA and collect field-id map"""
        try:
            resp = get_jira(f"{jira_base_url}/rest/api/2/field")
            data = json.loads(resp.read())
        except (urllib.error.HTTPError, urllib.error.URLError,
                http.client.BadStatusLine, ValueError) as error:
            logging.error('Blew up trying to get a response: %s', error)
            sys.exit(1)
        # Map human-readable field names (e.g. 'Release Note') to JIRA's
        # internal ids (e.g. 'customfield_12345').
        field_id_map = {}
        for part in data:
            field_id_map[part['name']] = part['id']
        return field_id_map
    @staticmethod
    def query_jira(jira_base_url, ver, projects, pos):
        """send a query to JIRA and collect
        a certain number of issue information"""
        # page size; 'pos' is the zero-based offset of the first issue
        count = 100
        pjs = "','".join(projects)
        jql = f"project in ('{pjs}') and fixVersion in ('{ver}') and resolution = Fixed"
        params = urllib.parse.urlencode({
            'jql': jql,
            'startAt': pos,
            'maxResults': count
        })
        return JiraIter.load_jira(jira_base_url, params, 0)
    @staticmethod
    def load_jira(jira_base_url, params, fail_count):
        """send query to JIRA and collect with retries"""
        try:
            resp = get_jira(f"{jira_base_url}/rest/api/2/search?{params}")
        except (urllib.error.URLError, http.client.BadStatusLine) as err:
            return JiraIter.retry_load(jira_base_url, err, params, fail_count)
        try:
            data = json.loads(resp.read())
        except http.client.IncompleteRead as err:
            return JiraIter.retry_load(jira_base_url, err, params, fail_count)
        return data
    @staticmethod
    def retry_load(jira_base_url, err, params, fail_count):
        """Retry connection up to NUM_RETRIES times."""
        # Mutual recursion with load_jira(); depth is bounded by
        # NUM_RETRIES so the stack stays shallow.
        logging.error(err)
        fail_count += 1
        if fail_count <= NUM_RETRIES:
            logging.warning("Connection failed %s times. Retrying.",
                            fail_count)
            time.sleep(1)
            return JiraIter.load_jira(jira_base_url, params, fail_count)
        logging.error("Connection failed %s times. Aborting.", fail_count)
        sys.exit(1)
    @staticmethod
    def collect_jiras(jira_base_url, ver, projects):
        """send queries to JIRA and collect all issues
        that belongs to given version and projects"""
        jiras = []
        pos = 0
        end = 1
        while pos < end:
            data = JiraIter.query_jira(jira_base_url, ver, projects, pos)
            # NOTE(review): JIRA's REST API reports errors under
            # 'errorMessages'; this checks 'error_messages' — confirm
            # which key the server actually returns here.
            if 'error_messages' in data:
                logging.error("JIRA returns error message: %s",
                              data['error_messages'])
                sys.exit(1)
            # advance pagination using the server-reported page bounds
            pos = data['startAt'] + data['maxResults']
            end = data['total']
            jiras.extend(data['issues'])
            # remember each fix version's release date for the page header
            if ver not in RELEASE_VERSION:
                for issue in data['issues']:
                    for fix_version in issue['fields']['fixVersions']:
                        if 'releaseDate' in fix_version:
                            RELEASE_VERSION[fix_version['name']] = fix_version[
                                'releaseDate']
        return jiras
    def __init__(self, jira_base_url, version, projects):
        self.version = version
        self.projects = projects
        self.jira_base_url = jira_base_url
        self.field_id_map = JiraIter.collect_fields(jira_base_url)
        # JIRA stores released versions without the -SNAPSHOT suffix
        ver = str(version).replace("-SNAPSHOT", "")
        self.jiras = JiraIter.collect_jiras(jira_base_url, ver, projects)
        self.iter = self.jiras.__iter__()
    def __iter__(self):
        return self
    def __next__(self):
        """ get next """
        data = next(self.iter)
        j = Jira(data, self)
        return j
class Linter:
    """Encapsulates lint-related functionality.
    Maintains running lint statistics about JIRAs."""
    # Class-level list of recognized filter names; treated as read-only.
    _valid_filters = [
        "incompatible", "important", "version", "component", "assignee"
    ]
    def __init__(self, version, options):
        self._warning_count = 0
        self._error_count = 0
        self._lint_message = ""
        self._version = version
        # all filters start disabled; _parse_options() enables them
        self._filters = dict(
            list(zip(self._valid_filters, [False] * len(self._valid_filters))))
        self.enabled = False
        self._parse_options(options)
    @staticmethod
    def add_parser_options(parser):
        """Add Linter options to passed optparse parser."""
        filter_string = ", ".join("'" + f + "'" for f in Linter._valid_filters)
        parser.add_argument(
            "-n",
            "--lint",
            dest="lint",
            action="append",
            type=str,
            help="Specify lint filters. Valid filters are " + filter_string +
            ". " + "'all' enables all lint filters. " +
            "Multiple filters can be specified comma-delimited and " +
            "filters can be negated, e.g. 'all,-component'.")
    def _parse_options(self, options):
        """Parse --lint values from the parsed arguments; exits on an
        unknown filter name."""
        if options.lint is None or not options.lint:
            return
        self.enabled = True
        # Valid filter specifications are
        # self._valid_filters, negations, and "all"
        # BUG FIX: copy the class-level list before extending it.  The old
        # 'valid_list = self._valid_filters' aliased the class attribute,
        # so '+=' mutated Linter._valid_filters in place and corrupted
        # every Linter constructed afterwards (one is built per version).
        valid_list = list(self._valid_filters)
        valid_list += ["-" + v for v in valid_list]
        valid_list += ["all"]
        valid = set(valid_list)
        enabled = []
        disabled = []
        for opt in options.lint:
            for token in opt.split(","):
                if token not in valid:
                    logging.error(
                        "Unknown lint filter '%s', valid options are: %s",
                        token, ', '.join(v for v in sorted(valid)))
                    sys.exit(1)
                if token.startswith("-"):
                    disabled.append(token[1:])
                else:
                    enabled.append(token)
        # apply enables first so negations always win
        for eopt in enabled:
            if eopt == "all":
                for filt in self._valid_filters:
                    self._filters[filt] = True
            else:
                self._filters[eopt] = True
        for disopt in disabled:
            self._filters[disopt] = False
    def had_errors(self):
        """Returns True if a lint error was encountered, else False."""
        return self._error_count > 0
    def message(self):
        """Return summary lint message suitable for printing to stdout."""
        if not self.enabled:
            return None
        msg = self._lint_message
        msg += "\n======================================="
        msg += f"\n{self._version}: Error:{self._error_count}, Warning:{self._warning_count} \n"
        return msg
    def _check_missing_component(self, jira):
        """Return if JIRA has a 'missing component' lint error."""
        if not self._filters["component"]:
            return False
        if jira.fields['components']:
            return False
        return True
    def _check_missing_assignee(self, jira):
        """Return if JIRA has a 'missing assignee' lint error."""
        if not self._filters["assignee"]:
            return False
        if jira.fields['assignee'] is not None:
            return False
        return True
    def _check_version_string(self, jira):
        """Return if JIRA has a version string lint error."""
        if not self._filters["version"]:
            return False
        field = jira.parent.field_id_map['Fix Version/s']
        # accept dotted numeric versions (1.2.3...) or PROJECT-NNN names
        for ver in jira.fields[field]:
            found = re.match(r'^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', ver['name'])
            if not found:
                return True
        return False
    def lint(self, jira):
        """Run lint check on a JIRA; accumulates warnings/errors."""
        if not self.enabled:
            return
        if not jira.get_release_note():
            jiraid = sanitize_text(jira.get_id())
            if self._filters["incompatible"] and jira.get_incompatible_change(
            ):
                self._warning_count += 1
                self._lint_message += f"\nWARNING: incompatible change {jiraid} lacks release notes." #pylint: disable=line-too-long
            if self._filters["important"] and jira.get_important():
                self._warning_count += 1
                self._lint_message += f"\nWARNING: important issue {jiraid} lacks release notes."
        if self._check_version_string(jira):
            self._warning_count += 1
            self._lint_message += f"\nWARNING: Version string problem for {jira.get_id()} "
        if self._check_missing_component(jira) or self._check_missing_assignee(
                jira):
            self._error_count += 1
            error_message = []
            if self._check_missing_component(jira):
                error_message.append("component")
            if self._check_missing_assignee(jira):
                error_message.append("assignee")
            multimessage = ' and '.join(error_message)
            self._lint_message += f"\nERROR: missing {multimessage} for {jira.get_id()} "
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,584 | apache/yetus | refs/heads/main | /releasedocmaker/src/main/python/releasedocmaker/utils.py | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utility methods used by releasedocmaker """
import base64
import os
import re
import urllib.request
import urllib.error
import urllib.parse
import sys
import json
import http.client
sys.dont_write_bytecode = True
NAME_PATTERN = re.compile(r' \([0-9]+\)')
def get_jira(jira_url):
    """ Provide standard method for fetching content from apache jira and
        handling of potential errors.  Returns a urllib response object or
        exits/raises on failure.

        Credentials are read from RDM_JIRA_USERNAME / RDM_JIRA_PASSWORD.
    """
    username = os.environ.get('RDM_JIRA_USERNAME')
    password = os.environ.get('RDM_JIRA_PASSWORD')
    req = urllib.request.Request(jira_url)
    if username and password:
        # b64encode operates on bytes, so encode the credentials first and
        # decode the result back to str for the header value.  (The old code
        # passed a str to b64encode, which raises TypeError on Python 3.)
        credentials = f"{username}:{password}".encode('utf-8')
        basicauth = base64.b64encode(credentials).decode('ascii').replace('\n', '')
        req.add_header('Authorization', f'Basic {basicauth}')
    try:
        response = urllib.request.urlopen(req)  # pylint: disable=consider-using-with
    except urllib.error.HTTPError as http_err:
        code = http_err.code
        print(f"JIRA returns HTTP error {code}: {http_err.msg}. Aborting.")
        error_response = http_err.read()
        try:
            error_response = json.loads(error_response)
            print("- Please ensure that specified authentication, projects,"\
                  " fixVersions etc. are correct.")
            for message in error_response['errorMessages']:
                print("-", message)
        except ValueError:
            print("FATAL: Could not parse json response from server.")
        # Always abort on an HTTP error.  (The old code only exited on the
        # JSON-parse failure path and otherwise fell through to
        # `return response` with `response` unbound -> NameError.)
        sys.exit(1)
    except urllib.error.URLError as url_err:
        print(f"Error contacting JIRA: {jira_url}\n")
        print(f"Reason: {url_err.reason}")
        raise url_err
    except http.client.BadStatusLine as err:
        raise err
    return response
def format_components(input_string):
    """ Strip issue-count markers and single quotes from a component list and
        return a value safe for a markdown table cell (never empty). """
    # Drop trailing " (<count>)" markers and any single quotes.
    cleaned = re.sub(NAME_PATTERN, '', input_string).replace("'", "")
    if cleaned == "":
        # some markdown parsers don't like empty tables
        cleaned = "."
    # NOTE: the original ran the NAME_PATTERN substitution a second time here;
    # the first pass already removed every match, so that was a no-op.
    return sanitize_markdown(cleaned)
def sanitize_markdown(input_string):
    """ Sanitize Markdown input so it can be handled by Python.
        The expectation is that the input is already valid Markdown,
        so no additional escaping is required: just drop carriage
        returns and trailing whitespace. """
    return input_string.replace('\r', '').rstrip()
def sanitize_text(input_string):
    """ Sanitize arbitrary text so it can be embedded in MultiMarkdown output.
        Note that MultiMarkdown is not Markdown, and cannot be parsed as such.
        For instance, when using pandoc, invoke it as `pandoc -f markdown_mmd`.
        Calls sanitize_markdown at the end as a final pass.
    """
    # See: https://daringfireball.net/projects/markdown/syntax#backslash
    # We only escape a subset of special characters. We ignore characters
    # that only have significance at the start of a line.
    # str.translate does the whole escape in a single C-level pass, which
    # also prevents double escaping by construction.
    escape_table = str.maketrans({char: "\\" + char for char in "_<>*|`\\"})
    return sanitize_markdown(input_string.rstrip().translate(escape_table))
def processrelnote(input_string):
    """ if release notes have a special marker, we'll treat them as already in markdown format """
    marker = re.match(r'^\<\!\-\- ([a-z]+) \-\-\>', input_string)
    if marker is not None and marker.group(1) == 'markdown':
        # explicit "<!-- markdown -->" marker: pass through with minimal cleanup
        return sanitize_markdown(input_string)
    # no (or unknown) marker: treat as arbitrary text and escape it
    return sanitize_text(input_string)
def to_unicode(obj):
    """ convert an arbitrary value to str, mapping None to the empty string """
    return "" if obj is None else str(obj)
class Outputs:
    """Several different files to output to at the same time.

    One "base" file receives everything; each key in *keys* additionally
    gets its own file (named via *file_name_pattern* %-interpolated with
    ``{'key': key}`` plus *params*).
    """

    def __init__(self, base_file_name, file_name_pattern, keys, params=None):
        if params is None:
            params = {}
        self.params = params
        self.base = open(base_file_name % params, 'w', encoding='utf-8')  # pylint: disable=consider-using-with
        self.others = {}
        for key in keys:
            both = dict(params)
            both['key'] = key
            filename = file_name_pattern % both
            self.others[key] = open(filename, 'w', encoding='utf-8')  # pylint: disable=consider-using-with

    def write_all(self, pattern):
        """ write everything given a %-style pattern; the base file gets the
            pattern with an empty key, each per-key file gets its own key """
        both = dict(self.params)
        both['key'] = ''
        self.base.write(pattern % both)
        for key, filehandle in self.others.items():
            both = dict(self.params)
            both['key'] = key
            filehandle.write(pattern % both)

    def write_key_raw(self, key, input_string):
        """ write a string (or UTF-8 bytes) without changes to the base file
            and, if present, the file for *key* """
        # All files are opened in text mode, so normalize bytes to str once.
        # (The old code wrote raw bytes to the base file and called .decode()
        # on str input for the per-key file -- both raised on Python 3.)
        if isinstance(input_string, bytes):
            input_string = input_string.decode("utf-8")
        self.base.write(input_string)
        if key in self.others:
            self.others[key].write(input_string)

    def close(self):
        """ close all the outputs """
        self.base.close()
        for value in list(self.others.values()):
            value.close()

    def write_list(self, mylist, skip_credits, base_url):
        """ Take a Jira object and write out the relevants parts in a multimarkdown table line"""
        for jira in sorted(mylist):
            if skip_credits:
                line = '| [{id}]({base_url}/browse/{id}) | {summary} | ' \
                       '{priority} | {component} |\n'
            else:
                line = '| [{id}]({base_url}/browse/{id}) | {summary} | ' \
                       '{priority} | {component} | {reporter} | {assignee} |\n'
            args = {
                'id': jira.get_id(),
                'base_url': base_url,
                'summary': sanitize_text(jira.get_summary()),
                'priority': sanitize_text(jira.get_priority()),
                'component': format_components(jira.get_components()),
                'reporter': sanitize_text(jira.get_reporter()),
                'assignee': sanitize_text(jira.get_assignee())
            }
            line = line.format(**args)
            self.write_key_raw(jira.get_project(), line)
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,585 | apache/yetus | refs/heads/main | /precommit/src/main/python/jenkins-admin.py | #!/usr/bin/env python3
# pylint: disable=invalid-name
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
""" Process patch file attachments from JIRA using a query """
#
# we actually want native encoding so tell pylint to be quiet
#
# pylint: disable=unspecified-encoding
from argparse import ArgumentParser
from tempfile import NamedTemporaryFile
from xml.etree import ElementTree
import os
import pathlib
import re
import sys
import requests
def http_get(resource, ignore_error=False, username=None, password=None):
    """ get the contents of a URL; returns the body as text.

    On an HTTP error: returns '' when ignore_error is True, otherwise prints
    a diagnostic and exits the process.
    """
    try:
        if username and password:
            response = requests.get(resource, auth=(username, password), timeout=10)
        else:
            response = requests.get(resource, timeout=10)
        response.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        errstr = str(http_err)
        # http_err.response is the response that failed raise_for_status().
        # (The old message still carried '%' sigils left over from a
        # %-formatting to f-string conversion.)
        print(
            f'{resource} returns HTTP error {http_err.response.status_code}: {errstr}\n'
        )
        if ignore_error:
            return ''
        print('Aborting.')
        sys.exit(1)
    return response.text
def parse_jira_data(filename):
    """ Parse a JIRA RSS/XML export and return a map of
        (project, issue-number) => newest (highest) attachment id.

    Items without a key, with a key that does not look like PROJECT-123,
    or with no numeric attachment ids are skipped.
    """
    tree = ElementTree.parse(filename)
    root = tree.getroot()
    jirapattern = re.compile(r'([A-Z]+)\-([0-9]+)')
    result = {}
    for item in root.findall('./channel/item'):
        jirakey = item.find('key')
        if jirakey is None:
            continue
        matcher = jirapattern.match(jirakey.text)
        if not matcher:
            continue
        jiraissue = (matcher.group(1), matcher.group(2))
        attachmentids = []
        for jiraattachment in item.findall('./attachments/attachment'):
            try:
                attachmentids.append(int(jiraattachment.get('id')))
            except (TypeError, ValueError):
                # TypeError: attachment has no id attribute (get() -> None);
                # ValueError: id is not numeric.  Skip either way.  (The old
                # code only caught ValueError and crashed on a missing id.)
                pass
        if attachmentids:
            # keep only the newest (highest-numbered) attachment
            result[jiraissue] = max(attachmentids)
    return result
def main(): #pylint: disable=too-many-branches, too-many-statements, too-many-locals
    """ main program: fetch a JIRA filter feed, find newest patch attachments,
        and submit a Jenkins precommit job per unprocessed (issue, attachment)
        pair, tracking already-processed pairs in patch_tested.txt. """
    # --- CLI setup; JENKINS_URL / JOB_NAME env vars seed the defaults ---
    parser = ArgumentParser(prog='jenkins-admin')
    if os.getenv('JENKINS_URL'):
        parser.set_defaults(jenkinsurl=os.getenv('JENKINS_URL'))
    if os.getenv('JOB_NAME'):
        parser.set_defaults(jenkinsJobName=os.getenv('JOB_NAME'))
    else:
        parser.set_defaults(jenkinsJobName='PreCommit-Admin')
    parser.set_defaults(jenkinsJobTemplate='PreCommit-{project}')
    parser.add_argument('--initialize',
                        action='store_true',
                        dest='jenkinsInit',
                        help='Start a new patch_tested.txt file')
    parser.add_argument('--jenkins-jobname',
                        type=str,
                        dest='jenkinsJobName',
                        help='PreCommit-Admin JobName',
                        metavar='JOB_NAME')
    parser.add_argument('--jenkins-project-template',
                        type=str,
                        dest='jenkinsJobTemplate',
                        help='Template for project jobs',
                        metavar='TEMPLATE')
    parser.add_argument('--jenkins-token',
                        type=str,
                        dest='jenkinsToken',
                        help='Jenkins Token',
                        metavar='TOKEN')
    parser.add_argument('--jenkins-url',
                        type=str,
                        dest='jenkinsurl',
                        help='Jenkins base URL',
                        metavar='URL')
    parser.add_argument(
        '--jenkins-url-override',
        type=str,
        dest='jenkinsurloverrides',
        action='append',
        help='Project specific Jenkins base URL',
        metavar='PROJECT=URL',
    )
    parser.add_argument('--jira-filter',
                        type=str,
                        dest='jiraFilter',
                        help='JIRA filter URL',
                        metavar='URL')
    parser.add_argument('--jira-user',
                        type=str,
                        dest='jiraUser',
                        help='JIRA username')
    parser.add_argument('--jira-password',
                        type=str,
                        dest='jiraPassword',
                        help='JIRA password')
    parser.add_argument('--live',
                        dest='live',
                        action='store_true',
                        help='Submit Job to jenkins')
    parser.add_argument('--max-history',
                        dest='history',
                        type=int,
                        help='Maximum history to store',
                        default=5000)
    parser.add_argument(
        '-V',
        '--version',
        dest='release_version',
        action='store_true',
        default=False,
        help="display version information for jenkins-admin and exit.")
    options = parser.parse_args()
    # Handle the version string right away and exit
    if options.release_version:
        # Try the installed-layout VERSION file first, then the source-tree
        # maven.config (key=value format).
        execname = pathlib.Path(__file__)
        binversion = execname.joinpath("..", "..", "VERSION").resolve()
        mvnversion = execname.joinpath("..", "..", "..", "..", "..", ".mvn",
                                       "maven.config").resolve()
        if binversion.exists():
            with open(binversion, encoding='utf-8') as ver_file:
                print(ver_file.read().strip())
        elif mvnversion.exists():
            with open(mvnversion, encoding='utf-8') as ver_file:
                print(ver_file.read().split('=')[1].strip())
        sys.exit(0)
    # Build the token query fragment; '{project}' is substituted later,
    # per issue, via str.format.
    token_frag = ''
    if options.jenkinsToken:
        token_frag = f'token={options.jenkinsToken}'
    else:
        token_frag = 'token={project}-token'
    # --- argument validation ---
    if not options.jiraFilter:
        parser.error('ERROR: --jira-filter is a required argument.')
    if not options.jenkinsurl:
        parser.error(
            'ERROR: --jenkins-url or the JENKINS_URL environment variable is required.'
        )
    if options.history < 0:
        parser.error('ERROR: --max-history must be 0 or a positive integer.')
    # Per-project Jenkins URL overrides, keyed by upper-cased project name.
    jenkinsurloverrides = {}
    if options.jenkinsurloverrides:
        for override in options.jenkinsurloverrides:
            if '=' not in override:
                parser.error('Invalid Jenkins Url Override: ' + override)
            (project, url) = override.split('=', 1)
            jenkinsurloverrides[project.upper()] = url
    # Temp file to hold the downloaded JIRA RSS; kept on disk in dry-run
    # mode so the data can be inspected (see the finally block).
    tempfile = NamedTemporaryFile(delete=False) # pylint: disable=consider-using-with
    try:
        # --- fetch the history of already-tested issues, unless --initialize ---
        jobloghistory = None
        if not options.jenkinsInit:
            lsb = 'lastSuccessfulBuild/artifact/patch_tested.txt'
            lcb = 'lastCompletedBuild/artifact/patch_tested.txt'
            jobloghistory = http_get(
                f'{options.jenkinsurl}/job/{options.jenkinsJobName}/{lsb}',
                True)
            # if we don't have a successful build available try the last build
            if not jobloghistory:
                jobloghistory = http_get(
                    f'{options.jenkinsurl}/job/{options.jenkinsJobName}/{lcb}')
            jobloghistory = jobloghistory.strip().split('\n')
            if 'TESTED ISSUES' not in jobloghistory[0]:
                print(
                    'Downloaded patch_tested.txt control file may be corrupted. Failing.'
                )
                sys.exit(1)
        # we are either going to write a new one or rewrite the old one
        joblog = open('patch_tested.txt', 'w+') # pylint: disable=consider-using-with
        if jobloghistory:
            # keep the header line plus at most the last --max-history records
            if len(jobloghistory) > options.history:
                jobloghistory = [jobloghistory[0]] \
                    + jobloghistory[len(jobloghistory) \
                    - options.history:]
            for jobhistoryrecord in jobloghistory:
                joblog.write(jobhistoryrecord + '\n')
        else:
            joblog.write('TESTED ISSUES\n')
        joblog.flush()
        # --- download the JIRA filter feed and parse attachment info ---
        rssdata = http_get(options.jiraFilter, False, options.jiraUser,
                           options.jiraPassword)
        tempfile.write(rssdata.encode('utf-8'))
        tempfile.flush()
        for (key, attachment) in list(parse_jira_data(tempfile.name).items()):
            (project, issue) = key
            if jenkinsurloverrides.get(project):
                url = jenkinsurloverrides[project]
            else:
                url = options.jenkinsurl
            jenkinsurltemplate = url + '/job/' \
                + options.jenkinsJobTemplate \
                + '/buildWithParameters?' + token_frag \
                + '&ISSUE_NUM={issue}&ATTACHMENT_ID={attachment}'
            url_args = {
                'project': project,
                'issue': issue,
                'attachment': attachment,
            }
            jenkinsurl = jenkinsurltemplate.format(**url_args)
            # submit job
            jobname = f'{project}-{issue},{attachment}'
            if not jobloghistory or jobname not in jobloghistory:
                print(jobname + ' has not been processed, submitting')
                joblog.write(jobname + '\n')
                joblog.flush()
                if options.live:
                    # trigger the build; errors are ignored (best effort)
                    http_get(jenkinsurl, True)
                else:
                    print('GET ' + jenkinsurl)
            else:
                print(jobname + ' has been processed, ignoring')
        joblog.close()
    finally:
        # dry-run keeps the downloaded feed around for inspection
        if options.live:
            os.remove(tempfile.name)
        else:
            print('JIRA Data is located: ' + tempfile.name)
# Script entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| {"/releasedocmaker/src/main/python/releasedocmaker/getversions.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/__init__.py": ["/releasedocmaker/src/main/python/releasedocmaker/getversions.py", "/releasedocmaker/src/main/python/releasedocmaker/jira.py", "/releasedocmaker/src/main/python/releasedocmaker/utils.py"], "/releasedocmaker/src/main/python/releasedocmaker/jira.py": ["/releasedocmaker/src/main/python/releasedocmaker/utils.py"]} |
52,592 | Saunak626/cmbper | refs/heads/master | /utils.py | import torch
import numpy as np
import os
def set_random_seed(n):
    """Seed both NumPy's and PyTorch's global RNGs with *n* for reproducibility."""
    torch.manual_seed(n)
    np.random.seed(n)
def make_dirs(model_desc):
    """Create (if needed) the checkpoint and log directories for a run.

    Args:
        model_desc: run description string used as the directory name.

    Returns:
        (save_dir, log_dir): relative paths "params/<desc>" and "logs/<desc>".
    """
    save_dir = "params/{}".format(model_desc)
    log_dir = "logs/{}".format(model_desc)
    # exist_ok avoids the check-then-create race of the original helper
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    return save_dir, log_dir
def save_checkpoint(epoch, global_step, model, optimizer, save_dir, name):
    """Serialize training state to ``<save_dir>/<name>``.

    NOTE(review): the whole optimizer object is pickled here, not
    ``optimizer.state_dict()`` -- confirm that is intentional before relying
    on these checkpoints across PyTorch versions.
    """
    payload = {
        "state_dict": model.state_dict(),
        "epoch": epoch,
        "global_step": global_step,
        "optimizer": optimizer,
    }
    torch.save(payload, os.path.join(save_dir, name))
def get_model_config(args, postprocess=False):
    """Build the keyword-argument dict used to construct ``Model``.

    Args:
        args: parsed CLI namespace (see args.py).
        postprocess: whether the model is being built for the
            uncertainty-calibration (post-processing) stage.

    Returns:
        dict of Model constructor keyword arguments.
    """
    # Calibration only applies when post-processing an evidential model;
    # the `True if X else False` wrappers of the original were redundant.
    calibration = postprocess and args.loss == 'evi'
    use_temperature_alpha = calibration and args.use_temperature_alpha
    return {'chin': args.chin,
            'chout': args.chout,
            'hidden': args.hidden,
            'depth': args.depth,
            'kernel_size': args.kernel_size,
            'stride': args.stride,
            'calibration': calibration,
            'use_temperature_alpha': use_temperature_alpha}
def get_checkpoint_path(args, postprocess=False):
    """Return the checkpoint path matching the run settings and args.load_type."""
    desc = get_model_description(args, postprocess)
    return "params/{}/{}_model.pth".format(desc, args.load_type)
def get_model_description(args, postprocess=False):
    """Build the directory-name string that encodes the run's hyper-parameters."""
    desc = (f"loss-{args.loss}_bsz-{args.bsz}_hid-{args.hidden}"
            f"_dep-{args.depth}_ks-{args.kernel_size}_st-{args.stride}"
            f"_lr-{args.lr}_eta-{args.eta}")
    # zeta only matters for evidential training or L1 post-processing
    if args.loss == 'evi' or (postprocess and args.loss == 'L1'):
        desc += f"_zeta-{args.zeta}"
    if postprocess:
        desc += '_pp'
    return desc
def get_logger(log_dir):
    """Open (append mode) and return the train/valid/info log files in *log_dir*."""
    def _open(name):
        return open(os.path.join(log_dir, name), 'a')
    return _open('train.txt'), _open('valid.txt'), _open('info.txt')
def write_experiment_info(log_info, args, model):
    """Dump the CLI arguments and trainable-parameter count to *log_info*.

    Flushes after every write (same cadence as before) so the info file is
    readable even if training crashes early.
    """
    def _emit(text):
        log_info.write(text)
        log_info.flush()

    _emit('----- ARGS -----\n')
    for key, item in vars(args).items():
        _emit('{} {}\n'.format(key, item))
    _emit('----------------\n\n')
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    _emit('{} {}\n'.format('# of params', n_params))
    print('# of params:', n_params)
52,593 | Saunak626/cmbper | refs/heads/master | /BP_prediction.py | import torch
from torch import optim
from torch.utils.data import DataLoader
import os
import numpy as np
from data_manager import ARTDataset
from model import Model
import argparse
from utils import make_dirs, get_model_config, get_model_description
from args import parse_args
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import pickle
import matplotlib.pyplot as plt
import os
import scipy.stats
N_SEG = 5
def mean_confidence_interval(a, confidence=0.95):
    """Return the sample mean of *a* and the half-width of its t-based
    confidence interval (default 95%)."""
    dof = len(a) - 1
    mean = np.mean(a)
    half_width = scipy.stats.sem(a) * scipy.stats.t.ppf((1 + confidence) / 2., dof)
    return mean, half_width
def check_overall_performance(ys, preds):
    """Compute mean absolute errors and their 95% CIs for SBP/MAP/DBP/all.

    ys/preds are arrays with the time axis at position 1 (as produced by
    inference()); SBP/MAP/DBP are the per-segment max/mean/min along it.

    Returns:
        ((SBP_m, MAP_m, DBP_m, ALL_m), (SBP_ci, MAP_ci, DBP_ci, ALL_ci))
    """
    abs_errors = [
        np.abs(ys.max(axis=1) - preds.max(axis=1)).reshape(-1),   # SBP
        np.abs(ys.mean(axis=1) - preds.mean(axis=1)).reshape(-1), # MAP
        np.abs(ys.min(axis=1) - preds.min(axis=1)).reshape(-1),   # DBP
        np.abs(ys - preds).reshape(-1),                           # ALL
    ]
    pairs = [mean_confidence_interval(err) for err in abs_errors]
    means = tuple(m for m, _ in pairs)
    cis = tuple(ci for _, ci in pairs)
    return means, cis
def evaluate_BHS_Standard(ys, preds):
    """Print the BHS grading table: percentage of absolute errors under
    5/10/15 mmHg for SBP, MAP, DBP and all samples."""

    def _percent_under(err):
        total = len(err)
        return tuple((err < bound).sum() * 100.0 / total for bound in (5, 10, 15))

    sbp_percent = _percent_under(abs(ys.max(axis=1) - preds.max(axis=1)).reshape(-1))
    map_percent = _percent_under(abs(ys.mean(axis=1) - preds.mean(axis=1)).reshape(-1))
    dbp_percent = _percent_under(abs(ys.min(axis=1) - preds.min(axis=1)).reshape(-1))
    bp_percent = _percent_under(abs(ys - preds).reshape(-1))

    print('----------------------------')
    print('| BHS-Metric |')
    print('----------------------------')
    print('----------------------------------------')
    print('| | <= 5mmHg | <=10mmHg | <=15mmHg |')
    print('----------------------------------------')
    print('| SBP | {} % | {} % | {} % |'.format(round(sbp_percent[0], 2), round(sbp_percent[1], 2), round(sbp_percent[2], 2)))
    print('| MAP | {} % | {} % | {} % |'.format(round(map_percent[0], 2), round(map_percent[1], 2), round(map_percent[2], 2)))
    print('| DBP | {} % | {} % | {} % |'.format(round(dbp_percent[0], 2), round(dbp_percent[1], 2), round(dbp_percent[2], 2)))
    print('| ALL | {} % | {} % | {} % |'.format(round(bp_percent[0], 2), round(bp_percent[1], 2), round(bp_percent[2], 2)))
    print('----------------------------------------')
def evaluate_AAMI_Standard(ys, preds):
    """Print mean error (ME) and standard deviation for SBP, MAP, DBP and
    all samples. Errors are signed (ys - preds) per the AAMI standard."""
    sbps = (ys.max(axis=1) - preds.max(axis=1)).reshape(-1)
    maps = (ys.mean(axis=1) - preds.mean(axis=1)).reshape(-1)
    dbps = (ys.min(axis=1) - preds.min(axis=1)).reshape(-1)
    bps = (ys - preds).reshape(-1)
    print('---------------------')
    print('| AAMI Standard |')
    print('---------------------')
    print('-----------------------')
    print('| | ME | STD |')
    print('-----------------------')
    for label, err in (('SBP', sbps), ('MAP', maps), ('DBP', dbps), ('ALL', bps)):
        print('| {} | {} | {} |'.format(label, round(np.mean(err), 3), round(np.std(err), 3)))
    print('-----------------------')
def inference(te_loader, model, args):
    """Run the model over the test loader and return de-normalized arrays.

    Returns:
        xs, ys, preds: stacked per-segment arrays (segments, T//N_SEG, 1)
            in physical units (PPG / mmHg).
        epises: per-segment time-averaged epistemic uncertainty.

    NOTE(review): relies on the module-level ``device`` created in the
    __main__ block -- confirm before reusing this function elsewhere.
    """
    print('inference starts!')
    xs = []
    ys = []
    preds =[]
    epises = []
    # de-normalization statistics (means/stds saved during preprocessing)
    stats = pickle.load(open(args.stats_path, 'rb'))
    x_mean = stats['PPG_mean']
    x_std = stats['PPG_std']
    y_mean = stats['ABP_mean']
    y_std = stats['ABP_std']
    print('Number of batches:', len(te_loader))
    for itr, (x, y) in enumerate(te_loader):
        print('Iteration: [{} / {}]'.format(itr+1, len(te_loader)))
        B, C, T = y.shape
        x, y = x.to(device), y.to(device)
        pred, epis = model.compute_prediction_and_uncertainty(x)
        # split each record into N_SEG equal-length time segments
        x = x.contiguous().view(B * N_SEG, 1, T // N_SEG)
        y = y.contiguous().view(B * N_SEG, 1, T // N_SEG)
        pred = pred.contiguous().view(B * N_SEG, 1, T // N_SEG)
        epis = epis.contiguous().view(B * N_SEG, 1, T // N_SEG)
        # undo z-score normalization back to physical units
        x = x_std * x + x_mean
        y = y_std * y + y_mean
        pred = y_std * pred + y_mean
        # uncertainty is std-like, so it scales with std only; average over time
        epis = y_std * epis.mean(dim=-1)
        # (segments, channels, time) -> (segments, time, channels) numpy arrays
        x = x.permute(0, 2, 1).cpu().numpy()
        y = y.permute(0, 2, 1).cpu().numpy()
        pred = pred.permute(0, 2, 1).cpu().numpy()
        epis = epis.cpu().numpy()
        xs.append(x)
        ys.append(y)
        preds.append(pred)
        epises.append(epis)
    xs = np.vstack(xs)
    ys = np.vstack(ys)
    preds = np.vstack(preds)
    epises = np.vstack(epises).squeeze()
    return xs, ys, preds, epises
# Evaluation entry point: load a trained checkpoint, run inference over the
# test set, and report MAE / BHS / AAMI metrics.
if __name__ == "__main__":
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # checkpoint directory name encodes the hyper-parameter settings
    model_desc = get_model_description(args, postprocess=args.postprocess)
    checkpoint_path = "params/{}/{}_model.pth".format(model_desc, args.load_type)
    te_dataset = ARTDataset(args.te_path)
    te_loader = DataLoader(te_dataset, batch_size=1024, shuffle=False)
    config = get_model_config(args, postprocess=args.postprocess)
    print('config', config)
    model = Model(**config).to(device)
    print("Load checkpoint from: {}".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    with torch.no_grad():
        xs, ys, preds, epises = inference(te_loader, model, args)
    # mean absolute errors with 95% confidence intervals
    (SBP_m, MAP_m, DBP_m, ALL_m), (SBP_ci, MAP_ci, DBP_ci, ALL_ci) = check_overall_performance(ys, preds)
    print('[MAE] SBP:{:.3f}+-{:.3f}, MAP:{:.3f}+-{:.3f}, DBP:{:.3f}+-{:.3f}, All:{:.3f}+-{:.3f}'.format(SBP_m, SBP_ci,
                                                        MAP_m, MAP_ci, DBP_m, DBP_ci, ALL_m, ALL_ci))
    evaluate_BHS_Standard(ys, preds)
    evaluate_AAMI_Standard(ys, preds)
52,594 | Saunak626/cmbper | refs/heads/master | /args.py | import argparse
import os
def parse_args():
    """Build the CLI argument parser and return the parsed namespace.

    Returns:
        argparse.Namespace with data paths, training hyper-parameters,
        model architecture settings and post-processing flags.
    """
    parser=argparse.ArgumentParser(description='Continuous monitoring of blood pressure with evidential regression.')
    # data locations
    parser.add_argument('--tr_path', default='datasets/train.p',type=str, help='Training data path.')
    parser.add_argument('--val_path', default='datasets/valid.p',type=str, help='Validation data path.')
    parser.add_argument('--te_path', default='datasets/test.p',type=str, help='Test data path')
    parser.add_argument('--stats_path', default='datasets/stats.p',type=str, help='Data statistics path')
    parser.add_argument('--result_path', default='results',type=str, help='Directory to save experimental results.')
    # optimization settings
    parser.add_argument('--max_itr', default=500000,type=int, help='Maximum number of iterations to train.')
    parser.add_argument('--bsz', default=512,type=int, help='Batch size.')
    parser.add_argument('--num_workers', default=4,type=int, help='Number of workers for training loader.')
    parser.add_argument('--lr', default=5e-4,type=float, help='Learning rate.')
    # model architecture
    parser.add_argument('--chin', default=1,type=int, help='Input size')
    parser.add_argument('--chout', default=4,type=int, help='Output size')
    parser.add_argument('--hidden', default=64,type=int, help='First hidden channel size.')
    parser.add_argument('--depth', default=4,type=int, help='Number of blocks in encoder and decoder.')
    parser.add_argument('--kernel_size', default=6,type=int, help='Kernel size of convolution layer.')
    parser.add_argument('--stride', default=2,type=int, help='Stride of convolution layer.')
    # loss configuration
    parser.add_argument('--eta', default=1.0,type=float, help='Coefficient of peak-to-peak matching loss.')
    parser.add_argument('--zeta', default=0.0,type=float, help='Coefficient of penalty term in total evidential loss.')
    parser.add_argument('--loss', default='L1', choices=['L1', 'MSE', 'NLL', 'evi'], help='Main objective function.')
    parser.add_argument('--loss_aux', default='L1', choices=['L1', 'MSE'], help='Objective function of peak-to-peak matching loss.')
    # checkpoint loading / post-processing (calibration) stage
    parser.add_argument('--load_type', default=None, choices=[None, 'best_loss', 'best_MAE', 'latest'], help='Model type to load.')
    parser.add_argument('--postprocess', action="store_true", help='Whether to load post-processed model.')
    parser.add_argument('--use_temperature_alpha', action="store_true", help='Whether to use additional temperature parameter for uncertainty calibration.')
    parser.add_argument('--post_itr', default=1e3, type=int, help='Number of iterations for post-processing.')
    parser.add_argument('--post_lr', default=5e-4,type=float, help='Learning rate for post-processing.')
    parser.add_argument('--subset_ratio', default=0.8,type=float, help='Ratio at which high-reliability samples are selected.')
    args = parser.parse_args()
    return args
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,595 | Saunak626/cmbper | refs/heads/master | /train.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data_manager import ARTDataset
from model import Model
from utils import get_model_config, get_model_description, make_dirs, get_logger, write_experiment_info, save_checkpoint, set_random_seed
from loss import compute_loss, compute_auxiliary_loss, performance_check
from args import parse_args
from random import sample
import numpy as np
import pickle
import os
import argparse
import json
import time
def train(epoch, writer, log_train, args):
    """Run one training epoch over tr_loader, updating the model in place.

    Logs per-step metrics to TensorBoard (*writer*) and the epoch averages
    as one JSON line to *log_train*.

    NOTE(review): relies on module-level globals (model, optimizer,
    tr_loader, device, stats, global_step) created in the __main__ block.
    """
    global global_step
    loss_all_avg = loss_main_avg = MSE_avg = MAE_avg = SBP_avg = MAP_avg = DBP_avg = 0.
    model.train()
    for i, (x, y) in enumerate(tr_loader):
        global_step += 1
        x, y = x.to(device), y.to(device)
        out = model(x)
        # first output channel is the point prediction; remaining channels
        # presumably carry evidential parameters - confirm against model.py
        pred = out[:, :1, :]
        loss_main = compute_loss(y, out, args.loss, args.zeta)
        loss_aux = compute_auxiliary_loss(y, pred, args.loss_aux)
        # total = main objective + eta-weighted peak-to-peak matching loss
        loss_all = loss_main + args.eta * loss_aux
        optimizer.zero_grad()
        loss_all.backward()
        # clip gradient norm at 1 for stability
        nn.utils.clip_grad_norm_(model.parameters(), 1.)
        optimizer.step()
        with torch.no_grad():
            MAE, MSE, SBP, MAP, DBP = performance_check(pred, y, stats)
        # accumulate running per-epoch averages
        loss_all_avg += loss_all.item() / len(tr_loader)
        loss_main_avg += loss_main.item() / len(tr_loader)
        MAE_avg += MAE.item() / len(tr_loader)
        MSE_avg += MSE.item() / len(tr_loader)
        SBP_avg += SBP.item() / len(tr_loader)
        MAP_avg += MAP.item() / len(tr_loader)
        DBP_avg += DBP.item() / len(tr_loader)
        writer.add_scalar('Train/{}_loss_all'.format(args.loss), loss_all.item(), global_step)
        writer.add_scalar('Train/{}_loss_main'.format(args.loss), loss_main.item(), global_step)
        writer.add_scalar('Train/{}_loss_aux'.format(args.loss), loss_aux.item(), global_step)
        writer.add_scalar('Train/MAE', MAE.item(), global_step)
        writer.add_scalar('Train/MSE', MSE.item(), global_step)
        writer.add_scalar('Train/SBP', SBP.item(), global_step)
        writer.add_scalar('Train/MAP', MAP.item(), global_step)
        writer.add_scalar('Train/DBP', DBP.item(), global_step)
    # one JSON line per epoch with the averaged metrics
    state = {}
    state['Epoch'] = epoch
    state['Global step'] = global_step
    state['{}_loss_all'.format(args.loss)] = loss_all_avg
    state['{}_loss_main'.format(args.loss)] = loss_main_avg
    state['MAE'] = MAE_avg
    state['MSE'] = MSE_avg
    state['SBP'] = SBP_avg
    state['MAP'] = MAP_avg
    state['DBP'] = DBP_avg
    log_train.write('%s\n' % json.dumps(state))
    log_train.flush()
    print('[Train] Epoch: {}, Itr:{}, Loss: {:0.4f}, Loss-main: {:0.4f}, MAE: {:0.4f}, MSE: {:0.4f} SBP: {:0.4f}, MAP: {:0.4f}, DBP: {:0.4f}'.format(
        epoch, global_step, loss_all_avg, loss_main_avg, MAE_avg, MSE_avg, SBP_avg, MAP_avg, DBP_avg))
def validate(epoch, writer, log_valid, args):
    """Evaluate the model over val_loader without updating parameters.

    Returns:
        (loss_all_avg, MAE_avg): epoch-averaged total loss and MAE, used by
        the __main__ loop for best-checkpoint selection.

    NOTE(review): relies on module-level globals (model, val_loader, device,
    stats) created in the __main__ block; the caller wraps this in
    torch.no_grad().
    """
    global global_step
    loss_all_avg = loss_main_avg = MSE_avg = MAE_avg = SBP_avg = MAP_avg = DBP_avg = 0.
    model.eval()
    for i, (x, y) in enumerate(val_loader):
        x, y = x.to(device), y.to(device)
        out = model(x)
        # first output channel is the point prediction
        pred = out[:, :1, :]
        loss_main = compute_loss(y, out, args.loss, args.zeta)
        loss_aux = compute_auxiliary_loss(y, pred, args.loss_aux)
        loss_all = loss_main + args.eta * loss_aux
        MAE, MSE, SBP, MAP, DBP = performance_check(pred, y, stats)
        # accumulate running per-epoch averages
        loss_all_avg += loss_all.item() / len(val_loader)
        loss_main_avg += loss_main.item() / len(val_loader)
        MAE_avg += MAE.item() / len(val_loader)
        MSE_avg += MSE.item() / len(val_loader)
        SBP_avg += SBP.item() / len(val_loader)
        MAP_avg += MAP.item() / len(val_loader)
        DBP_avg += DBP.item() / len(val_loader)
    writer.add_scalar('Valid/{}_loss_main'.format(args.loss), loss_main_avg, global_step)
    writer.add_scalar('Valid/{}_loss_all'.format(args.loss), loss_all_avg, global_step)
    writer.add_scalar('Valid/MAE', MAE_avg, global_step)
    writer.add_scalar('Valid/MSE', MSE_avg, global_step)
    writer.add_scalar('Valid/SBP', SBP_avg, global_step)
    writer.add_scalar('Valid/MAP', MAP_avg, global_step)
    writer.add_scalar('Valid/DBP', DBP_avg, global_step)
    # one JSON line per epoch with the averaged metrics
    state = {}
    state['Epoch'] = epoch
    state['Global step'] = global_step
    state['{}_loss_all'.format(args.loss)] = loss_all_avg
    state['{}_loss_main'.format(args.loss)] = loss_main_avg
    state['MAE'] = MAE_avg
    state['MSE'] = MSE_avg
    state['SBP'] = SBP_avg
    state['MAP'] = MAP_avg
    state['DBP'] = DBP_avg
    log_valid.write('%s\n' % json.dumps(state))
    log_valid.flush()
    print('[Valid] Epoch: {}, Itr:{}, Loss: {:0.4f}, Loss-main: {:0.4f}, MAE: {:0.4f}, MSE: {:0.4f} SBP: {:0.4f}, MAP: {:0.4f}, DBP: {:0.4f}'.format(
        epoch, global_step, loss_all_avg, loss_main_avg, MAE_avg, MSE_avg, SBP_avg, MAP_avg, DBP_avg))
    return loss_all_avg, MAE_avg
# Training entry point: builds data loaders, model and optimizer, then
# alternates train/validate epochs until args.max_itr steps are reached,
# checkpointing the latest / best-loss / best-MAE models.
if __name__ == "__main__":
    # NOTE(review): `global` at module level is a no-op (and a SyntaxWarning);
    # kept as-is since this is a byte-identical documentation pass.
    global global_step
    global_step = 0
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tr_dataset = ARTDataset(args.tr_path)
    val_dataset = ARTDataset(args.val_path)
    tr_loader = DataLoader(tr_dataset, batch_size=args.bsz, num_workers=args.num_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.bsz, num_workers=args.num_workers, shuffle=False)
    config = get_model_config(args)
    model = Model(**config).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # normalization statistics used for de-normalized metric reporting
    stats = pickle.load(open(args.stats_path, 'rb'))
    model_desc = get_model_description(args)
    save_dir, log_dir = make_dirs(model_desc)
    log_train, log_valid, log_info = get_logger(log_dir)
    writer = SummaryWriter(logdir=os.path.join(log_dir, 'runs', str(time.strftime('%Y-%m-%d_%H:%M:%S'))))
    write_experiment_info(log_info, args, model)
    # sentinel "infinity" values; any real epoch result will be smaller
    loss_best = MAE_best = 987654321
    for epoch in range(1, 987654321):
        print('# --- {}th epoch start --- # '.format(epoch))
        train(epoch, writer, log_train, args)
        with torch.no_grad():
            loss_val, MAE_val = validate(epoch, writer, log_valid, args)
        save_checkpoint(epoch, global_step, model, optimizer, save_dir, "latest_model.pth")
        print('Model Saved! - [latest_model.pth]')
        if loss_val < loss_best:
            loss_best = loss_val
            save_checkpoint(epoch, global_step, model, optimizer, save_dir, "best_loss_model.pth")
            print('Model Saved! - [best_loss_model.pth]')
        if MAE_val < MAE_best:
            MAE_best = MAE_val
            save_checkpoint(epoch, global_step, model, optimizer, save_dir, "best_MAE_model.pth")
            print('Model Saved! - [best_MAE_model.pth]')
        print('Best Valid Loss: {:0.4f}'.format(loss_best))
        print('# --- {}th epoch end --- # \n'.format(epoch))
        # stop once the global iteration budget is exhausted
        if global_step >= args.max_itr:
            break
    print('Done.')
52,596 | Saunak626/cmbper | refs/heads/master | /viz_uncertainty.py | import torch
from torch import optim
from torch.utils.data import DataLoader
import os
import numpy as np
from math import pi, log
from data_manager import ARTDataset
from model import Model
import json
import time
import argparse
from utils import make_dirs, get_model_config, get_model_description
from args import parse_args
from random import sample
import pickle
import matplotlib.pyplot as plt
n_stds = 4
viz_start_idx = 300
sr = 125
def compute_epistemic_uncertainty(alpha, beta, v):
    """Epistemic (model) uncertainty of a Normal-Inverse-Gamma head.

    Computes sqrt(beta / (v * (alpha - 1))) element-wise and clamps the result
    at 1e3 so near-singular outputs (alpha -> 1, v -> 0) stay finite.
    """
    epis = (beta / (v * (alpha - 1))) ** 0.5
    # Fix: removed leftover debug print of epis.max(); it spammed stdout on every call.
    epis = torch.clamp(epis, max=1e3)
    return epis
def compute_aleatoric_uncertainty(alpha, beta):
    """Aleatoric (data) uncertainty: sqrt(beta / (alpha - 1)), capped at 1e3."""
    sigma = (beta / (alpha - 1)) ** 0.5
    return torch.clamp(sigma, max=1e3)
def rescale_value(mu, val):
    """Normalize *val* by the (unbiased) variance of *mu*."""
    variance = mu.var()
    return val / variance
def prepare_to_visualize(y, pred, epis, mean, std):
    """De-normalize tensors and convert them to 1-D numpy arrays for plotting.

    y and pred are rescaled by std and shifted by mean; epis is a spread, so
    it is only rescaled by std.
    """
    as_array = lambda t: t.squeeze().cpu().numpy()
    y_arr = as_array(y) * std + mean
    pred_arr = as_array(pred) * std + mean
    epis_arr = as_array(epis) * std
    return y_arr, pred_arr, epis_arr
def add_subplot(fig, pos, y, mu, var):
    """Plot ground truth vs. prediction with shaded uncertainty bands.

    y, mu, var: 1-D arrays (mmHg); pos: matplotlib subplot spec (e.g. 111).
    Draws nested bands of half-width k*var for k in linspace(0, n_stds, 4);
    overlapping alpha=0.3 fills make the inner bands appear darker.
    """
    ax = fig.add_subplot(pos)
    T = len(y)
    t = np.arange(T) / sr  # sample index -> seconds using the module-level rate `sr`
    ax.plot(t, y, c='#EE0000', linestyle='--', zorder=2, label="Ground-truth")
    ax.plot(t, mu, c='k', zorder=3, label="Prediction")
    for k in np.linspace(0, n_stds, 4):
        ax.fill_between(
            t, (mu - k * var), (mu + k * var),
            alpha=0.3,
            edgecolor=None,
            facecolor='#94b5c0',
            linewidth=0,
            zorder=1,
            label="Unc." if k == 0 else None)  # label the band only once
    ax.set_ylim(20, 220)  # physiologically plausible blood-pressure range (mmHg)
    plt.xlabel('Time (seconds)', fontsize=10)
    plt.ylabel('Blood pressure (mmHg)', fontsize=10)
    plt.tight_layout()
    ax.legend(loc="upper right", fontsize='x-small')
def viz_regression(te_loader, model, args):
    """Visualize prediction + epistemic uncertainty for each test sample.

    Loads ABP normalization stats from args.stats_path, de-normalizes each
    prediction and opens one blocking matplotlib window per sample
    (batch_size is 1 in __main__).
    """
    dt = pickle.load(open(args.stats_path, 'rb'))
    mean = dt['ABP_mean']
    std = dt['ABP_std']
    for itr, (x, y) in enumerate(te_loader):
        print('itr', itr, flush=True)
        x, y = x.to(device), y.to(device)
        pred, epis = model.compute_prediction_and_uncertainty(x)
        y, pred, epis = prepare_to_visualize(y, pred, epis, mean, std)
        fig = plt.figure(figsize=(5, 2.5), dpi=300)
        plt.subplots_adjust(wspace=0.25, hspace=0.4)
        add_subplot(fig, 111, y, pred, epis)
        plt.show()  # blocks until the window is closed
        plt.close()
if __name__ == "__main__":
    # Load the trained model and visualize per-sample uncertainty on the test set.
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    postprocess = args.postprocess
    model_desc = get_model_description(args, postprocess)
    checkpoint_path = "params/{}/{}_model.pth".format(model_desc, args.load_type)
    te_dataset = ARTDataset(args.te_path)
    # batch_size 1 so viz_regression draws one figure per sample
    te_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, num_workers=1)
    config = get_model_config(args, postprocess)
    model = Model(**config).to(device)
    checkpoint = torch.load(checkpoint_path, map_location=device)
    print("Load checkpoint from: {}".format(checkpoint_path))
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    with torch.no_grad():
        viz_regression(te_loader, model, args)
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,597 | Saunak626/cmbper | refs/heads/master | /model.py | import torch as th
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import weight_norm
import math
import time
class BLSTM(nn.Module):
    """LSTM wrapper; when bidirectional, a linear layer maps 2*dim back to dim.

    Input/output layout follows nn.LSTM's default: (seq_len, batch, dim).
    """
    def __init__(self, dim, layers=2, bi=True):
        super().__init__()
        self.lstm = nn.LSTM(bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim)
        # The projection back to `dim` is only needed when both directions are concatenated.
        self.linear = nn.Linear(2 * dim, dim) if bi else None

    def forward(self, x, hidden=None):
        out, hidden = self.lstm(x, hidden)
        if self.linear is not None:
            out = self.linear(out)
        return out, hidden
def rescale_conv(conv, reference):
    """Divide a conv's weights (and bias) by sqrt(weight_std / reference)."""
    scale = (conv.weight.std().detach() / reference) ** 0.5
    conv.weight.data /= scale
    if conv.bias is not None:
        conv.bias.data /= scale
def rescale_module(module, reference):
    """Apply rescale_conv to every (transposed) 1-D convolution inside *module*."""
    for layer in module.modules():
        if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):
            rescale_conv(layer, reference)
class Model(nn.Module):
    """Demucs-style 1-D conv encoder/decoder with a (B)LSTM bottleneck.

    The decoder emits `chout` channels (default 4) that forward() converts to
    the evidential Normal-Inverse-Gamma parameters (gamma, v, alpha+1, beta).
    """
    def __init__(self,
                 chin=1,            # input channels (PPG waveform)
                 chout=4,           # output channels (NIG parameters)
                 hidden=48,         # channel width of the first encoder stage
                 depth=4,           # number of encoder/decoder stages
                 kernel_size=5,
                 stride=2,
                 causal=False,      # if True, use a uni-directional LSTM
                 growth=2,          # channel growth factor per stage
                 max_hidden=10_000, # cap on the channel growth
                 normalize=False,   # per-sample std normalization of the input
                 glu=True,          # GLU (vs ReLU) after the 1x1 convs
                 rescale=0.1,       # weight-rescaling reference (falsy disables)
                 floor=1e-3,        # lower bound added to the normalization std
                 eps=1e-8,          # numeric floor for v / alpha / beta
                 calibration=False,             # learn temperatures for v and beta
                 use_temperature_alpha=False):  # additionally temper alpha
        super().__init__()
        self.chin = chin
        self.chout = chout
        self.hidden = hidden
        self.depth = depth
        self.kernel_size = kernel_size
        self.stride = stride
        self.causal = causal
        self.floor = floor
        self.normalize = normalize
        self.eps = eps
        self.calibration = calibration
        self.use_temperature_alpha = use_temperature_alpha
        # learnable temperatures used for post-hoc calibration of (v, beta, alpha)
        self.tmps = nn.Parameter(th.zeros(3))
        self.encoder = nn.ModuleList()
        self.decoder = nn.ModuleList()
        activation = nn.GLU(1) if glu else nn.ReLU()
        ch_scale = 2 if glu else 1  # GLU halves the channel count, so double before it
        for index in range(depth):
            encode = []
            encode += [
                weight_norm(nn.Conv1d(chin, hidden, kernel_size, stride), name='weight'),
                nn.ReLU(),
                weight_norm(nn.Conv1d(hidden, hidden * ch_scale, 1), name='weight'),
                activation,
                nn.BatchNorm1d(hidden),
            ]
            self.encoder.append(nn.Sequential(*encode))
            decode = []
            decode += [
                nn.BatchNorm1d(hidden),
                weight_norm(nn.Conv1d(hidden, ch_scale * hidden, 1), name='weight'),
                activation,
                weight_norm(nn.ConvTranspose1d(hidden, chout, kernel_size, stride), name='weight'),
            ]
            if index > 0:
                decode.append(nn.ReLU())
            # prepend so decoder stages mirror the encoder order (U-Net symmetry)
            self.decoder.insert(0, nn.Sequential(*decode))
            chout = hidden
            chin = hidden
            hidden = min(int(growth * hidden), max_hidden)
        self.lstm = BLSTM(chin, bi=not causal)
        if rescale:
            rescale_module(self, reference=rescale)

    def valid_length(self, length):
        """Smallest length >= `length` that survives the strided conv stack
        without truncation (forward then inverse length arithmetic)."""
        length = math.ceil(length)
        for _ in range(self.depth):
            length = math.ceil((length - self.kernel_size) / self.stride) + 1
            length = max(length, 1)
        for _ in range(self.depth):
            length = (length - 1) * self.stride + self.kernel_size
        length = int(math.ceil(length))
        return int(length)

    @property
    def total_stride(self):
        # overall downsampling factor of the encoder
        return self.stride ** self.depth

    def compute_epistemic_uncertainty(self, alpha, beta, v):
        """Epistemic std of the NIG head: sqrt(beta / (v * (alpha - 1)))."""
        epis = (beta / (v * (alpha - 1))) ** 0.5
        return epis

    def compute_aleatoric_uncertainty(self, alpha, beta):
        """Aleatoric std of the NIG head: sqrt(beta / (alpha - 1))."""
        alea = (beta / ((alpha - 1))) ** 0.5
        return alea

    def compute_prediction_and_uncertainty(self, mix, aleatoric=False):
        """Return (prediction, epistemic[, aleatoric]) tensors for `mix`."""
        out = self.forward(mix)
        gamma, v, alpha, beta = out.chunk(4, 1)
        pred = gamma  # gamma is the NIG mean, i.e. the point prediction
        epis = self.compute_epistemic_uncertainty(alpha, beta, v)
        if aleatoric:
            alea = self.compute_aleatoric_uncertainty(alpha, beta)
            return pred, epis, alea
        else:
            return pred, epis

    def forward(self, mix):
        """Map a waveform [B, T] or [B, 1, T] to NIG parameters [B, 4, T]."""
        if mix.dim() == 2:
            mix = mix.unsqueeze(1)  # add the channel dimension
        if self.normalize:
            mono = mix.mean(dim=1, keepdim=True)
            std = mono.std(dim=-1, keepdim=True)
            mix = mix / (self.floor + std)
        else:
            std = 1
        length = mix.shape[-1]
        x = mix
        # pad symmetrically so the conv stack sees a "valid" length
        pad_l = (self.valid_length(length) - length) // 2
        pad_r = (self.valid_length(length) - length) - pad_l
        x = F.pad(x, (pad_l, pad_r))
        skips = []
        for encode in self.encoder:
            x = encode(x)
            skips.append(x)
        # nn.LSTM expects (time, batch, channels)
        x = x.permute(2, 0, 1)
        x, _ = self.lstm(x)
        x = x.permute(1, 2, 0)
        for decode in self.decoder:
            skip = skips.pop(-1)
            x = x + skip[..., :x.shape[-1]]  # U-Net style skip connection
            x = decode(x)
        x = x[..., :length]  # crop the padding off again
        x = std * x          # undo the input normalization
        a, b, c, d = x.chunk(4, 1)
        gamma = a
        # softplus keeps v, alpha, beta strictly positive
        v = F.softplus(b)
        alpha = F.softplus(c)
        beta = F.softplus(d)
        if self.calibration:
            v = F.softplus(self.tmps[0]) * v
            beta = F.softplus(self.tmps[1]) * beta
            if self.use_temperature_alpha:
                alpha = F.softplus(self.tmps[2]) * alpha
        v = v + self.eps
        alpha_plus_one = alpha + 1 + self.eps  # shift so alpha > 1 (finite NIG variance)
        beta = beta + self.eps
        x = th.cat((gamma, v, alpha_plus_one, beta), dim=1)
        return x
52,598 | Saunak626/cmbper | refs/heads/master | /BP_prediction_subset.py | import torch
from torch import optim
from torch.utils.data import DataLoader
import os
import numpy as np
from data_manager import ARTDataset
from model import Model
import argparse
from utils import make_dirs, get_model_config, get_model_description
from args import parse_args
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import pickle
import matplotlib.pyplot as plt
import os
import scipy.stats
N_SEG = 5
def rescale_value(mu, val):
    """Normalize *val* by the per-row dynamic range (max - min) of *mu*."""
    dynamic_range = mu.max(dim=-1)[0] - mu.min(dim=-1)[0]
    return val / dynamic_range
def mean_confidence_interval(a, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval of *a*."""
    n = len(a)
    mean = np.mean(a)
    sem = scipy.stats.sem(a)
    half_width = sem * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
    return mean, half_width
def check_overall_performance(ys, preds):
    """MAE means and 95% CI half-widths for SBP/MAP/DBP and the full waveform.

    SBP/DBP/MAP are the per-sample max/min/mean over axis 1 of each waveform.
    Returns ((SBP, MAP, DBP, ALL) means, (SBP, MAP, DBP, ALL) half-widths).
    """
    abs_err = {
        'SBP': abs(ys.max(axis=1) - preds.max(axis=1)).reshape(-1),
        'MAP': abs(ys.mean(axis=1) - preds.mean(axis=1)).reshape(-1),
        'DBP': abs(ys.min(axis=1) - preds.min(axis=1)).reshape(-1),
        'ALL': abs(ys - preds).reshape(-1),
    }
    summary = {key: mean_confidence_interval(err) for key, err in abs_err.items()}
    order = ('SBP', 'MAP', 'DBP', 'ALL')
    means = tuple(summary[key][0] for key in order)
    half_widths = tuple(summary[key][1] for key in order)
    return means, half_widths
def inference(te_loader, model, save_dir, args):
    """Run the model over the test loader; return de-normalized numpy arrays.

    Each waveform of length T is reshaped into N_SEG sub-segments so metrics
    and uncertainty are per-segment; T must be divisible by N_SEG. Returns
    (xs, ys, preds, epises) where epises holds one range-scaled scalar per
    segment. `save_dir` is currently unused; relies on module globals
    `device` and `N_SEG`.
    """
    print('inference starts!')
    xs = []
    ys = []
    preds = []
    epises = []
    # normalization statistics written by the data-preparation script
    stats = pickle.load(open(args.stats_path, 'rb'))
    x_mean = stats['PPG_mean']
    x_std = stats['PPG_std']
    y_mean = stats['ABP_mean']
    y_std = stats['ABP_std']
    print('Number of batches:', len(te_loader))
    for itr, (x, y) in enumerate(te_loader):
        print('Iteration: [{} / {}]'.format(itr+1, len(te_loader)))
        B, C, T = y.shape
        x, y = x.to(device), y.to(device)
        pred, epis = model.compute_prediction_and_uncertainty(x)
        # [B, 1, T] -> [B*N_SEG, 1, T/N_SEG]: treat each segment independently
        x = x.contiguous().view(B * N_SEG, 1, T // N_SEG)
        y = y.contiguous().view(B * N_SEG, 1, T // N_SEG)
        pred = pred.contiguous().view(B * N_SEG, 1, T // N_SEG)
        epis = epis.contiguous().view(B * N_SEG, 1, T // N_SEG)
        # undo z-score normalization
        x = x_std * x + x_mean
        y = y_std * y + y_mean
        pred = y_std * pred + y_mean
        epis = y_std * epis.mean(dim=-1)  # epis: [B*N_SEG, 1]
        epis = rescale_value(pred, epis)  # scale by per-segment dynamic range
        x = x.permute(0, 2, 1).cpu().numpy()
        y = y.permute(0, 2, 1).cpu().numpy()
        pred = pred.permute(0, 2, 1).cpu().numpy()
        epis = epis.cpu().numpy()
        xs.append(x)
        ys.append(y)
        preds.append(pred)
        epises.append(epis)
    xs = np.vstack(xs)
    ys = np.vstack(ys)
    preds = np.vstack(preds)
    epises = np.vstack(epises).squeeze()
    return xs, ys, preds, epises
if __name__ == "__main__":
    # Evaluate MAE on the full test set and on the low-uncertainty subset.
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_desc = get_model_description(args, postprocess=args.postprocess)
    checkpoint_path = "params/{}/{}_model.pth".format(model_desc, args.load_type)
    save_dir = "BP_classification/{}/".format(model_desc)
    te_dataset = ARTDataset(args.te_path)
    te_loader = DataLoader(te_dataset, batch_size=1024, shuffle=False)
    config = get_model_config(args, postprocess=args.postprocess)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    print('config', config)
    model = Model(**config).to(device)
    print("Load checkpoint from: {}".format(checkpoint_path))
    # NOTE(review): torch.load without map_location fails for CUDA checkpoints
    # on CPU-only hosts — consider map_location=device.
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    with torch.no_grad():
        xs, ys, preds, epises = inference(te_loader, model, save_dir, args)
    # MAE with 95% confidence intervals over ALL test segments
    (SBP_m, MAP_m, DBP_m, ALL_m), (SBP_ci, MAP_ci, DBP_ci, ALL_ci) = check_overall_performance(ys, preds)
    print('All - [MAE] SBP:{:.3f}+-{:.3f}, MAP:{:.3f}+-{:.3f}, DBP:{:.3f}+-{:.3f}, All:{:.3f}+-{:.3f}'.format(SBP_m, SBP_ci,
          MAP_m, MAP_ci, DBP_m, DBP_ci, ALL_m, ALL_ci))
    # keep only the subset_ratio fraction with the LOWEST epistemic uncertainty
    indices = np.argsort(epises)
    xs_sub = []
    ys_sub = []
    preds_sub = []
    sub_len = int(len(xs) * args.subset_ratio)
    for i, idx in enumerate(indices):
        xs_sub.append(xs[idx])
        ys_sub.append(ys[idx])
        preds_sub.append(preds[idx])
        if i+1 == sub_len:
            break
    xs_sub = np.array(xs_sub)
    ys_sub = np.array(ys_sub)
    preds_sub = np.array(preds_sub)
    (SBP_m, MAP_m, DBP_m, ALL_m), (SBP_ci, MAP_ci, DBP_ci, ALL_ci) = check_overall_performance(ys_sub, preds_sub)
    print('Subset - [MAE] SBP:{:.3f}+-{:.3f}, MAP:{:.3f}+-{:.3f}, DBP:{:.3f}+-{:.3f}, All:{:.3f}+-{:.3f}'.format(SBP_m, SBP_ci,
          MAP_m, MAP_ci, DBP_m, DBP_ci, ALL_m, ALL_ci))
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,599 | Saunak626/cmbper | refs/heads/master | /data_manager.py | import torch
from torch.utils.data import Dataset
import numpy as np
import pickle
class ARTDataset(Dataset):
    """Paired PPG -> ABP dataset loaded from a pickled dict.

    The pickle at *data_path* must hold {'PPG': array, 'ABP': array}.
    """
    def __init__(self, data_path):
        split = data_path.split('/')[-1].replace('.p', '')
        records = pickle.load(open(data_path, 'rb'))
        self.x_all = torch.tensor(records['PPG'], dtype=torch.float32)
        self.y_all = torch.tensor(records['ABP'], dtype=torch.float32)
        print("Total number of <{}> data points: {}".format(split, len(self.x_all)), flush=True)

    def __len__(self):
        return len(self.x_all)

    def __getitem__(self, idx):
        # DataLoader samplers may hand us a tensor index; normalize it first.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        return self.x_all[idx], self.y_all[idx]
52,600 | Saunak626/cmbper | refs/heads/master | /BP_classification.py | import torch
from torch import optim
from torch.utils.data import DataLoader
import os
import numpy as np
from data_manager import ARTDataset
from model import Model
import argparse
from utils import make_dirs, get_model_config, get_model_description
from args import parse_args
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import pickle
import matplotlib.pyplot as plt
import os
N_SEG = 5
def classify_BP(ys, preds, title, split='SBP'):
    """Three-class BP classification (Normo / Prehyp / Hyper) from waveforms.

    For each sample the SBP (waveform max) or DBP (waveform min) is
    thresholded with the clinical boundaries below; a classification report
    is printed and a row-normalized confusion matrix is shown via matplotlib.
    """
    cls_gt = []
    cls_pred = []
    fig = plt.figure(figsize=(4, 4), dpi=200)
    # clinical category boundaries (mmHg)
    if split == 'SBP':
        boundary1 = 120
        boundary2 = 140
    elif split == 'DBP':
        boundary1 = 80
        boundary2 = 90
    else:
        assert False, 'Split should be SBP or DBP.'
    for i in (range(len(ys))):
        y = ys[i].ravel()
        pred = preds[i].ravel()
        if split == 'SBP':
            gt = max(y)
            pred = max(pred)
        else:
            gt = min(y)
            pred = min(pred)
        if(gt <= boundary1):
            cls_gt.append('Normo.')
        elif((gt > boundary1)and(gt <= boundary2)):
            cls_gt.append('Prehyp.')
        elif(gt > boundary2):
            cls_gt.append('Hyper.')
        else:
            print('bump')  # unreachable unless gt is NaN
        if(pred <= boundary1):
            cls_pred.append('Normo.')
        elif((pred > boundary1)and(pred <= boundary2)):
            cls_pred.append('Prehyp.')
        elif(pred > boundary2):
            cls_pred.append('Hyper.')
        else:
            print('bump')
    classes = ['Hyper.', 'Prehyp.', 'Normo.']
    print('{} Classification Accuracy'.format(split))
    print(classification_report(cls_gt, cls_pred, labels=classes, digits=5))
    print('Accuracy score:', accuracy_score(cls_gt,cls_pred))
    cm = confusion_matrix(cls_gt, cls_pred, labels=classes)
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]  # row-normalize (per-class recall)
    ax = plt.subplot(1,1,1)
    im = ax.imshow(cm, interpolation='nearest', cmap='GnBu')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="4%", pad=0.25)
    ax.figure.colorbar(im, cax=cax)
    cax.tick_params(labelsize=6)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes)
    ax.set_title(title, fontsize=12)
    ax.set_ylabel('True', fontsize=10)
    ax.set_xlabel('Predicted', fontsize=10)
    plt.setp(ax.get_yticklabels(), rotation=90, fontsize=7, va="center")
    plt.setp(ax.get_xticklabels(), fontsize=7)
    fmt = '.3f'
    thresh = cm.max() / 2.
    # annotate every cell; white text on dark cells for contrast
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center", fontsize=8,
                    color="white" if cm[i, j] > thresh else "black")
    ax.grid(False)
    fig.tight_layout()
    plt.show()
    plt.close()
def rescale_value(mu, val):
    """Normalize *val* by the per-row dynamic range (max - min) of *mu*."""
    spread = mu.max(dim=-1)[0] - mu.min(dim=-1)[0]
    return val / spread
def compute_epistemic_uncertainty(alpha, beta, v):
    """Epistemic std of a NIG head: sqrt(beta / (v * (alpha - 1)))."""
    ratio = beta / (v * (alpha - 1))
    return ratio ** 0.5
def inference(te_loader, model, args):
    """Run the model over the test loader; return de-normalized numpy arrays.

    Mirrors BP_prediction_subset.inference (minus save_dir): waveforms are
    reshaped into N_SEG sub-segments (T must be divisible by N_SEG) and
    epistemic uncertainty is reduced to one range-scaled scalar per segment.
    Relies on module globals `device` and `N_SEG`.
    """
    print('inference starts!')
    xs = []
    ys = []
    preds = []
    epises = []
    # normalization statistics written by the data-preparation script
    stats = pickle.load(open(args.stats_path, 'rb'))
    x_mean = stats['PPG_mean']
    x_std = stats['PPG_std']
    y_mean = stats['ABP_mean']
    y_std = stats['ABP_std']
    print('Number of batches:', len(te_loader))
    for itr, (x, y) in enumerate(te_loader):
        print('Iteration: [{} / {}]'.format(itr+1, len(te_loader)))
        B, C, T = y.shape
        x, y = x.to(device), y.to(device)
        pred, epis = model.compute_prediction_and_uncertainty(x)
        # [B, 1, T] -> [B*N_SEG, 1, T/N_SEG]: treat each segment independently
        x = x.contiguous().view(B * N_SEG, 1, T // N_SEG)
        y = y.contiguous().view(B * N_SEG, 1, T // N_SEG)
        pred = pred.contiguous().view(B * N_SEG, 1, T // N_SEG)
        epis = epis.contiguous().view(B * N_SEG, 1, T // N_SEG)
        # undo z-score normalization
        x = x_std * x + x_mean
        y = y_std * y + y_mean
        pred = y_std * pred + y_mean
        epis = y_std * epis.mean(dim=-1)  # epis: [B*N_SEG, 1]
        epis = rescale_value(pred, epis)  # scale by per-segment dynamic range
        x = x.permute(0, 2, 1).cpu().numpy()
        y = y.permute(0, 2, 1).cpu().numpy()
        pred = pred.permute(0, 2, 1).cpu().numpy()
        epis = epis.cpu().numpy()
        xs.append(x)
        ys.append(y)
        preds.append(pred)
        epises.append(epis)
    xs = np.vstack(xs)
    ys = np.vstack(ys)
    preds = np.vstack(preds)
    epises = np.vstack(epises).squeeze()
    return xs, ys, preds, epises
if __name__ == "__main__":
    # Compare SBP classification on all samples vs. the low-uncertainty subset.
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_desc = get_model_description(args, postprocess=args.postprocess)
    checkpoint_path = "params/{}/{}_model.pth".format(model_desc, args.load_type)
    te_dataset = ARTDataset(args.te_path)
    te_loader = DataLoader(te_dataset, batch_size=1024, shuffle=False)
    config = get_model_config(args, postprocess=args.postprocess)
    model = Model(**config).to(device)
    print("Load checkpoint from: {}".format(checkpoint_path))
    # NOTE(review): torch.load without map_location fails for CUDA checkpoints
    # on CPU-only hosts — consider map_location=device.
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    with torch.no_grad():
        xs, ys, preds, epises = inference(te_loader, model, args)
    # keep the subset_ratio fraction with the LOWEST epistemic uncertainty
    indices = np.argsort(epises)
    xs_sub = []
    ys_sub = []
    preds_sub = []
    sub_len = int(len(xs) * args.subset_ratio)
    for i, idx in enumerate(indices):
        xs_sub.append(xs[idx])
        ys_sub.append(ys[idx])
        preds_sub.append(preds[idx])
        if i+1 == sub_len:
            break
    xs_sub = np.array(xs_sub)
    ys_sub = np.array(ys_sub)
    preds_sub = np.array(preds_sub)
    classify_BP(ys, preds, title='All Samples', split='SBP')
    classify_BP(ys_sub, preds_sub, title='Subset', split='SBP')
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,601 | Saunak626/cmbper | refs/heads/master | /loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from math import pi, log
def Student_NLL(value, df, loc, scale):
    """Mean negative log-likelihood of *value* under Student-t(df, loc, scale)."""
    z = (value - loc) / scale
    # log of the Student-t normalization constant
    log_norm = (scale.log()
                + 0.5 * df.log()
                + 0.5 * log(pi)
                + torch.lgamma(0.5 * df)
                - torch.lgamma(0.5 * (df + 1.)))
    log_prob = -0.5 * (df + 1.) * torch.log1p(z ** 2. / df) - log_norm
    return -log_prob.mean()
def NIG_Reg(y, gamma, v, alpha, beta):
    """Evidence regularizer: |y - gamma| weighted by total evidence 2v + alpha.

    Note: *beta* is unused; it is kept for a uniform NIG-parameter signature.
    """
    evidence = 2 * v + alpha
    return (torch.abs(y - gamma) * evidence).mean()
def EvidentialRegression(evidential_output, y_true, coeff=1e-2):
    """Deep evidential regression loss: Student-t NLL + coeff * evidence reg.

    *evidential_output* packs (gamma, v, alpha, beta) along dim 1; the NIG
    marginal of y is a Student-t with the parameters built below.
    NOTE(review): the scale passed is beta*(v+1)/(v*alpha) — some DER
    formulations use its square root; verify the Student_NLL convention.
    """
    gamma, v, alpha, beta = evidential_output.chunk(4, 1)
    nll = Student_NLL(y_true, 2 * alpha, gamma, beta * (v + 1) / (v * alpha))
    reg = NIG_Reg(y_true, gamma, v, alpha, beta)
    return nll + coeff * reg
def compute_auxiliary_loss(y, pred, loss_aux_type):
    """Auxiliary loss on local extrema (systolic peaks / diastolic troughs).

    Penalizes the error at the per-segment argmax/argmin time indices of BOTH
    the target and the prediction, so extrema are matched in both directions.
    """
    # extrema located on the ground truth
    idx_seq_max, idx_seq_min = get_local_maxmin_index_sequence(y)
    y_local_max, y_local_min = y.gather(2, idx_seq_max), y.gather(2, idx_seq_min)
    pred_local_max, pred_local_min = pred.gather(2, idx_seq_max), pred.gather(2, idx_seq_min)
    loss_aux1 = compute_loss(pred_local_max, y_local_max, loss_aux_type)
    loss_aux2 = compute_loss(pred_local_min, y_local_min, loss_aux_type)
    # extrema located on the prediction
    idx_seq_max, idx_seq_min = get_local_maxmin_index_sequence(pred)
    y_local_max, y_local_min = y.gather(2, idx_seq_max), y.gather(2, idx_seq_min)
    pred_local_max, pred_local_min = pred.gather(2, idx_seq_max), pred.gather(2, idx_seq_min)
    loss_aux3 = compute_loss(pred_local_max, y_local_max, loss_aux_type)
    loss_aux4 = compute_loss(pred_local_min, y_local_min, loss_aux_type)
    return loss_aux1 + loss_aux2 + loss_aux3 + loss_aux4
def compute_loss(y, out, loss_type, zeta=None):
    """Dispatch the training loss.

    y: target waveform [B, 1, T].
    out: network output; channel 0 is the point prediction and all 4 channels
        together are the evidential NIG parameters.
    loss_type: 'L1' | 'MSE' | 'evi'. zeta: regularizer weight for 'evi'.
    Raises ValueError for an unknown loss_type (previously this fell through
    and crashed with UnboundLocalError).
    """
    if loss_type == 'L1':
        pred = out[:, :1]
        loss = nn.L1Loss()(pred, y)
    elif loss_type == 'MSE':
        pred = out[:, :1]
        # Bug fix: the MSE branch previously measured against the full
        # 4-channel `out`, silently broadcasting y over the evidential
        # channels; measure against the point prediction only, like 'L1'.
        loss = nn.MSELoss()(pred, y)
    elif loss_type == 'evi':
        loss = EvidentialRegression(out, y, zeta)
    else:
        raise ValueError('unknown loss_type: {}'.format(loss_type))
    return loss
def performance_check(out, y, stats, n_seg=10):
    """De-normalize and compute MAE/MSE plus SBP/MAP/DBP absolute errors.

    Each waveform is split into n_seg equal segments (T must be divisible by
    n_seg); SBP/DBP/MAP are per-segment max/min/mean errors, averaged.
    """
    out = stats['ABP_std'] * out + stats['ABP_mean']
    y = stats['ABP_std'] * y + stats['ABP_mean']
    B, C, T = y.shape
    seg_len = T // n_seg
    out = out.contiguous().view(B * n_seg, 1, seg_len)
    y = y.contiguous().view(B * n_seg, 1, seg_len)
    diff = out - y
    MAE = diff.abs().mean()
    MSE = diff.pow(2).mean()
    SBP = (out.max(dim=2)[0] - y.max(dim=2)[0]).abs().mean()
    MAP = (out.mean(dim=2) - y.mean(dim=2)).abs().mean()
    DBP = (out.min(dim=2)[0] - y.min(dim=2)[0]).abs().mean()
    return MAE, MSE, SBP, MAP, DBP
def get_local_maxmin_index_sequence(y, seg_size=100):
    """Absolute time indices of the per-segment argmax/argmin of *y*.

    y: [B, 1, T]. Returns two long tensors of shape [B, 1, ceil(T/seg_size)]
    holding each segment's maximum and minimum positions.
    """
    B, C, T = y.shape
    assert C == 1
    max_chunks, min_chunks = [], []
    for start in range(0, T, seg_size):
        segment = y[:, :, start: start + seg_size]
        # argmax/argmin are segment-relative; offset back to absolute time
        max_chunks.append(segment.max(dim=2)[1].unsqueeze(1) + start)
        min_chunks.append(segment.min(dim=2)[1].unsqueeze(1) + start)
    empty = torch.tensor([]).view(B, C, 0).to(y.device).long()
    idx_seq_max = torch.cat([empty] + max_chunks, dim=2)
    idx_seq_min = torch.cat([empty] + min_chunks, dim=2)
    return idx_seq_max, idx_seq_min
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,602 | Saunak626/cmbper | refs/heads/master | /data_handling.py | import h5py
import numpy as np
import os
from tqdm import tqdm
import pickle
import argparse
def prepare_data(data_path, out_dir):
    """Split the hdf5 record set into train/valid/test pickles.

    Record i holds (ABP, PPG) at indices (0, 1). The first 90k records become
    the training split, the next 10k validation, and the remainder test. All
    three splits are z-scored with the TRAINING mean/std (no leakage); those
    statistics are saved to stats.p for later de-normalization.
    """
    val_start = 90000
    val_end = 100000
    fl = h5py.File(data_path, 'r')
    X_train = []
    Y_train = []
    X_val = []
    Y_val = []
    X_test = []
    Y_test = []
    for i in tqdm(range(0, val_start), desc='Training Data'):
        X_train.append(np.array(fl['data'][i][1]).reshape(1, -1))  # PPG input
        Y_train.append(np.array(fl['data'][i][0]).reshape(1, -1))  # ABP target
    for i in tqdm(range(val_start, val_end), desc='Validation Data'):
        X_val.append(np.array(fl['data'][i][1]).reshape(1, -1))
        Y_val.append(np.array(fl['data'][i][0]).reshape(1, -1))
    for i in tqdm(range(val_end, len(fl['data'])), desc='Test Data'):
        X_test.append(np.array(fl['data'][i][1]).reshape(1, -1))
        Y_test.append(np.array(fl['data'][i][0]).reshape(1, -1))
    X_train = np.array(X_train)
    Y_train = np.array(Y_train)
    # normalization statistics from the TRAINING split only (sample std, ddof=1)
    X_mean = np.mean(X_train)
    X_std = np.std(X_train, ddof=1)
    Y_mean = np.mean(Y_train)
    Y_std = np.std(Y_train, ddof=1)
    X_train -= X_mean
    X_train /= X_std
    Y_train -= Y_mean
    Y_train /= Y_std
    pickle.dump({'PPG': X_train, 'ABP': Y_train}, open(os.path.join(out_dir, 'train.p'), 'wb'))
    X_val = np.array(X_val)
    X_val -= X_mean
    X_val /= X_std
    Y_val = np.array(Y_val)
    Y_val -= Y_mean
    Y_val /= Y_std
    pickle.dump({'PPG': X_val, 'ABP': Y_val}, open(os.path.join(out_dir, 'valid.p'), 'wb'))
    X_test = np.array(X_test)
    X_test -= X_mean
    X_test /= X_std
    Y_test = np.array(Y_test)
    Y_test -= Y_mean
    Y_test /= Y_std
    pickle.dump({'PPG': X_test, 'ABP': Y_test}, open(os.path.join(out_dir, 'test.p'), 'wb'))
    # persist the statistics so evaluation scripts can de-normalize
    pickle.dump({'PPG_mean': X_mean,
                 'PPG_std': X_std,
                 'ABP_mean': Y_mean,
                 'ABP_std': Y_std}, open(os.path.join(out_dir, 'stats.p'), 'wb'))
def main(args):
    """Create the output directory if needed and run data preparation."""
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    prepare_data(args.data_path, args.out_dir)
if __name__ == '__main__':
    # CLI entry point: convert the preprocessed hdf5 file into split pickles.
    parser = argparse.ArgumentParser(description='Data Preparation',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_path', '-i', type=str, default='DB/data.hdf5', help='Preprocessed data path')
    parser.add_argument('--out_dir', '-o', type=str, default='datasets', help='Out directory')
    args = parser.parse_args()
    main(args)
| {"/BP_prediction.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/train.py": ["/data_manager.py", "/model.py", "/utils.py", "/loss.py", "/args.py"], "/viz_uncertainty.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_prediction_subset.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"], "/BP_classification.py": ["/data_manager.py", "/model.py", "/utils.py", "/args.py"]} |
52,625 | aaalgo/aardvark | refs/heads/master | /zoo/wrn.py | from tensorflow import variable_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers import conv2d, max_pool2d, avg_pool2d, flatten, fully_connected, batch_norm
# https://arxiv.org/pdf/1605.07146.pdf
# https://arxiv.org/abs/1603.05027
def original_conv2d (net, depth, filter_size, step = 1):
    """Conv with batch-norm, as in the original WRN (conv-BN ordering).

    `step` is passed as the contrib conv2d stride; the layer's default ReLU
    activation applies after normalization.
    """
    return conv2d(net, depth, filter_size, step, normalizer_fn=batch_norm)
def rewired_conv2d(net, depth, filter_size, step=1):
    """Pre-activation conv: BN -> ReLU -> conv (https://arxiv.org/abs/1603.05027).

    Fixes the original, which referenced an undefined name `tf` (this module
    only imports specific symbols from tensorflow) and took no stride even
    though block() calls myconv2d with one; `step` defaults to 1 so existing
    three-argument calls keep working.
    """
    from tensorflow import nn  # local import: module level only imports specific symbols
    net = batch_norm(net)
    net = nn.relu(net)
    # BN and activation already applied above, so disable both on the conv itself.
    net = conv2d(net, depth, filter_size, step, normalizer_fn=None, activation_fn=None)
    return net
myconv2d = rewired_conv2d
def block(net, config, n, depth, pool):
    """Residual group: optionally downsample, then n repeats of the conv stack.

    config lists the filter sizes of one stack (e.g. [3, 3]). When pool is
    True the skip path is max-pooled 2x2 and the very first conv uses stride
    2, so both paths downsample together.
    """
    branch = net
    if pool:
        net = max_pool2d(net, 2, 2)
    for _ in range(n):
        for fs in config:
            if pool:
                step = 2
                pool = False  # only the first conv of the group downsamples
            else:
                step = 1
            # Fix: neither conv variant accepts a normalizer_fn kwarg (each
            # handles normalization internally); the original call passed
            # normalizer_fn=batch_norm and raised TypeError at graph build.
            branch = myconv2d(branch, depth, fs, step)
    return net + branch
def wrn (net, k, n, num_classes=None):
    """Wide ResNet (https://arxiv.org/pdf/1605.07146.pdf).

    k: width multiplier; n: conv stacks per group. The trailing comments give
    the spatial size assuming a 32x32 (CIFAR-style) input. Returns 1x1 class
    logits when num_classes is given, otherwise the 8x8 feature map.
    """
    net = block(net, [3], n, 16, False) # 32
    net = block(net, [3,3], n, 16*k, False) # 32
    net = block(net, [3,3], n, 32*k, True) # 16
    net = block(net, [3,3], n, 64*k, True) # 8
    if not num_classes is None:
        # global average pool then 1x1 conv as the classifier head
        net = avg_pool2d(net, 8, 8)
        net = conv2d(net, num_classes, 1, activation_fn=None)
        pass
    return net
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,626 | aaalgo/aardvark | refs/heads/master | /zoo/vgg.py | from tensorflow import nn
from tensorflow import variable_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers import conv2d, max_pool2d, flatten, fully_connected, batch_norm
# https://arxiv.org/pdf/1409.1556.pdf
# Notes:
# - default size 224x224
# - input should be normalized by substracting mean pixel value
# - conv stride is always 1, down-sizing achieved by max_pool
# - conv padding is SAME
# - all intermediate conv2d have relu
# Tensorflow defaults
# - conv2: SAME, relu
# - max_pool, VALID
def classification_head (net, num_classes):
    """VGG fully-connected classifier: flatten -> 4096 -> 4096 -> logits."""
    net = flatten(net)
    net = fully_connected(net, 4096)
    net = fully_connected(net, 4096)
    # no activation on the final layer: raw logits
    net = fully_connected(net, num_classes, activation_fn=None)
    return net
# Per-block flags used in `configs` below.
F1x1 = 1  # append an extra 1x1 conv after the 3x3 convs (VGG variant C)
Flrn = 2  # local response normalization (VGG A-LRN); raises in backbone()
# VGG variants A-E (Table 1 of the paper). Each entry is a list of blocks;
# a block is [depth, number_of_3x3_convs] optionally followed by a flag.
configs = {'a': [[64, 1], [128, 1], [256, 2], [512, 2], [512, 2]],
           'a_lrn': [[64, 1, Flrn], [128, 1], [256, 2], [512, 2], [512, 2]],
           'b': [[64, 2], [128, 2], [256, 2], [512, 2], [512, 2]],
           'c': [[64, 2], [128, 2], [256, 2, F1x1], [512, 2, F1x1], [512, 2, F1x1]],
           'd': [[64, 2], [128, 2], [256, 3], [512, 3], [512, 3]],
           'e': [[64, 2], [128, 2], [256, 4], [512, 4], [512, 4]],
          }
def backbone (net, config, conv2d_params):
    """Stack VGG conv blocks; each block ends with a 2x2 stride-2 max-pool.

    conv2d_params: extra kwargs forwarded to every conv2d (e.g. batch norm).
    Raises for the Flrn flag, which is not implemented.
    """
    for block in config:
        if len(block) == 2:
            depth, times = block
            flag = 0
        else:
            depth, times, flag = block
        for _ in range(times):
            net = conv2d(net, depth, 3, **conv2d_params)
            pass
        if flag == F1x1:
            # variant C: one extra 1x1 conv at the end of the block
            net = conv2d(net, depth, 1, **conv2d_params)
            pass
        elif flag == Flrn:
            raise Exception('LRN not implemented')
        net = max_pool2d(net, 2, 2)
    return net
def vgg(net, num_classes=None, flavor='a', scope=None, conv2d_params=None):
    """Build a VGG network (https://arxiv.org/pdf/1409.1556.pdf).

    net: input tensor (~224x224, mean-pixel-subtracted). flavor: a key of
    `configs` ('a'..'e'). conv2d_params: extra kwargs for every conv2d.
    Returns class logits when num_classes is given, else the conv features.
    """
    # Fix: avoid the mutable default argument ({}); behavior is unchanged.
    if conv2d_params is None:
        conv2d_params = {}
    if scope is None:
        scope = 'vgg_' + flavor
    with variable_scope(scope):
        net = backbone(net, configs[flavor], conv2d_params)
        if num_classes is not None:
            net = classification_head(net, num_classes)
    return net
def vgg_bn (net, is_training, num_classes=None, flavor='a', scope=None):
    """VGG with batch normalization applied to every conv layer."""
    return vgg(net, num_classes, flavor, scope,
               {"normalizer_fn": batch_norm,
                "normalizer_params": {"is_training": is_training,
                                      "decay": 0.9,
                                      "epsilon": 5e-4}})
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,627 | aaalgo/aardvark | refs/heads/master | /gate/import.py | #!/usr/bin/env python3
import os
import picpac
from glob import glob
dic = { 'nogate.txt':0,
'gateclose.txt':1,
'gatefar.txt':2}
def load_file(path):
    """Read *path* and return its raw bytes."""
    with open(path, 'rb') as handle:
        return handle.read()
def load_anno(path):
    """Scan label files under *path*; map each listed image name to a class id.

    Every file name in the directory must be a key of the module-level `dic`
    (e.g. nogate.txt -> 0); each line of a file is one image file name.
    """
    labels = {}
    for fname in os.listdir(path):
        label = dic[fname]
        with open(os.path.join(path, fname)) as handle:
            for line in handle:
                labels[line.strip('\n')] = label
    return labels
anno = load_anno("/shared/s2/users/wdong/football/annotation")
def import_db(db_path, img_path):
    """Write every annotated thumbnail under *img_path* into a picpac db.

    Images are matched against the module-level `anno` dict by bare file
    name; unannotated images are skipped silently.
    """
    db = picpac.Writer(db_path, picpac.OVERWRITE)
    for image in glob(img_path + "*/*.jpg"):
        file_name = image.split("/")[-1]
        # membership test on the dict directly (not .keys()): same result, idiomatic
        if file_name in anno:
            db.append(anno[file_name], load_file(image), image.encode('ascii'))
#import_db('./scratch/train.db','/shared/s2/users/wdong/football/trainingthumbs/')
# Build the validation db from the test thumbnails.
import_db('./scratch/val.db','/shared/s2/users/wdong/football/testthumbs/')
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,628 | aaalgo/aardvark | refs/heads/master | /zoo/dsb_selim/resnets.py | # -*- coding: utf-8 -*-
"""
keras_resnet.models._2d
~~~~~~~~~~~~~~~~~~~~~~~
This module implements popular two-dimensional residual models.
"""
import keras.backend
import keras.layers
import keras.models
import keras.regularizers
def ResNet(inputs, blocks, block, include_top=True, classes=1000, numerical_names=None, *args, **kwargs):
    """
    Constructs a `keras.models.Model` object using the given block count.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param block: a residual block (e.g. an instance of `keras_resnet.blocks.basic_2d`)
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :param numerical_names: list of bool, same size as blocks, used to indicate whether names of layers should include numbers or letters
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    Usage:
        >>> import keras_resnet.blocks
        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> blocks = [2, 2, 2, 2]
        >>> block = keras_resnet.blocks.basic_2d
        >>> model = keras_resnet.models.ResNet(x, blocks, block, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    # BatchNorm axis depends on the backend's image data layout.
    if keras.backend.image_data_format() == "channels_last":
        axis = 3
    else:
        axis = 1
    if numerical_names is None:
        numerical_names = [True] * len(blocks)
    # Stem: 7x7/2 conv + BN + ReLU + strided max-pool.
    x = keras.layers.ZeroPadding2D(padding=3, name="padding_conv1")(inputs)
    x = keras.layers.Conv2D(64, (7, 7), strides=(2, 2), use_bias=False, name="conv1")(x)
    x = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn_conv1")(x)
    x = keras.layers.Activation("relu", name="conv1_relu")(x)
    x = keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="pool1")(x)
    features = 64
    outputs = []
    # Residual stages; feature width doubles after every stage.
    for stage_id, iterations in enumerate(blocks):
        for block_id in range(iterations):
            x = block(features, stage_id, block_id, numerical_name=(block_id > 0 and numerical_names[stage_id]))(x)
        features *= 2
        outputs.append(x)
    if include_top:
        assert classes > 0
        x = keras.layers.GlobalAveragePooling2D(name="pool5")(x)
        x = keras.layers.Dense(classes, activation="softmax", name="fc1000")(x)
        return keras.models.Model(inputs=inputs, outputs=x, *args, **kwargs)
    else:
        # Else output each stages features
        return keras.models.Model(inputs=inputs, outputs=outputs, *args, **kwargs)
def ResNet18(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet18 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    Usage:
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = ResNet18(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [2, 2, 2, 2]
    # Use the locally defined basic_2d block: keras_resnet.blocks is never
    # imported in this file, so keras_resnet.blocks.basic_2d would fail at
    # call time.  ResNet50/101/152/200 already use the local bottleneck_2d.
    return ResNet(inputs, blocks, block=basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet34(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet34 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    Usage:
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = ResNet34(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 6, 3]
    # Use the locally defined basic_2d block (keras_resnet.blocks is never
    # imported in this file; see ResNet18).
    return ResNet(inputs, blocks, block=basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet50(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet50 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: per-stage block counts; defaults to the standard [3, 4, 6, 3]
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    """
    blocks = [3, 4, 6, 3] if blocks is None else blocks
    # ResNet50 is shallow enough that letter suffixes suffice for all stages.
    return ResNet(inputs, blocks,
                  numerical_names=[False, False, False, False],
                  block=bottleneck_2d,
                  include_top=include_top, classes=classes, *args, **kwargs)
def ResNet101(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet101 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: per-stage block counts; defaults to the standard [3, 4, 23, 3]
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    """
    blocks = [3, 4, 23, 3] if blocks is None else blocks
    # Middle stages are deep, so their layers are numbered rather than lettered.
    return ResNet(inputs, blocks,
                  numerical_names=[False, True, True, False],
                  block=bottleneck_2d,
                  include_top=include_top, classes=classes, *args, **kwargs)
def ResNet152(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet152 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: per-stage block counts; defaults to the standard [3, 8, 36, 3]
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    """
    blocks = [3, 8, 36, 3] if blocks is None else blocks
    # Middle stages are deep, so their layers are numbered rather than lettered.
    return ResNet(inputs, blocks,
                  numerical_names=[False, True, True, False],
                  block=bottleneck_2d,
                  include_top=include_top, classes=classes, *args, **kwargs)
def ResNet200(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet200 specifications.
    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: per-stage block counts; defaults to the standard [3, 24, 36, 3]
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)
    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)
    """
    blocks = [3, 24, 36, 3] if blocks is None else blocks
    # Middle stages are deep, so their layers are numbered rather than lettered.
    return ResNet(inputs, blocks,
                  numerical_names=[False, True, True, False],
                  block=bottleneck_2d,
                  include_top=include_top, classes=classes, *args, **kwargs)
import keras.layers
import keras.regularizers
import keras_resnet.layers
# Conv2D keyword arguments shared by every residual block below.
parameters = {
    "kernel_initializer": "he_normal"
}
def basic_2d(filters, stage=0, block=0, kernel_size=3, numerical_name=False, stride=None):
    """
    A two-dimensional basic block.
    :param filters: the output’s feature space
    :param stage: int representing the stage of this block (starting from 0)
    :param block: int representing this block (starting from 0)
    :param kernel_size: size of the kernel
    :param numerical_name: if true, uses numbers to represent blocks instead of chars (ResNet{101, 152, 200})
    :param stride: int representing the stride used in the shortcut and the first conv layer, default derives stride from block id
    Usage:
        >>> import keras_resnet.blocks
        >>> keras_resnet.blocks.basic_2d(64)
    """
    # First block of every stage except the first downsamples by 2.
    if stride is None:
        if block != 0 or stage == 0:
            stride = 1
        else:
            stride = 2
    if keras.backend.image_data_format() == "channels_last":
        axis = 3
    else:
        axis = 1
    # Layer-name suffix: letters by default, "b<N>" for deep stages.
    if block > 0 and numerical_name:
        block_char = "b{}".format(block)
    else:
        block_char = chr(ord('a') + block)
    stage_char = str(stage + 2)
    def f(x):
        # Main branch: two 3x3 convs, BN after each, ReLU between them.
        y = keras.layers.ZeroPadding2D(padding=1, name="padding{}{}_branch2a".format(stage_char, block_char))(x)
        y = keras.layers.Conv2D(filters, kernel_size, strides=stride, use_bias=False, name="res{}{}_branch2a".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2a".format(stage_char, block_char))(y)
        y = keras.layers.Activation("relu", name="res{}{}_branch2a_relu".format(stage_char, block_char))(y)
        y = keras.layers.ZeroPadding2D(padding=1, name="padding{}{}_branch2b".format(stage_char, block_char))(y)
        y = keras.layers.Conv2D(filters, kernel_size, use_bias=False, name="res{}{}_branch2b".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2b".format(stage_char, block_char))(y)
        # Projection shortcut only on the first block of a stage.
        if block == 0:
            shortcut = keras.layers.Conv2D(filters, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch1".format(stage_char, block_char), **parameters)(x)
            shortcut = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch1".format(stage_char, block_char))(shortcut)
        else:
            shortcut = x
        y = keras.layers.Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
        y = keras.layers.Activation("relu", name="res{}{}_relu".format(stage_char, block_char))(y)
        return y
    return f
def bottleneck_2d(filters, stage=0, block=0, kernel_size=3, numerical_name=False, stride=None):
    """
    A two-dimensional bottleneck block.
    :param filters: the output’s feature space
    :param stage: int representing the stage of this block (starting from 0)
    :param block: int representing this block (starting from 0)
    :param kernel_size: size of the kernel
    :param numerical_name: if true, uses numbers to represent blocks instead of chars (ResNet{101, 152, 200})
    :param stride: int representing the stride used in the shortcut and the first conv layer, default derives stride from block id
    Usage:
        >>> import keras_resnet.blocks
        >>> bottleneck_2d(64)
    """
    # First block of every stage except the first downsamples by 2.
    if stride is None:
        if block != 0 or stage == 0:
            stride = 1
        else:
            stride = 2
    if keras.backend.image_data_format() == "channels_last":
        axis = 3
    else:
        axis = 1
    # Layer-name suffix: letters by default, "b<N>" for deep stages.
    if block > 0 and numerical_name:
        block_char = "b{}".format(block)
    else:
        block_char = chr(ord('a') + block)
    stage_char = str(stage + 2)
    def f(x):
        # Main branch: 1x1 reduce, 3x3 conv, 1x1 expand to 4*filters.
        y = keras.layers.Conv2D(filters, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch2a".format(stage_char, block_char), **parameters)(x)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2a".format(stage_char, block_char))(y)
        y = keras.layers.Activation("relu", name="res{}{}_branch2a_relu".format(stage_char, block_char))(y)
        y = keras.layers.ZeroPadding2D(padding=1, name="padding{}{}_branch2b".format(stage_char, block_char))(y)
        y = keras.layers.Conv2D(filters, kernel_size, use_bias=False, name="res{}{}_branch2b".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2b".format(stage_char, block_char))(y)
        y = keras.layers.Activation("relu", name="res{}{}_branch2b_relu".format(stage_char, block_char))(y)
        y = keras.layers.Conv2D(filters * 4, (1, 1), use_bias=False, name="res{}{}_branch2c".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2c".format(stage_char, block_char))(y)
        # Projection shortcut only on the first block of a stage.
        if block == 0:
            shortcut = keras.layers.Conv2D(filters * 4, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch1".format(stage_char, block_char), **parameters)(x)
            shortcut = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch1".format(stage_char, block_char))(shortcut)
        else:
            shortcut = x
        y = keras.layers.Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
        y = keras.layers.Activation("relu", name="res{}{}_relu".format(stage_char, block_char))(y)
        return y
    return f
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,629 | aaalgo/aardvark | refs/heads/master | /aardvark.py | #!/usr/bin/env python3
# This is the basic aaalgo tensorflow model training framework.
import errno
import os
import sys
import subprocess
AARDVARK_HOME = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(AARDVARK_HOME, 'zoo/slim'))
from abc import ABC, abstractmethod
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# C++ code, python3 setup.py build
import time, datetime
from multiprocessing import Process, Queue
import logging
import simplejson as json
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import lovasz_losses_tf
from nets import nets_factory, resnet_utils
try:
    import picpac
except:
    # Fallback: download a prebuilt binary module next to this file
    # (CPython 3.5, linux x86_64 only) and retry the import.
    picpac_so = os.path.join(AARDVARK_HOME, 'picpac.cpython-35m-x86_64-linux-gnu.so')
    if not os.path.exists(picpac_so):
        subprocess.check_call('wget http://www.aaalgo.com/picpac/binary/picpac.cpython-35m-x86_64-linux-gnu.so -O %s' % picpac_so, shell=True)
    pass
    import picpac
from tf_utils import *
from zoo import fuck_slim
import __main__
print("PICPAC:", picpac.__file__)
# Command-line flags (tf.app.flags); FLAGS is read throughout this module.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('classes', 2, 'number of classes')
flags.DEFINE_bool('dice', None, 'use dice loss for segmentation')
flags.DEFINE_bool('lovasz', None, 'use lovasz loss for segmentation')
# PicPac-related parameters
flags.DEFINE_string('db', None, 'training db')
flags.DEFINE_string('val_db', None, 'validation db')
flags.DEFINE_string('mixin', None, 'db to be mixed into training')
flags.DEFINE_integer('channels', 3, 'image channels')
flags.DEFINE_boolean('cache', True, 'cache images in memory') # cache small db in memory
flags.DEFINE_string('augments', 'augments.json', 'augment config file')
flags.DEFINE_string('colorspace', 'RGB', 'colorspace')
flags.DEFINE_integer('picpac_dump', 20, 'dump training example for debugging')
flags.DEFINE_string('border_type', 'constant', '')
flags.DEFINE_integer('batch', 1, 'batch size')
flags.DEFINE_integer('max_size', 200000, 'max image size')
flags.DEFINE_integer('min_size', 1, 'min image size')
flags.DEFINE_integer('fix_width', 0, '')
flags.DEFINE_integer('fix_height', 0, '')
flags.DEFINE_integer('clip_stride', 16, '')
flags.DEFINE_integer('clip_shift', 0, '')
# model saving parameters
flags.DEFINE_string('model', 'model', 'model directory')
flags.DEFINE_string('resume', None, 'resume training from this model')
flags.DEFINE_integer('max_to_keep', 100, 'models to keep')
flags.DEFINE_integer('epoch_steps', None, 'by default all images')
flags.DEFINE_integer('max_epochs', 500, '')
flags.DEFINE_integer('ckpt_epochs', 10, '')
flags.DEFINE_integer('val_epochs', 10, '')
# optimizer settings
flags.DEFINE_float('lr', 0.01, 'Initial learning rate.')
flags.DEFINE_float('decay_rate', 0.95, '')
flags.DEFINE_float('decay_steps', 500, '')
flags.DEFINE_boolean('adam', True, '')
# stock slim networks
flags.DEFINE_float('weight_decay', 0.00004, '')
flags.DEFINE_boolean('patch_slim', False, '')
flags.DEFINE_boolean('compact', False, 'compact progress bar')
flags.DEFINE_boolean('multiprocess', False, '')
def load_augments (is_training):
    """Return the picpac augmentation list configured via --augments.

    Augmentations apply to training streams only; validation always
    gets an empty list.
    """
    if not (is_training and FLAGS.augments):
        return []
    with open(FLAGS.augments, 'r') as fp:
        augments = json.loads(fp.read())
    print("Using augments:")
    print(json.dumps(augments))
    return augments
def create_picpac_stream (path, is_training, extra_config):
    """Open a picpac ImageStream over the db at *path*.

    Training streams loop/shuffle/stratify and dump a few samples for
    inspection; validation streams run single-threaded.
    `extra_config` (from Model.extra_stream_config) is merged in last and
    may override any default.
    """
    assert os.path.exists(path)
    print("CACHE:", FLAGS.cache)
    # check db size, warn not to cache if file is big
    statinfo = os.stat(path)
    if statinfo.st_size > 0x40000000 and FLAGS.cache:
        print_red("DB is probably too big too be cached, consider adding --cache 0")
    config = {"db": path,
              "loop": is_training,
              "shuffle": is_training,
              "reshuffle": is_training,
              "annotate": [],
              "channels": FLAGS.channels,
              "stratify": is_training,
              "dtype": "float32",
              "batch": FLAGS.batch,
              "colorspace": FLAGS.colorspace,
              "cache": FLAGS.cache,
              "transforms": []
             }
    if is_training:
        config["dump"] = FLAGS.picpac_dump # dump 20 training samples for debugging and see
    else:
        config['threads'] = 1
    if is_training and not FLAGS.mixin is None:
        print("mixin support is incomplete in new picpac.")
        assert os.path.exists(FLAGS.mixin)
        config['mixin'] = FLAGS.mixin
        config['mixin_group_reset'] = 0
        config['mixin_group_delta'] = 1
        pass
    config.update(extra_config)
    return picpac.ImageStream(config)
class Model(ABC):
    """Abstract training model: subclasses build the graph and feed data."""
    def __init__ (self):
        # build model here
        super().__init__()
        self.metrics = []                # scalar tensors displayed during training
        self.variables_to_train = None   # None means train all trainable variables
    @abstractmethod
    def build_graph (self):
        """Construct the TF graph; register losses via tf.losses.add_loss."""
        pass
    def init_session (self, sess):
        """Hook invoked once after variable initialization (e.g. load weights)."""
        pass
    @abstractmethod
    def create_stream (self, path, is_training):
        """Return an input stream over the db at *path*."""
        pass
    @abstractmethod
    def feed_dict (self, record):
        """Convert one stream record into a feed_dict for sess.run."""
        pass
    pass
class Model2D (Model):
    """2-D image model whose input streams come from picpac."""
    def __init__ (self):
        # build model here
        super().__init__()
    def extra_stream_config (self, is_training):
        """Extra picpac config merged into the stream; subclasses override."""
        return {}
    def create_stream (self, path, is_training):
        # BUG FIX: previously passed the literal True to extra_stream_config,
        # so validation streams were built with training-time transforms
        # (augments, clip shift); propagate the actual is_training flag.
        return create_picpac_stream(path, is_training, self.extra_stream_config(is_training))
    pass
class ClassificationModel(Model2D):
    """Whole-image classification: softmax cross-entropy loss + top-1 accuracy."""
    def __init__ (self):
        super().__init__()
        pass
    @abstractmethod
    def inference (self, images, classes, is_training):
        """Return unnormalized logits; to be implemented by subclasses."""
        pass
    def build_graph (self):
        is_training = tf.placeholder(tf.bool, name="is_training")
        images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
        labels = tf.placeholder(tf.int32, shape=(None,))
        self.is_training = is_training
        self.images = images
        self.labels = labels
        logits = tf.identity(self.inference(images, FLAGS.classes, is_training), name='logits')
        probs = tf.nn.softmax(logits, name='probs')
        # 'prob' = probability of class 1, convenient for binary problems
        prob = tf.squeeze(tf.slice(tf.nn.softmax(logits), [0,1], [-1,1]), 1, name='prob')
        # cross-entropy
        xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
        xe = tf.reduce_mean(xe, name='xe')
        # accuracy
        acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
        acc = tf.reduce_mean(acc, name='ac')
        # loss
        tf.losses.add_loss(xe)
        self.metrics.extend([xe, acc])
        pass
    def extra_stream_config (self, is_training):
        # resize -> (training-only) augments -> clip to fixed size
        augments = load_augments(is_training)
        shift = 0
        if is_training:
            shift = FLAGS.clip_shift
        return {"transforms": [
                  {"type": "resize", "max_size": FLAGS.max_size, "min_size": FLAGS.min_size},
                  ] + augments + [
                  {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "border_type": FLAGS.border_type},
                  ]
               }
    def feed_dict (self, record, is_training = True):
        # load picpac record into feed_dict
        meta, images = record
        return {self.is_training: is_training,
                self.images: images,
                self.labels: meta.labels}
    pass
class SegmentationModel(Model2D):
    """Pixel-wise segmentation.

    classes == 1 uses a single sigmoid channel; otherwise softmax.
    Loss selectable via --dice / --lovasz flags, default cross-entropy.
    """
    def __init__ (self):
        super().__init__()
        pass
    def inference (self, images, is_training):
        """Return per-pixel logits; to be implemented by subclasses."""
        pass
    def build_graph (self):
        is_training = tf.placeholder(tf.bool, name="is_training")
        images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
        labels = tf.placeholder(tf.int32, shape=(None, None, None, 1))
        self.is_training = is_training
        self.images = images
        self.labels = labels
        logits = self.inference(images, FLAGS.classes, is_training)
        self.logits = logits
        labels1 = tf.reshape(labels, (-1,))
        if FLAGS.classes == 1:
            # binary segmentation, single sigmoid channel
            logits1 = tf.reshape(logits, (-1,))
            probs = tf.sigmoid(logits, name='probs')
            prob = tf.squeeze(probs, 3, name='prob')
            self.probs = probs
            if FLAGS.dice:
                loss = tf.identity(dice_loss(tf.cast(labels1, tf.float32), prob), name='di')
            elif FLAGS.lovasz:
                loss = lovasz_losses_tf.lovasz_hinge(logits=logits, labels=labels1)
                loss = tf.identity(loss, name='blov')
            else:
                loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(labels1, tf.float32))
                loss = tf.reduce_mean(loss, name='bxe')
            pass
        else: # multiple channels
            logits1 = tf.reshape(logits, (-1, FLAGS.classes))
            probs = tf.nn.softmax(logits, name='probs')
            self.probs = probs
            prob = tf.identity(probs[:, :, :, 1], name='prob')
            if FLAGS.dice:
                assert False, 'Not supported'
            elif FLAGS.lovasz:
                loss = lovasz_losses_tf.lovasz_softmax(probs, labels1, per_image=True)
                loss = tf.identity(loss, name='lov')
            else:
                # accuracy
                acc = tf.cast(tf.nn.in_top_k(logits1, labels1, 1), tf.float32)
                acc = tf.reduce_mean(acc, name='acc')
                self.metrics.append(acc)
                # cross-entropy
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits1, labels=labels1)
                loss = tf.reduce_mean(loss, name='xe')
            pass
        tf.losses.add_loss(loss)
        self.metrics.append(loss)
        pass
    def extra_stream_config (self, is_training):
        augments = load_augments(is_training)
        shift = 0
        if is_training:
            shift = FLAGS.clip_shift
        return {"annotate": [1],
                "transforms": [
                  {"type": "resize", "max_size": FLAGS.max_size, "min_size": FLAGS.min_size},
                  ] + augments + [
                  {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride, "border_type": FLAGS.border_type},
                  {"type": "rasterize"},
                  ]
               }
    def feed_dict (self, record, is_training = True):
        # load picpac record into feed_dict
        _, images, labels = record
        return {self.is_training: is_training,
                self.images: images,
                self.labels: labels}
    pass
class SegmentationModel3D (Model):
    """Volumetric segmentation over fixed-size cubes of size**3 voxels."""
    def __init__ (self, size=128):
        super().__init__()
        self.size = size    # cube edge length in voxels
        pass
    def inference (self, images, is_training):
        """Return per-voxel logits; to be implemented by subclasses."""
        pass
    def build_graph (self):
        is_training = tf.placeholder(tf.bool, name="is_training")
        images = tf.placeholder(tf.float32, shape=(None, self.size, self.size, self.size, FLAGS.channels), name="images")
        labels = tf.placeholder(tf.int32, shape=(None, self.size, self.size, self.size))
        self.is_training = is_training
        self.images = images
        self.labels = labels
        logits = tf.identity(self.inference(images, FLAGS.classes, is_training), name='logits')
        probs = tf.nn.softmax(logits, name='probs')
        # flatten to (voxels, classes) for the classification losses
        logits1 = tf.reshape(logits, (-1, FLAGS.classes))
        labels1 = tf.reshape(labels, (-1,))
        # accuracy
        acc = tf.cast(tf.nn.in_top_k(logits1, labels1, 1), tf.float32)
        acc = tf.reduce_mean(acc, name='acc')
        self.metrics.append(acc)
        xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits1, labels=labels1)
        xe = tf.reduce_mean(xe, name='xe')
        tf.losses.add_loss(xe)
        self.metrics.append(xe)
        pass
    pass
class AutoEncoderModel(Model2D):
    """Image auto-encoder: reconstructs its input; loss defined by subclass."""
    def __init__ (self):
        super().__init__()
        pass
    def loss (self, images, pred):
        """Return the reconstruction loss tensor; to be implemented by subclasses."""
        pass
    def inference (self, images, channels, is_training):
        """Return the reconstruction with *channels* output channels."""
        pass
    def build_graph (self):
        is_training = tf.placeholder(tf.bool, name="is_training")
        images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
        self.is_training = is_training
        self.images = images
        # NOTE(review): passes literal 1, not is_training, as the training flag — confirm intentional
        prediction = self.inference(images, FLAGS.channels, 1)
        prediction = tf.identity(prediction, name='decoded')
        loss = self.loss(images, prediction)
        tf.losses.add_loss(loss)
        self.metrics.append(loss)
        pass
    def extra_stream_config (self, is_training):
        augments = load_augments(is_training)
        shift = 0
        if is_training:
            shift = FLAGS.clip_shift
        return {
                "transforms": [
                  {"type": "resize", "max_size": FLAGS.max_size, "min_size": FLAGS.min_size},
                  ] + augments + [
                  {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride},
                  ]
               }
    def feed_dict (self, record, is_training = True):
        # load picpac record into feed_dict
        _, images = record
        return {self.is_training: is_training,
                self.images: images}
    pass
def default_argscope (is_training):
    # slim arg_scope with the patched resnet defaults and the global weight decay.
    return fuck_slim.patch_resnet_arg_scope(is_training)(weight_decay=FLAGS.weight_decay)
def create_stock_slim_network (name, images, is_training, num_classes=None, global_pool=False, stride=None, scope=None, spatial_squeeze=True):
    """Instantiate a stock slim network by *name* with mean subtraction.

    Returns the network output tensor; end-points are discarded.
    """
    if scope is None:
        scope = name
    # ImageNet per-channel means, sliced to the actual channel count
    # (channel order assumed to match the input — not verified here).
    PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]])
    ch = images.shape[3]
    fuck_slim.extend()
    if FLAGS.patch_slim:
        fuck_slim.patch(is_training)
    network_fn = nets_factory.get_network_fn(name, num_classes=num_classes,
                weight_decay=FLAGS.weight_decay, is_training=is_training)
    net, _ = network_fn(images - PIXEL_MEANS[:, :, :, :ch], global_pool=global_pool, output_stride=stride, scope=scope, spatial_squeeze=spatial_squeeze)
    #net, _ = network_fn(images - PIXEL_MEANS[:, :, :, :ch], global_pool=global_pool, output_stride=stride, scope=scope)
    return net
def setup_finetune (ckpt, is_trainable):
    """Partition model variables for finetuning from *ckpt*.

    Variables whose name fails is_trainable() are restored from the
    checkpoint and frozen; the others are trained.
    Returns (init_fn, variables_to_train).
    """
    print("Finetuning %s" % ckpt)
    # TODO(sguada) variables.filter_variables()
    variables_to_restore = []
    model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)
    for var in model_vars:
        if not is_trainable(var.op.name):
            #print(var.op.name)
            variables_to_restore.append(var)
    if tf.gfile.IsDirectory(ckpt):
        ckpt = tf.train.latest_checkpoint(ckpt)
    variables_to_train = []
    trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for var in trainable_vars:
        if is_trainable(var.op.name):
            variables_to_train.append(var)
    print("Restoring %d out of %d model variables" % (len(variables_to_restore), len(model_vars)))
    print("Training %d out of %d trainable variables" % (len(variables_to_train), len(trainable_vars)))
    if len(variables_to_train) < 20:
        # few enough to list individually
        for var in variables_to_train:
            print("    %s" % var.op.name)
    return tf.contrib.framework.assign_from_checkpoint_fn(
            ckpt, variables_to_restore,
            ignore_missing_vars=False), variables_to_train
class Metrics:
    """Running weighted averages of a model's metric tensors, for display.

    Metric names are derived from tensor names: the trailing ":0" is
    stripped and only the last '/'-separated component is kept.
    """
    def __init__ (self, model):
        names = []
        for tensor in model.metrics:
            names.append(tensor.name[:-2].split('/')[-1])
        self.metric_names = names
        self.cnt = 0
        self.sum = np.zeros(len(model.metrics), dtype=np.float32)
    def update (self, mm, cc):
        """Fold in one batch of metric values weighted by count cc; return display text."""
        self.sum = self.sum + np.array(mm) * cc
        self.cnt = self.cnt + cc
        self.avg = self.sum / self.cnt
        parts = []
        for label, value in zip(self.metric_names, list(self.avg)):
            parts.append('%s=%.3f' % (label, value))
        return ' '.join(parts)
class AsyncLoad:
    """Prefetch records from a stream in a child process via a Queue."""
    def __init__ (self, stream):
        # Producer loops forever, pushing records into the shared queue.
        def producer (queue, stre):
            while True:
                queue.put(stre.next())
            pass
        self.queue = Queue()
        self.worker = Process(target=producer, args=(self.queue, stream,))
        self.worker.daemon = True   # so it dies with the parent process
        self.worker.start() # Launch reader_proc() as a separate python process
        pass
    def next (self):
        # Blocks until the producer has a record ready.
        return self.queue.get()
    pass
def train (model):
    """Train *model*: build the graph, stream picpac data, log, validate,
    and checkpoint according to the module-level FLAGS.
    """
    # one log file per run, named after the entry script + timestamp
    bname = os.path.splitext(os.path.basename(__main__.__file__))[0]
    logging.basicConfig(filename='%s-%s.log' % (bname, datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),level=logging.DEBUG, format='%(asctime)s %(message)s')
    logging.info("cwd: %s" % os.getcwd())
    logging.info("cmdline: %s" % (' '.join(sys.argv)))
    model.build_graph()
    if FLAGS.model:
        try:    # create directory if not exists
            os.makedirs(FLAGS.model)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        pass
    global_step = tf.train.create_global_step()
    LR = tf.train.exponential_decay(FLAGS.lr, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
    if FLAGS.adam:
        print("Using Adam optimizer, reducing LR by 100x")
        optimizer = tf.train.AdamOptimizer(LR/100)
    else:
        optimizer = tf.train.MomentumOptimizer(learning_rate=LR, momentum=0.9)
    # NOTE(review): `metrics` holds tensors here but is later rebound to a
    # Metrics accumulator inside the epoch loop — two different things.
    metrics = model.metrics
    reg_losses = tf.losses.get_regularization_losses()
    if len(reg_losses) > 0:
        reg_loss = tf.add_n(reg_losses, name='l2')
        metrics.append(reg_loss)
    for loss in tf.losses.get_losses():
        print("LOSS:", loss.name)
    total_loss = tf.losses.get_total_loss(name='L')
    metrics.append(total_loss)
    train_op = tf.contrib.training.create_train_op(total_loss, optimizer, global_step=global_step, variables_to_train=model.variables_to_train)
    saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
    stream = model.create_stream(FLAGS.db, True)
    # load validation db
    if FLAGS.multiprocess:
        stream = AsyncLoad(stream)
        pass
    val_stream = None
    if FLAGS.val_db:
        val_stream = model.create_stream(FLAGS.val_db, False)
    epoch_steps = FLAGS.epoch_steps
    if epoch_steps is None:
        # default: one pass over the db, rounded up to whole batches
        epoch_steps = (stream.size() + FLAGS.batch-1) // FLAGS.batch
    best = 0
    ss_config = tf.ConfigProto()
    ss_config.gpu_options.allow_growth=True
    with tf.Session(config=ss_config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.init_session(sess)
        if FLAGS.resume:
            saver.restore(sess, FLAGS.resume)
        global_start_time = time.time()
        epoch, step = 0, 0
        #bar_format = '{desc}: {percentage:03.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'
        #bar_format = '{desc}|{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_fmt}]'
        if FLAGS.compact:
            bar_format = '{desc}|{n_fmt}/{total_fmt},{rate_fmt}'
        else:
            bar_format = '{desc}: {percentage:03.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'
        while epoch < FLAGS.max_epochs:
            start_time = time.time()
            metrics = Metrics(model)
            progress = tqdm(range(epoch_steps), leave=False, bar_format=bar_format)
            for _ in progress:
                record = stream.next()
                mm, _ = sess.run([model.metrics, train_op], feed_dict=model.feed_dict(record, True))
                # record[1] is the image batch; its leading dim is the batch size
                metrics_txt = metrics.update(mm, record[1].shape[0])
                progress.set_description(metrics_txt)
                step += 1
                pass
            lr = sess.run(LR)
            stop = time.time()
            msg = 'train epoch=%d step=%d %s elapsed=%.3f time=%.3f lr=%.4f' % (
                        epoch, step, metrics_txt, stop - global_start_time, stop - start_time, lr)
            print_green(msg)
            logging.info(msg)
            epoch += 1
            is_best = False
            if (epoch % FLAGS.val_epochs == 0) and val_stream:
                # evaluation
                metrics = Metrics(model)
                val_stream.reset()
                progress = tqdm(val_stream, leave=False, bar_format=bar_format)
                for record in progress:
                    mm = sess.run(model.metrics, feed_dict=model.feed_dict(record, False))
                    metrics_txt = metrics.update(mm, record[1].shape[0])
                    progress.set_description(metrics_txt)
                    pass
                # last metric (total loss by construction above) drives "best"
                if metrics.avg[-1] > best:
                    is_best = True
                    best = metrics.avg[-1]
                msg = 'valid epoch=%d step=%d %s lr=%.4f best=%.3f' % (
                            epoch-1, step, metrics_txt, lr, best)
                print_red(msg)
                logging.info(msg)
            if is_best and FLAGS.model:
                ckpt_path = '%s/best' % FLAGS.model
                saver.save(sess, ckpt_path)
            # model saving
            if (epoch % FLAGS.ckpt_epochs == 0) and FLAGS.model:
                ckpt_path = '%s/%d' % (FLAGS.model, epoch)
                saver.save(sess, ckpt_path)
                print('saved to %s.' % ckpt_path)
            pass
        pass
def print_red (txt):
    """Echo *txt* to stdout wrapped in the ANSI bright-red escape sequence."""
    print(f'\033[91m{txt}\033[0m')
def print_green (txt):
    """Echo *txt* to stdout wrapped in the ANSI bright-green escape sequence."""
    print(f'\033[92m{txt}\033[0m')
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,630 | aaalgo/aardvark | refs/heads/master | /setup.py | #!/usr/bin/env python3
# Build script for the `cpp` extension: a C++ wrapper (python-api.cpp) linking
# OpenCV, Boost.Python/Boost.NumPy, glog and OpenMP.
import sys
import os
import subprocess as sp
import numpy
# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools when touching this file.
from distutils.core import setup, Extension

# Link libraries, accumulated below based on the local OpenCV/Boost install.
libraries = []
# Ask pkg-config which OpenCV libraries are installed.
cv2libs = sp.check_output('pkg-config --libs opencv', shell=True).decode('ascii')
if 'opencv_imgcodecs' in cv2libs:
    # OpenCV >= 3 splits the image codecs into their own library.
    libraries.append('opencv_imgcodecs')
    pass
# Path to numpy's C headers, needed to compile against the numpy array API.
numpy_include = os.path.join(os.path.abspath(os.path.dirname(numpy.__file__)), 'core', 'include')
if sys.version_info[0] < 3:
    boost_numpy = 'boost_numpy'
    boost_python = 'boost_python'
else:
    # Boost's Python libraries are named differently across distros/versions:
    # prefer the plain "3" suffix when present, else e.g. boost_numpy36.
    if os.path.exists('/usr/local/lib/libboost_python3.so'):
        boost_numpy = 'boost_numpy3'
    else:
        boost_numpy = 'boost_numpy%d%d' % (sys.version_info[0], sys.version_info[1])
    boost_python = 'boost_python-py%d%d' % (sys.version_info[0], sys.version_info[1])
    pass
libraries.extend(['opencv_highgui', 'opencv_imgproc', 'opencv_core', 'boost_filesystem', 'boost_system', boost_numpy, boost_python, 'glog', 'gomp'])
cpp = Extension('cpp',
        language = 'c++',
        extra_compile_args = ['-O3', '-std=c++1y', '-g', '-fopenmp'],
        include_dirs = ['/usr/local/include', numpy_include],
        libraries = libraries,
        library_dirs = ['/usr/local/lib'],
        sources = ['python-api.cpp']
        )
setup (name = 'cpp',
       version = '0.0.1',
       author = 'Wei Dong',
       author_email = 'wdong@wdong.org',
       license = 'propriertary',
       description = 'This is a demo package',
       ext_modules = [cpp],
      )
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,631 | aaalgo/aardvark | refs/heads/master | /train-basic-keypoints.py | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/slim'))
import logging
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import aardvark
from tf_utils import *
# TF1 command-line flags for the basic-keypoint training script.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('finetune', None, '')  # checkpoint to fine-tune from; when set the backbone is frozen (see build_graph)
flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture')
flags.DEFINE_integer('backbone_stride', 16, '')  # total downsampling stride of the backbone
flags.DEFINE_integer('feature_channels', 64, '')  # NOTE(review): not referenced in this file's visible code
flags.DEFINE_integer('stride', 4, '')  # output stride of the prediction maps (also the label downsize factor)
flags.DEFINE_integer('radius', 25, '')  # radius used by the keypoints.basic label transform
flags.DEFINE_float('offset_weight', 1, '')  # multiplier on the offset-regression loss
PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]]) # VGG PIXEL MEANS USED BY TF
def params_loss (dxy, dxy_gt):
    """Per-row smooth-L1 (Huber) loss between predicted and ground-truth offsets.

    Both inputs are (N, 2); returns a length-N tensor of per-row loss sums.
    """
    elementwise = tf.losses.huber_loss(
            dxy, dxy_gt,
            reduction=tf.losses.Reduction.NONE,
            loss_collection=None)
    return tf.reduce_sum(elementwise, axis=1)
class Model (aardvark.Model):
    """Keypoint-detection model: per-class heatmap + 2D offset regression heads
    on top of a stock slim backbone."""

    def __init__ (self):
        super().__init__()
        if FLAGS.classes > 1:
            # Warn only -- training proceeds even with classes > 1.
            aardvark.print_red("Classes should be number of point classes,")
            aardvark.print_red("not counting background. Usually 1.")
        pass

    def extra_stream_config (self, is_training):
        """Return the picpac-style streaming config: resize, augment, clip,
        then generate keypoint heatmap/offset labels and drop raw annotation."""
        augments = aardvark.load_augments(is_training)
        shift = 0
        if is_training:
            # Random clip shift is an augmentation; disabled for evaluation.
            shift = FLAGS.clip_shift
        return {
            "annotate": [1],
            "transforms": [{"type": "resize", "max_size": FLAGS.max_size}
                ] + augments + [
                {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride},
                {"type": "keypoints.basic", 'downsize': FLAGS.stride, 'classes': FLAGS.classes, 'radius': FLAGS.radius},
                #{"type": "anchors.dense.point", 'downsize': FLAGS.stride, 'lower_th': anchor_th, 'upper_th': anchor_th},
                {"type": "drop"}, # remove original annotation
            ]
        }

    def feed_dict (self, record, is_training = True):
        """Map a streamed record onto the graph's placeholders.

        `record` unpacks to 5 fields; the 1st and 3rd are unused here
        (presumably ids/metadata -- TODO confirm against the stream producer).
        """
        _, images, _, mask, offsets = record
        return {self.is_training: is_training,
                self.images: images,
                self.mask: mask,
                self.gt_offsets: offsets}

    def build_graph (self):
        """Construct the TF1 graph: placeholders, backbone, heatmap head with
        dice loss, and offset head with masked Huber loss."""
        if True: # setup inputs
            # parameters
            is_training = tf.placeholder(tf.bool, name="is_training")
            images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
            # the rest are for training only
            mask = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.classes))
            gt_offsets = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.classes*2))
            self.is_training = is_training
            self.images = images
            self.mask = mask
            self.gt_offsets = gt_offsets

        backbone = aardvark.create_stock_slim_network(FLAGS.backbone, images, is_training, global_pool=False, stride=FLAGS.backbone_stride)

        with tf.variable_scope('head'), slim.arg_scope(aardvark.default_argscope(is_training)):
            if FLAGS.finetune:
                # Freeze the backbone when fine-tuning; only 'head' variables train.
                backbone = tf.stop_gradient(backbone)
            #net = slim_multistep_upscale(net, FLAGS.backbone_stride / FLAGS.stride, FLAGS.reduction)
            #backbone = net
            # Upscale factor from backbone stride to output stride.
            stride = FLAGS.backbone_stride // FLAGS.stride
            #backbone = slim.conv2d_transpose(backbone, FLAGS.feature_channels, st*2, st)
            #prob = slim.conv2d(backbone, FLAGS.classes, 3, 1, activation_fn=tf.sigmoid)
            # Heatmap head: sigmoid probability per class.
            prob = slim.conv2d_transpose(backbone, FLAGS.classes, stride*2, stride, activation_fn=tf.sigmoid)
            #logits2 = tf.reshape(logits, (-1, 2))
            #prob2 = tf.squeeze(tf.slice(tf.nn.softmax(logits2), [0, 1], [-1, 1]), 1)
            #tf.reshape(prob2, tf.shape(mask), name='prob')
            #xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits2, labels=mask)
            dice = tf.identity(dice_loss(mask, prob), name='di')
            tf.losses.add_loss(dice)
            self.metrics.append(dice)

            # Offset head: 2 values (dx, dy) per class per pixel.
            offsets = slim.conv2d_transpose(backbone, FLAGS.classes*2, stride*2, stride, activation_fn=None)
            offsets2 = tf.reshape(offsets, (-1, 2)) # ? * 4
            gt_offsets2 = tf.reshape(gt_offsets, (-1,2))
            # NOTE(review): this flattening only lines up with offsets2 when
            # FLAGS.classes == 1 -- consistent with the warning in __init__.
            mask2 = tf.reshape(mask, (-1,))
            # Offset loss counted only at positive (masked) pixels; +1 avoids
            # division by zero when the mask is empty.
            pl = params_loss(offsets2, gt_offsets2) * mask2
            pl = tf.reduce_sum(pl) / (tf.reduce_sum(mask2) + 1)
            pl = tf.check_numerics(pl * FLAGS.offset_weight, 'pl', name='p1') # params-loss
            tf.losses.add_loss(pl)
            self.metrics.append(pl)
            # Named outputs for inference-time graph lookup.
            tf.identity(prob, name='prob')
            tf.identity(offsets, 'offsets')

        if FLAGS.finetune:
            assert FLAGS.colorspace == 'RGB'
            def is_trainable (x):
                # Only the 'head' scope trains during fine-tuning.
                return x.startswith('head')
            self.init_session, self.variables_to_train = aardvark.setup_finetune(FLAGS.finetune, is_trainable)
        pass
def main (_):
    """Entry point invoked by tf.app.run(): build the keypoint model and train it."""
    keypoint_model = Model()
    aardvark.train(keypoint_model)
# Script entry: tf.app.run() parses flags and dispatches to main().
if __name__ == '__main__':
    try:
        tf.app.run()
    except KeyboardInterrupt:
        # Swallow Ctrl-C so interrupted training exits without a traceback.
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,632 | aaalgo/aardvark | refs/heads/master | /tf_utils.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
def dice_loss (gt, prob):
    """Negated soft Dice coefficient between ground truth and predicted probabilities.

    Returns a value in roughly [-2, 0]; more negative means better overlap.
    The epsilon terms guard against an all-zero denominator.
    """
    eps = 0.00001
    intersection = tf.reduce_sum(gt * prob)
    denominator = tf.reduce_sum(gt) + tf.reduce_sum(prob)
    return -2 * (intersection + eps) / (denominator + eps)
def weighted_dice_loss (gt, prob, weight):
    """Negated soft Dice coefficient with a per-element weight map applied to
    both the intersection and the denominator terms."""
    eps = 0.00001
    intersection = tf.reduce_sum(gt * prob * weight)
    denominator = tf.reduce_sum(gt * weight) + tf.reduce_sum(prob * weight)
    return -2 * (intersection + eps) / (denominator + eps)
def weighted_dice_loss_by_channel (gt, prob, weight, channels):
    """Weighted dice loss computed independently per channel (axis 3).

    Returns (mean loss across channels, list of per-channel losses).
    """
    ones = [1] * channels
    gt_parts = tf.split(gt, ones, axis=3)
    prob_parts = tf.split(prob, ones, axis=3)
    weight_parts = tf.split(weight, ones, axis=3)
    dice = [weighted_dice_loss(g, p, w)
            for g, p, w in zip(gt_parts, prob_parts, weight_parts)]
    return tf.add_n(dice) / channels, dice
def weighted_loss_by_channel (loss, weight, channels):
    """Mean over channels of the weighted average loss within each channel.

    `loss` and `weight` are reduced over axis 0; the small constant keeps the
    division safe when a channel's total weight is zero.
    """
    weighted_total = tf.reduce_sum(loss * weight, axis=0)
    weighted_total = tf.reshape(weighted_total, (channels,))
    normalizer = tf.reduce_sum(weight, axis=0) + 0.0001
    normalizer = tf.reshape(normalizer, (channels,))
    return tf.reduce_mean(weighted_total / normalizer)
def tf_repeat(tensor, repeats):
    """Repeat each element of `tensor` along every axis by the factors in
    `repeats` (a Python list, one factor per axis), like np.repeat per axis."""
    with tf.variable_scope("repeat"):
        expanded = tf.expand_dims(tensor, -1)
        tiled = tf.tile(expanded, multiples=[1] + repeats)
        result = tf.reshape(tiled, tf.shape(tensor) * repeats)
    return result
def slim_multistep_upscale (net, octaves, reduction, step=2):
    """Upscale `net` by repeated transpose convolutions, halving the channel
    count (after an initial /reduction) at each step.

    NOTE(review): the dump this was recovered from had its indentation
    stripped; the conv2d_transpose is reconstructed as part of the loop body
    (one conv per octave step) -- confirm against the original file.
    """
    ch = net.get_shape()[3]
    print("UPSCALE", ch, reduction)
    ch = ch // reduction
    while octaves > 1:
        ch = ch // 2
        octaves = octaves // step
        print("UPSCALE", ch, reduction)
        net = slim.conv2d_transpose(net, ch, step * 2, step, padding='SAME')
    return net
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,633 | aaalgo/aardvark | refs/heads/master | /zoo/sss/PSPNet.py | import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
import resnet_v2
import os, sys
def Upsampling(inputs,feature_map_shape):
    """Bilinearly resize `inputs` to the spatial size `feature_map_shape`."""
    target_size = feature_map_shape
    return tf.image.resize_bilinear(inputs, size=target_size)
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
    """
    Basic conv transpose block for Encoder-Decoder upsampling.
    Applies successively Transposed Convolution, BatchNormalization, ReLU nonlinearity.

    Arguments:
      inputs: input tensor (NHWC)
      n_filters: number of output channels
      kernel_size: transpose-conv kernel size
      scale: spatial upscaling factor (used as the stride)
    """
    # Bug fix: kernel_size and scale were silently ignored (hard-coded to
    # [3, 3] and stride 2). The defaults match the old hard-coded values, so
    # existing call sites behave identically.
    net = slim.conv2d_transpose(inputs, n_filters, kernel_size=kernel_size,
                                stride=[scale, scale], activation_fn=None)
    net = tf.nn.relu(slim.batch_norm(net, fused=True))
    return net
def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):
    """
    Basic conv block for Encoder-Decoder.
    Applies successively Convolution, BatchNormalization, ReLU nonlinearity.
    """
    conv = slim.conv2d(inputs, n_filters, kernel_size,
                       activation_fn=None, normalizer_fn=None)
    normed = slim.batch_norm(conv, fused=True)
    return tf.nn.relu(normed)
def InterpBlock(net, level, feature_map_shape, pooling_type):
    """One branch of the pyramid pooling module: pool the feature map down to
    roughly level x level, project to 512 channels, BN+ReLU, then upsample
    back to `feature_map_shape`.

    Arguments:
      net: input feature map (NHWC)
      level: target pyramid grid size (1, 2, 3, or 6)
      feature_map_shape: [H, W] of the input/output feature map
      pooling_type: 'MAX' or 'AVG', forwarded to slim.pool
    """
    # Compute the kernel and stride sizes according to how large the final feature map will be
    # When the kernel size and strides are equal, then we can compute the final feature map size
    # by simply dividing the current size by the kernel or stride size
    # The final feature map sizes are 1x1, 2x2, 3x3, and 6x6. We round to the closest integer
    kernel_size = [int(np.round(float(feature_map_shape[0]) / float(level))), int(np.round(float(feature_map_shape[1]) / float(level)))]
    stride_size = kernel_size
    # Bug fix: pooling_type was silently ignored (hard-coded 'MAX'); the
    # default call path in build_pspnet passes "MAX", so behavior there is
    # unchanged.
    net = slim.pool(net, kernel_size, stride=stride_size, pooling_type=pooling_type)
    net = slim.conv2d(net, 512, [1, 1], activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)
    net = Upsampling(net, feature_map_shape)
    return net
def PyramidPoolingModule(inputs, feature_map_shape, pooling_type):
    """
    Build the Pyramid Pooling Module: four pooled branches at grid sizes
    1, 2, 3, 6 concatenated (coarsest first) with the input along channels.
    """
    branches = [InterpBlock(inputs, lvl, feature_map_shape, pooling_type)
                for lvl in (1, 2, 3, 6)]
    block1, block2, block3, block6 = branches
    return tf.concat([inputs, block6, block3, block2, block1], axis=-1)
def build_pspnet(inputs, label_size, num_classes, preset_model='PSPNet-Res50', pooling_type = "MAX",
                 weight_decay=1e-5, upscaling_method="conv", is_training=None, pretrained_dir="models"):
    """
    Builds the PSPNet model.

    Arguments:
      inputs: The input tensor
      label_size: Size of the final label tensor. We need to know this for proper upscaling
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes
      pooling_type: Max or Average pooling

    Returns:
      (logits tensor, init_fn) where init_fn loads the pre-trained ResNet
      checkpoint from `pretrained_dir` into a session.
    """
    # is_training must be supplied explicitly: batch norm behavior in the
    # ResNet backbone depends on it.
    assert not is_training is None
    if preset_model == 'PSPNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope='resnet_v2_50'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'PSPNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope='resnet_v2_101'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'PSPNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope='resnet_v2_152'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    # The pyramid pooling module runs on the stride-8 feature map ('pool3'),
    # hence label_size / 8.
    feature_map_shape = [int(x / 8.0) for x in label_size]
    print(feature_map_shape)
    psp = PyramidPoolingModule(end_points['pool3'], feature_map_shape=feature_map_shape, pooling_type=pooling_type)

    net = slim.conv2d(psp, 512, [3, 3], activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)

    if upscaling_method.lower() == "conv":
        # Learned upscaling: three x2 transpose-conv stages (x8 total) back
        # to label_size.
        net = ConvUpscaleBlock(net, 256, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 256)
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        # Single bilinear resize directly to the label size.
        net = Upsampling(net, label_size)

    # Final 1x1 projection to per-class logits.
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
def mean_image_subtraction(inputs, means=[123.68, 116.78, 103.94]):
    """Subtract per-channel means (default: ImageNet RGB means) from an NHWC
    image batch; raises ValueError when `means` does not match the channel
    count."""
    inputs=tf.to_float(inputs)
    num_channels = inputs.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    channels = tf.split(axis=3, num_or_size_splits=num_channels, value=inputs)
    centered = [channel - mean for channel, mean in zip(channels, means)]
    return tf.concat(axis=3, values=centered)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.