index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
65,564 | wubozhi/itc-testing-tools | refs/heads/master | /python/cppcheck.py | import sys
import os.path
import system
import dirutils
import tempfile
import xml.etree.ElementTree as ET
import shutil
from pathlib import Path
# Command line: <xml_report> <exec_log> <source_dir> <csv_out> <exe> <opts>
xml_report_path = os.path.abspath(sys.argv[1])   # cppcheck XML output (reused per file)
temp_path = os.path.abspath(sys.argv[2])         # tool-execution log
directory = os.path.abspath(sys.argv[3])         # benchmark sources to analyze
csv = os.path.abspath(sys.argv[4])               # per-line error CSV
exe = sys.argv[5]                                # cppcheck executable
opts = sys.argv[6]                               # extra options (quoted, starts with a space)
# Copy the sources into a private scratch dir so the analyzer never touches the
# benchmark tree.  tempfile.mkdtemp is the supported API (the original used the
# private tempfile._get_candidate_names); the fresh dir is removed again because
# shutil.copytree requires a non-existent destination.
tmp_base = os.path.join(str(Path.home()), "tmp")
os.makedirs(tmp_base, exist_ok=True)
tmpdir_path = tempfile.mkdtemp(prefix="cppcheck-", dir=tmp_base)
os.rmdir(tmpdir_path)
shutil.copytree(directory, tmpdir_path)
print("\n======[CPPCHECK]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
print("[EXE OPTIONS]:", opts)
source_files = dirutils.list_files(tmpdir_path, '.c') + dirutils.list_files(tmpdir_path, '.cpp')
dirutils.file_line_error_header(csv)
dirutils.reset_file(temp_path)
for source_file in source_files:
    # main.c is only a driver; invalid_extern_1.c is not analyzed on its own
    # because it is appended to the invalid_extern.c invocation below.
    if source_file.endswith("main.c"):
        continue
    if source_file.endswith("invalid_extern_1.c"):
        continue
    if source_file.endswith("invalid_extern.c"):
        source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
    cppcheck = exe + opts + " " + source_file + " --output-file=" + xml_report_path
    # status renamed from the original's `exit`, which shadowed the builtin.
    (output, err, status, time) = system.system_call(cppcheck, ".")
    dirutils.tool_exec_log(temp_path, cppcheck, output, err, status)
    root = ET.parse(xml_report_path).getroot()
    # --xml-version=2 produces <results><cppcheck/><errors>...</errors></results>;
    # look the element up by tag instead of assuming it sits at root[1].
    errors = root.find("errors")
    if errors is None:
        errors = root[1]
    # Append one "file,line,message" row per error location.  Write to the CSV
    # directly; the original re-bound sys.stdout to an open() result and never
    # closed it, leaking one file handle per analyzed source file.
    with open(csv, "a") as csv_file:
        for error in errors:
            msg = "\"" + error.attrib['verbose'] + "\""
            for location in error:
                if (location.tag == "location"):
                    print(os.path.basename(location.attrib['file']) + ",", location.attrib['line'] + ",", msg, file=csv_file)
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH CPPCHECK]=======")
| {"/python/clanalyze.py": ["/python/system.py"], "/benchmark.py": ["/python/system.py", "/python/clanalyze.py", "/python/latex.py"]} |
65,565 | wubozhi/itc-testing-tools | refs/heads/master | /benchmark.py | import sys
import os.path
import os
import python.system
import python.clanalyze
import python.latex
# Report output root, taken from the command line.
rep_directory = os.path.realpath(sys.argv[1])
print("Results dir:", rep_directory, "\n")
# ## Location of ITC workbench: this should be modified by need
W_C_DEFECTS_DIR = "../itc-benchmarks/01.w_Defects/"
W_CPP_DEFECTS_DIR = "../itc-benchmarks/03.w_Defects_Cpp/"
WO_C_DEFECTS_DIR = "../itc-benchmarks/02.wo_Defects/"
WO_CPP_DEFECTS_DIR = "../itc-benchmarks/04.wo_Defects_Cpp/"
# Helper script that computes the per-tool statistics CSVs.
STATISTICS="./python/statistics.py"
# ## Output files
# # COUNT ALL ERRORS
C_COUNT_ERROR_FILE = rep_directory + "/setup/temp/c_count_errors.csv"
CPP_COUNT_ERROR_FILE = rep_directory + "/setup/temp/cpp_count_errors.csv"
# # MERGE FILES
C_MERGE_FILE = rep_directory + "/setup/c_merge_file.csv"
CPP_MERGE_FILE = rep_directory + "/setup/cpp_merge_file.csv"
# # COUNT_ERROR & GATHER_ERROR for the ITC benchmark only
COUNT_ERRORS = "./bash/count-errors-per-file.sh"
GATHER_ERRORS = "./bash/gather-errors-by-line.sh"
MERGE_EXE= "./bash/merge-csv.sh"
# GATHER ERRORS FROM ITC BENCHMARK PER LINE
C_ERRORS_PER_LINE_FILE = rep_directory + "/setup/temp/c_errors_per_line.csv"
CPP_ERRORS_PER_LINE_FILE = rep_directory + "/setup/temp/cpp_errors_per_line.csv"
C_WO_ERRORS_PER_LINE_FILE = rep_directory + "/setup/temp/c_wo_errors_per_line.csv"
CPP_WO_ERRORS_PER_LINE_FILE = rep_directory + "/setup/temp/cpp_wo_errors_per_line.csv"
# ## Tools configurations
# Naming convention per tool: <TOOL> is the wrapper script, <TOOL>_EXE the
# analyzer binary, *_OUTPUT_* the per-line error CSVs (C/C++ x with/without
# defects), *_OPTS extra command-line options, *_OUT_* the statistics outputs,
# and *_TEMP_* scratch files.
# ## CLANG CORE
CLANG_CORE = "./python/clang.py"
CLANG_CORE_PP = "./python/clang++.py"
CLANG_CORE_EXE = "clang"
CLANG_CORE_EXE_CPP = "clang++"
CLANG_CORE_OUTPUT_C_W = rep_directory + "/clangcore/temp/c_w_errors_per_line.csv"
CLANG_CORE_OUTPUT_C_WO = rep_directory + "/clangcore/temp/c_wo_errors_per_line.csv"
CLANG_CORE_OUTPUT_CPP_W = rep_directory + "/clangcore/temp/cpp_w_errors_per_line.csv"
CLANG_CORE_OUTPUT_CPP_WO = rep_directory + "/clangcore/temp/cpp_wo_errors_per_line.csv"
CLANG_CORE_OPTS = "'-cc1 -analyze -analyzer-checker=core'"
CLANG_CORE_OUT_SUBDEFECTS = rep_directory + "/clangcore/c_subdefects.csv"
CLANG_CORE_OUT_DEFECTS = rep_directory + "/clangcore/c_defects.csv"
CLANG_CORE_OUT_TOTAL = rep_directory + "/clangcore/c_total.csv"
CLANG_CORE_OUT_CPP_SUBDEFECTS = rep_directory + "/clangcore/cpp_subdefects.csv"
CLANG_CORE_OUT_CPP_DEFECTS = rep_directory + "/clangcore/cpp_defects.csv"
CLANG_CORE_OUT_CPP_TOTAL = rep_directory + "/clangcore/cpp_total.csv"
CLANGCORE_TEMP_C_W = rep_directory + "/clangcore/temp/c_w_temp.txt"
CLANGCORE_TEMP_C_WO = rep_directory + "/clangcore/temp/c_wo_temp.txt"
CLANGCORE_TEMP_CPP_W = rep_directory + "/clangcore/temp/cpp_w_temp.txt"
CLANGCORE_TEMP_CPP_WO = rep_directory + "/clangcore/temp/cpp_wo_temp.txt"
# ## CLANG ALPHA
CLANG_ALPHA = "./python/clang.py"
CLANG_ALPHA_PP = "./python/clang++.py"
CLANG_ALPHA_EXE = "clang"
CLANG_ALPHA_EXE_CPP = "clang++"
CLANG_ALPHA_OUTPUT_C_W = rep_directory + "/clangalpha/temp/c_w_errors_per_line.csv"
CLANG_ALPHA_OUTPUT_C_WO = rep_directory + "/clangalpha/temp/c_wo_errors_per_line.csv"
CLANG_ALPHA_OUTPUT_CPP_W = rep_directory + "/clangalpha/temp/cpp_w_errors_per_line.csv"
CLANG_ALPHA_OUTPUT_CPP_WO = rep_directory + "/clangalpha/temp/cpp_wo_errors_per_line.csv"
CLANG_ALPHA_OPTS = "'-cc1 -analyze -analyzer-checker=alpha'"
CLANG_ALPHA_OUT_SUBDEFECTS = rep_directory + "/clangalpha/c_subdefects.csv"
CLANG_ALPHA_OUT_DEFECTS = rep_directory + "/clangalpha/c_defects.csv"
CLANG_ALPHA_OUT_TOTAL = rep_directory + "/clangalpha/c_total.csv"
CLANG_ALPHA_OUT_CPP_SUBDEFECTS = rep_directory + "/clangalpha/cpp_subdefects.csv"
CLANG_ALPHA_OUT_CPP_DEFECTS = rep_directory + "/clangalpha/cpp_defects.csv"
CLANG_ALPHA_OUT_CPP_TOTAL = rep_directory + "/clangalpha/cpp_total.csv"
CLANGALPHA_TEMP_C_W = rep_directory + "/clangalpha/temp/c_w_temp.txt"
CLANGALPHA_TEMP_C_WO = rep_directory + "/clangalpha/temp/c_wo_temp.txt"
CLANGALPHA_TEMP_CPP_W = rep_directory + "/clangalpha/temp/cpp_w_temp.txt"
CLANGALPHA_TEMP_CPP_WO = rep_directory + "/clangalpha/temp/cpp_wo_temp.txt"
# ## CLANG CORE ALPHA
CLANG_CORE_ALPHA = "./python/clang.py"
CLANG_CORE_ALPHA_PP = "./python/clang++.py"
CLANG_CORE_ALPHA_EXE = "clang"
CLANG_CORE_ALPHA_EXE_CPP = "clang++"
CLANG_CORE_ALPHA_OUTPUT_C_W = rep_directory + "/clangcorealpha/temp/c_w_errors_per_line.csv"
CLANG_CORE_ALPHA_OUTPUT_C_WO = rep_directory + "/clangcorealpha/temp/c_wo_errors_per_line.csv"
CLANG_CORE_ALPHA_OUTPUT_CPP_W = rep_directory + "/clangcorealpha/temp/cpp_w_errors_per_line.csv"
CLANG_CORE_ALPHA_OUTPUT_CPP_WO = rep_directory + "/clangcorealpha/temp/cpp_wo_errors_per_line.csv"
CLANG_CORE_ALPHA_OPTS = "'-cc1 -analyze -analyzer-checker=core,alpha'"
CLANG_CORE_ALPHA_OUT_SUBDEFECTS = rep_directory + "/clangcorealpha/c_subdefects.csv"
CLANG_CORE_ALPHA_OUT_DEFECTS = rep_directory + "/clangcorealpha/c_defects.csv"
CLANG_CORE_ALPHA_OUT_TOTAL = rep_directory + "/clangcorealpha/c_total.csv"
CLANG_CORE_ALPHA_OUT_CPP_SUBDEFECTS = rep_directory + "/clangcorealpha/cpp_subdefects.csv"
CLANG_CORE_ALPHA_OUT_CPP_DEFECTS = rep_directory + "/clangcorealpha/cpp_defects.csv"
CLANG_CORE_ALPHA_OUT_CPP_TOTAL = rep_directory + "/clangcorealpha/cpp_total.csv"
CLANGCOREALPHA_TEMP_C_W = rep_directory + "/clangcorealpha/temp/c_w_temp.txt"
CLANGCOREALPHA_TEMP_C_WO = rep_directory + "/clangcorealpha/temp/c_wo_temp.txt"
CLANGCOREALPHA_TEMP_CPP_W = rep_directory + "/clangcorealpha/temp/cpp_w_temp.txt"
CLANGCOREALPHA_TEMP_CPP_WO = rep_directory + "/clangcorealpha/temp/cpp_wo_temp.txt"
# ## CPPCHECK
CPPCHECK = "./python/cppcheck.py"
CPPCHECK_EXE = "cppcheck"
CPPCHECK_EXE_CPP = "cppcheck"
CPPCHECK_OUTPUT_C_W = rep_directory + "/cppcheck/temp/c_w_errors_per_line.csv"
CPPCHECK_OUTPUT_C_WO = rep_directory + "/cppcheck/temp/c_wo_errors_per_line.csv"
CPPCHECK_OUTPUT_CPP_W = rep_directory + "/cppcheck/temp/cpp_w_errors_per_line.csv"
CPPCHECK_OUTPUT_CPP_WO = rep_directory + "/cppcheck/temp/cpp_wo_errors_per_line.csv"
CPPCHECK_OPTS = "\" --xml --xml-version=2\""
CPPCHECK_OUT_SUBDEFECTS = rep_directory + "/cppcheck/c_subdefects.csv"
CPPCHECK_OUT_DEFECTS = rep_directory + "/cppcheck/c_defects.csv"
CPPCHECK_OUT_TOTAL = rep_directory + "/cppcheck/c_total.csv"
CPPCHECK_OUT_CPP_SUBDEFECTS = rep_directory + "/cppcheck/cpp_subdefects.csv"
CPPCHECK_OUT_CPP_DEFECTS = rep_directory + "/cppcheck/cpp_defects.csv"
CPPCHECK_OUT_CPP_TOTAL = rep_directory + "/cppcheck/cpp_total.csv"
CPPCHECK_TEMP_C_W = rep_directory + "/cppcheck/temp/c_w_temp.xml"
CPPCHECK_TEMP_C_WO = rep_directory + "/cppcheck/temp/c_wo_temp.xml"
CPPCHECK_TEMP_CPP_W = rep_directory + "/cppcheck/temp/cpp_w_temp.xml"
CPPCHECK_TEMP_CPP_WO = rep_directory + "/cppcheck/temp/cpp_wo_temp.xml"
CPPCHECK_TEMP_C_W_TXT = rep_directory + "/cppcheck/temp/c_w_temp.txt"
CPPCHECK_TEMP_C_WO_TXT = rep_directory + "/cppcheck/temp/c_wo_temp.txt"
CPPCHECK_TEMP_CPP_W_TXT = rep_directory + "/cppcheck/temp/cpp_w_temp.txt"
CPPCHECK_TEMP_CPP_WO_TXT = rep_directory + "/cppcheck/temp/cpp_wo_temp.txt"
# ## CPPLINT
CPPLINT = "./python/cpplint.py"
CPPLINT_EXE = "cpplint"
CPPLINT_EXE_CPP = "cpplint"
CPPLINT_OUTPUT_C_W = rep_directory + "/cpplint/temp/c_w_errors_per_line.csv"
CPPLINT_OUTPUT_C_WO = rep_directory + "/cpplint/temp/c_wo_errors_per_line.csv"
CPPLINT_OUTPUT_CPP_W = rep_directory + "/cpplint/temp/cpp_w_errors_per_line.csv"
CPPLINT_OUTPUT_CPP_WO = rep_directory + "/cpplint/temp/cpp_wo_errors_per_line.csv"
CPPLINT_OPTS = ""
CPPLINT_OUT_SUBDEFECTS = rep_directory + "/cpplint/c_subdefects.csv"
CPPLINT_OUT_DEFECTS = rep_directory + "/cpplint/c_defects.csv"
CPPLINT_OUT_TOTAL = rep_directory + "/cpplint/c_total.csv"
CPPLINT_OUT_CPP_SUBDEFECTS = rep_directory + "/cpplint/cpp_subdefects.csv"
CPPLINT_OUT_CPP_DEFECTS = rep_directory + "/cpplint/cpp_defects.csv"
CPPLINT_OUT_CPP_TOTAL = rep_directory + "/cpplint/cpp_total.csv"
CPPLINT_TEMP_C_W = rep_directory + "/cpplint/temp/c_w_temp.txt"
CPPLINT_TEMP_C_WO = rep_directory + "/cpplint/temp/c_wo_temp.txt"
CPPLINT_TEMP_CPP_W = rep_directory + "/cpplint/temp/cpp_w_temp.txt"
CPPLINT_TEMP_CPP_WO = rep_directory + "/cpplint/temp/cpp_wo_temp.txt"
# ## OCLINT
OCLINT = "./python/oclint.py"
OCLINT_EXE = "oclint"
OCLINT_EXE_CPP = "oclint"
OCLINT_OUTPUT_C_W = rep_directory + "/oclint/temp/c_w_errors_per_line.csv"
OCLINT_OUTPUT_C_WO = rep_directory + "/oclint/temp/c_wo_errors_per_line.csv"
OCLINT_OUTPUT_CPP_W = rep_directory + "/oclint/temp/cpp_w_errors_per_line.csv"
OCLINT_OUTPUT_CPP_WO = rep_directory + "/oclint/temp/cpp_wo_errors_per_line.csv"
OCLINT_OPTS = ""
OCLINT_OUT_SUBDEFECTS = rep_directory + "/oclint/c_subdefects.csv"
OCLINT_OUT_DEFECTS = rep_directory + "/oclint/c_defects.csv"
OCLINT_OUT_TOTAL = rep_directory + "/oclint/c_total.csv"
OCLINT_OUT_CPP_SUBDEFECTS = rep_directory + "/oclint/cpp_subdefects.csv"
OCLINT_OUT_CPP_DEFECTS = rep_directory + "/oclint/cpp_defects.csv"
OCLINT_OUT_CPP_TOTAL = rep_directory + "/oclint/cpp_total.csv"
OCLINT_TEMP_C_W = rep_directory + "/oclint/temp/c_w_temp.txt"
OCLINT_TEMP_C_WO = rep_directory + "/oclint/temp/c_wo_temp.txt"
OCLINT_TEMP_CPP_W = rep_directory + "/oclint/temp/cpp_w_temp.txt"
OCLINT_TEMP_CPP_WO = rep_directory + "/oclint/temp/cpp_wo_temp.txt"
# ## FLINTPP
FLINTPP = "./python/flint++.py"
FLINTPP_EXE = "flint++"
FLINTPP_EXE_CPP = "flint++"
FLINTPP_OUTPUT_C_W = rep_directory + "/flintpp/temp/c_w_errors_per_line.csv"
FLINTPP_OUTPUT_C_WO = rep_directory + "/flintpp/temp/c_wo_errors_per_line.csv"
FLINTPP_OUTPUT_CPP_W = rep_directory + "/flintpp/temp/cpp_w_errors_per_line.csv"
FLINTPP_OUTPUT_CPP_WO = rep_directory + "/flintpp/temp/cpp_wo_errors_per_line.csv"
FLINTPP_OPTS = " -j "
FLINTPP_OUT_SUBDEFECTS = rep_directory + "/flintpp/c_subdefects.csv"
FLINTPP_OUT_DEFECTS = rep_directory + "/flintpp/c_defects.csv"
FLINTPP_OUT_TOTAL = rep_directory + "/flintpp/c_total.csv"
FLINTPP_OUT_CPP_SUBDEFECTS = rep_directory + "/flintpp/cpp_subdefects.csv"
FLINTPP_OUT_CPP_DEFECTS = rep_directory + "/flintpp/cpp_defects.csv"
FLINTPP_OUT_CPP_TOTAL = rep_directory + "/flintpp/cpp_total.csv"
FLINTPP_TEMP_C_W = rep_directory + "/flintpp/temp/c_w_temp.json"
FLINTPP_TEMP_C_WO = rep_directory + "/flintpp/temp/c_wo_temp.json"
FLINTPP_TEMP_CPP_W = rep_directory + "/flintpp/temp/cpp_w_temp.json"
FLINTPP_TEMP_CPP_WO = rep_directory + "/flintpp/temp/cpp_wo_temp.json"
FLINTPP_TEMP_C_W_TXT = rep_directory + "/flintpp/temp/c_w_temp.txt"
FLINTPP_TEMP_C_WO_TXT = rep_directory + "/flintpp/temp/c_wo_temp.txt"
FLINTPP_TEMP_CPP_W_TXT = rep_directory + "/flintpp/temp/cpp_w_temp.txt"
FLINTPP_TEMP_CPP_WO_TXT = rep_directory + "/flintpp/temp/cpp_wo_temp.txt"
# ## SPARSE
SPARSE = "./python/sparse.py"
SPARSE_EXE = "sparse"
SPARSE_EXE_CPP = "sparse"
SPARSE_OUTPUT_C_W = rep_directory + "/sparse/temp/c_w_errors_per_line.csv"
SPARSE_OUTPUT_C_WO = rep_directory + "/sparse/temp/c_wo_errors_per_line.csv"
SPARSE_OUTPUT_CPP_W = rep_directory + "/sparse/temp/cpp_w_errors_per_line.csv"
SPARSE_OUTPUT_CPP_WO = rep_directory + "/sparse/temp/cpp_wo_errors_per_line.csv"
SPARSE_OPTS = ""
SPARSE_OUT_SUBDEFECTS = rep_directory + "/sparse/c_subdefects.csv"
SPARSE_OUT_DEFECTS = rep_directory + "/sparse/c_defects.csv"
SPARSE_OUT_TOTAL = rep_directory + "/sparse/c_total.csv"
SPARSE_OUT_CPP_SUBDEFECTS = rep_directory + "/sparse/cpp_subdefects.csv"
SPARSE_OUT_CPP_DEFECTS = rep_directory + "/sparse/cpp_defects.csv"
SPARSE_OUT_CPP_TOTAL = rep_directory + "/sparse/cpp_total.csv"
SPARSE_TEMP_C_W = rep_directory + "/sparse/temp/c_w_temp.txt"
SPARSE_TEMP_C_WO = rep_directory + "/sparse/temp/c_wo_temp.txt"
SPARSE_TEMP_CPP_W = rep_directory + "/sparse/temp/cpp_w_temp.txt"
SPARSE_TEMP_CPP_WO = rep_directory + "/sparse/temp/cpp_wo_temp.txt"
# ## FLAWFINDER
FLAWFINDER = "./python/flawfinder.py"
FLAWFINDER_EXE = "flawfinder"
FLAWFINDER_OUTPUT_C_W = rep_directory + "/flawfinder/temp/c_w_errors_per_line.csv"
FLAWFINDER_OUTPUT_C_WO = rep_directory + "/flawfinder/temp/c_wo_errors_per_line.csv"
FLAWFINDER_OUTPUT_CPP_W = rep_directory + "/flawfinder/temp/cpp_w_errors_per_line.csv"
FLAWFINDER_OUTPUT_CPP_WO = rep_directory + "/flawfinder/temp/cpp_wo_errors_per_line.csv"
FLAWFINDER_OPTS = ""
FLAWFINDER_OUT_SUBDEFECTS = rep_directory + "/flawfinder/c_subdefects.csv"
FLAWFINDER_OUT_DEFECTS = rep_directory + "/flawfinder/c_defects.csv"
FLAWFINDER_OUT_TOTAL = rep_directory + "/flawfinder/c_total.csv"
FLAWFINDER_OUT_CPP_SUBDEFECTS = rep_directory + "/flawfinder/cpp_subdefects.csv"
FLAWFINDER_OUT_CPP_DEFECTS = rep_directory + "/flawfinder/cpp_defects.csv"
FLAWFINDER_OUT_CPP_TOTAL = rep_directory + "/flawfinder/cpp_total.csv"
FLAWFINDER_TEMP_C_W = rep_directory + "/flawfinder/temp/c_w_temp.txt"
FLAWFINDER_TEMP_C_WO = rep_directory + "/flawfinder/temp/c_wo_temp.txt"
FLAWFINDER_TEMP_CPP_W = rep_directory + "/flawfinder/temp/cpp_w_temp.txt"
FLAWFINDER_TEMP_CPP_WO = rep_directory + "/flawfinder/temp/cpp_wo_temp.txt"
# ## UNO
UNO = "./python/uno.py"
UNO_EXE = "uno"
UNO_EXE_CPP = "uno"
UNO_OUTPUT_C_W = rep_directory + "/uno/temp/c_w_errors_per_line.csv"
UNO_OUTPUT_C_WO = rep_directory + "/uno/temp/c_wo_errors_per_line.csv"
UNO_OUTPUT_CPP_W = rep_directory + "/uno/temp/cpp_w_errors_per_line.csv"
UNO_OUTPUT_CPP_WO = rep_directory + "/uno/temp/cpp_wo_errors_per_line.csv"
UNO_OPTS = ""
UNO_OUT_SUBDEFECTS = rep_directory + "/uno/c_subdefects.csv"
UNO_OUT_DEFECTS = rep_directory + "/uno/c_defects.csv"
UNO_OUT_TOTAL = rep_directory + "/uno/c_total.csv"
UNO_OUT_CPP_SUBDEFECTS = rep_directory + "/uno/cpp_subdefects.csv"
UNO_OUT_CPP_DEFECTS = rep_directory + "/uno/cpp_defects.csv"
UNO_OUT_CPP_TOTAL = rep_directory + "/uno/cpp_total.csv"
UNO_TEMP_C_W = rep_directory + "/uno/temp/c_w_temp.txt"
UNO_TEMP_C_WO = rep_directory + "/uno/temp/c_wo_temp.txt"
UNO_TEMP_CPP_W = rep_directory + "/uno/temp/cpp_w_temp.txt"
UNO_TEMP_CPP_WO = rep_directory + "/uno/temp/cpp_wo_temp.txt"
# ## INFER
INFER = "./python/infer.py"
INFER_EXE = "infer"
INFER_OUTPUT_C_W = rep_directory + "/infer/temp/c_w_errors_per_line.csv"
INFER_OUTPUT_C_WO = rep_directory + "/infer/temp/c_wo_errors_per_line.csv"
INFER_OUTPUT_CPP_W = rep_directory + "/infer/temp/cpp_w_errors_per_line.csv"
INFER_OUTPUT_CPP_WO = rep_directory + "/infer/temp/cpp_wo_errors_per_line.csv"
INFER_OPTS = ""
INFER_OUT_SUBDEFECTS = rep_directory + "/infer/c_subdefects.csv"
INFER_OUT_DEFECTS = rep_directory + "/infer/c_defects.csv"
INFER_OUT_TOTAL = rep_directory + "/infer/c_total.csv"
INFER_OUT_CPP_SUBDEFECTS = rep_directory + "/infer/cpp_subdefects.csv"
INFER_OUT_CPP_DEFECTS = rep_directory + "/infer/cpp_defects.csv"
INFER_OUT_CPP_TOTAL = rep_directory + "/infer/cpp_total.csv"
INFER_TEMP_C_W = rep_directory + "/infer/temp/c_w_temp.txt"
INFER_TEMP_C_WO = rep_directory + "/infer/temp/c_wo_temp.txt"
INFER_TEMP_CPP_W = rep_directory + "/infer/temp/cpp_w_temp.txt"
INFER_TEMP_CPP_WO = rep_directory + "/infer/temp/cpp_wo_temp.txt"
# ## CLANALYZE
CLANALYZE = "./python/clanalyze.py"
CLANALYZE_EXE = "cl /analyze"
CLANALYZE_OUTPUT_C_W = rep_directory + "/clanalyze/temp/c_w_errors_per_line.csv"
CLANALYZE_OUTPUT_C_WO = rep_directory + "/clanalyze/temp/c_wo_errors_per_line.csv"
CLANALYZE_OUTPUT_CPP_W = rep_directory + "/clanalyze/temp/cpp_w_errors_per_line.csv"
CLANALYZE_OUTPUT_CPP_WO = rep_directory + "/clanalyze/temp/cpp_wo_errors_per_line.csv"
CLANALYZE_OPTS = ""
CLANALYZE_OUT_SUBDEFECTS = rep_directory + "/clanalyze/c_subdefects.csv"
CLANALYZE_OUT_DEFECTS = rep_directory + "/clanalyze/c_defects.csv"
CLANALYZE_OUT_TOTAL = rep_directory + "/clanalyze/c_total.csv"
CLANALYZE_OUT_CPP_SUBDEFECTS = rep_directory + "/clanalyze/cpp_subdefects.csv"
CLANALYZE_OUT_CPP_DEFECTS = rep_directory + "/clanalyze/cpp_defects.csv"
CLANALYZE_OUT_CPP_TOTAL = rep_directory + "/clanalyze/cpp_total.csv"
CLANALYZE_TEMP_C_W = rep_directory + "/clanalyze/temp/c_w_temp.txt"
CLANALYZE_TEMP_C_WO = rep_directory + "/clanalyze/temp/c_wo_temp.txt"
CLANALYZE_TEMP_CPP_W = rep_directory + "/clanalyze/temp/cpp_w_temp.txt"
CLANALYZE_TEMP_CPP_WO = rep_directory + "/clanalyze/temp/cpp_wo_temp.txt"
# ## SPLINT
SPLINT = "./python/splint.py"
SPLINT_EXE = "splint"
SPLINT_OUTPUT_C_W = rep_directory + "/splint/temp/c_w_errors_per_line.csv"
SPLINT_OUTPUT_C_WO = rep_directory + "/splint/temp/c_wo_errors_per_line.csv"
SPLINT_OUTPUT_CPP_W = rep_directory + "/splint/temp/cpp_w_errors_per_line.csv"
SPLINT_OUTPUT_CPP_WO = rep_directory + "/splint/temp/cpp_wo_errors_per_line.csv"
SPLINT_OPTS = ""
SPLINT_OUT_SUBDEFECTS = rep_directory + "/splint/c_subdefects.csv"
SPLINT_OUT_DEFECTS = rep_directory + "/splint/c_defects.csv"
SPLINT_OUT_TOTAL = rep_directory + "/splint/c_total.csv"
SPLINT_OUT_CPP_SUBDEFECTS = rep_directory + "/splint/cpp_subdefects.csv"
SPLINT_OUT_CPP_DEFECTS = rep_directory + "/splint/cpp_defects.csv"
SPLINT_OUT_CPP_TOTAL = rep_directory + "/splint/cpp_total.csv"
SPLINT_TEMP_C_W = rep_directory + "/splint/temp/c_w_temp.txt"
SPLINT_TEMP_C_WO = rep_directory + "/splint/temp/c_wo_temp.txt"
SPLINT_TEMP_CPP_W = rep_directory + "/splint/temp/cpp_w_temp.txt"
SPLINT_TEMP_CPP_WO = rep_directory + "/splint/temp/cpp_wo_temp.txt"
# ## FRAMAC
FRAMAC = "./python/framac.py"
FRAMAC_EXE = "frama-c"
FRAMAC_EXE_CPP = "frama-c"
FRAMAC_OUTPUT_C_W = rep_directory + "/framac/temp/c_w_errors_per_line.csv"
FRAMAC_OUTPUT_C_WO = rep_directory + "/framac/temp/c_wo_errors_per_line.csv"
FRAMAC_OUTPUT_CPP_W = rep_directory + "/framac/temp/cpp_w_errors_per_line.csv"
FRAMAC_OUTPUT_CPP_WO = rep_directory + "/framac/temp/cpp_wo_errors_per_line.csv"
FRAMAC_OPTS = ""
FRAMAC_OUT_SUBDEFECTS = rep_directory + "/framac/c_subdefects.csv"
FRAMAC_OUT_DEFECTS = rep_directory + "/framac/c_defects.csv"
FRAMAC_OUT_TOTAL = rep_directory + "/framac/c_total.csv"
FRAMAC_OUT_CPP_SUBDEFECTS = rep_directory + "/framac/cpp_subdefects.csv"
FRAMAC_OUT_CPP_DEFECTS = rep_directory + "/framac/cpp_defects.csv"
FRAMAC_OUT_CPP_TOTAL = rep_directory + "/framac/cpp_total.csv"
FRAMAC_TEMP_C_W = rep_directory + "/framac/temp/c_w_temp.txt"
FRAMAC_TEMP_C_WO = rep_directory + "/framac/temp/c_wo_temp.txt"
FRAMAC_TEMP_CPP_W = rep_directory + "/framac/temp/cpp_w_temp.txt"
FRAMAC_TEMP_CPP_WO = rep_directory + "/framac/temp/cpp_wo_temp.txt"
def make_dirs_forgive(path):
    """Create directory *path* (and missing parents), tolerating reruns.

    When the directory already exists the error is swallowed and a note is
    printed instead, so repeated benchmark runs do not abort here.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        print("Already exists: " + path)
def prepare_dirs():
    """Create the per-tool <rep_directory>/<tool>/temp output tree.

    The original repeated the same make_dirs_forgive call fifteen times;
    a loop over the tool names keeps the list (and creation order) identical
    while making it trivial to add a tool.
    """
    print("Preparing folders\n")
    for tool in ("setup", "cppcheck", "clanalyze", "sparse", "uno",
                 "clangalpha", "clangcore", "clangcorealpha", "flawfinder",
                 "infer", "splint", "framac", "cpplint", "oclint", "flintpp"):
        make_dirs_forgive(os.path.join(rep_directory, tool, "temp"))
def call_python(args):
    """Run a Python script (script path + its argv in *args*) and return its wall-clock time.

    Uses the current interpreter (sys.executable) so the child runs under the
    same Python as the benchmark driver.  Output/exit status are discarded;
    only the timing reported by python.system.system_call is returned.
    """
    command = sys.executable + " " + " ".join(args)
    # `status` renamed from the original's `exit`, which shadowed the builtin;
    # dead commented-out error handling removed.
    (output, err, status, time) = python.system.system_call(command)
    return time
def call_bash(args):
    """Run a bash script (script path + its argv in *args*) and return its wall-clock time."""
    command = "bash " + " ".join(args)
    result = python.system.system_call(command)
    # system_call returns (output, err, exit, time); only the timing matters here.
    return result[3]
def run_cppcheck():
    """Run cppcheck over all four ITC suites and record its timing CSV.

    Writes "cppcheck, <with-defect time>, <without-defect time>" to
    <rep_directory>/cppcheck/timing.csv.
    """
    print("Running cppcheck")
    t1 = call_python([CPPCHECK, CPPCHECK_TEMP_C_W, CPPCHECK_TEMP_C_W_TXT, W_C_DEFECTS_DIR, CPPCHECK_OUTPUT_C_W, CPPCHECK_EXE, CPPCHECK_OPTS])
    t2 = call_python([CPPCHECK, CPPCHECK_TEMP_C_WO, CPPCHECK_TEMP_C_WO_TXT, WO_C_DEFECTS_DIR, CPPCHECK_OUTPUT_C_WO, CPPCHECK_EXE, CPPCHECK_OPTS])
    t3 = call_python([CPPCHECK, CPPCHECK_TEMP_CPP_W, CPPCHECK_TEMP_CPP_W_TXT, W_CPP_DEFECTS_DIR, CPPCHECK_OUTPUT_CPP_W, CPPCHECK_EXE_CPP, CPPCHECK_OPTS])
    t4 = call_python([CPPCHECK, CPPCHECK_TEMP_CPP_WO, CPPCHECK_TEMP_CPP_WO_TXT, WO_CPP_DEFECTS_DIR, CPPCHECK_OUTPUT_CPP_WO, CPPCHECK_EXE_CPP, CPPCHECK_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "cppcheck", "timing.csv"), "w") as timing:
        print("cppcheck", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_cppcheck_stats(tools):
    """Aggregate cppcheck per-line results into subdefect/defect/total CSVs."""
    print("Running cppcheck stats")
    c_args = [STATISTICS, C_MERGE_FILE, CPPCHECK_OUTPUT_C_W, CPPCHECK_OUTPUT_C_WO,
              CPPCHECK_OUT_SUBDEFECTS, CPPCHECK_OUT_DEFECTS, CPPCHECK_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, CPPCHECK_OUTPUT_CPP_W, CPPCHECK_OUTPUT_CPP_WO,
                CPPCHECK_OUT_CPP_SUBDEFECTS, CPPCHECK_OUT_CPP_DEFECTS, CPPCHECK_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_cpplint():
    """Run cpplint over all four ITC suites and record its timing CSV."""
    print("Running cpplint")
    t1 = call_python([CPPLINT, CPPLINT_TEMP_C_W, W_C_DEFECTS_DIR, CPPLINT_OUTPUT_C_W, CPPLINT_EXE, CPPLINT_OPTS])
    t2 = call_python([CPPLINT, CPPLINT_TEMP_C_WO, WO_C_DEFECTS_DIR, CPPLINT_OUTPUT_C_WO, CPPLINT_EXE, CPPLINT_OPTS])
    t3 = call_python([CPPLINT, CPPLINT_TEMP_CPP_W, W_CPP_DEFECTS_DIR, CPPLINT_OUTPUT_CPP_W, CPPLINT_EXE_CPP, CPPLINT_OPTS])
    t4 = call_python([CPPLINT, CPPLINT_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, CPPLINT_OUTPUT_CPP_WO, CPPLINT_EXE_CPP, CPPLINT_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "cpplint", "timing.csv"), "w") as timing:
        print("cpplint", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_cpplint_stats(tools):
    """Aggregate cpplint per-line results into subdefect/defect/total CSVs."""
    # Message fixed: the original printed "Running cpplint", identical to
    # run_cpplint, making the run and stats phases indistinguishable in the log.
    print("Running cpplint stats")
    call_python([STATISTICS, C_MERGE_FILE, CPPLINT_OUTPUT_C_W, CPPLINT_OUTPUT_C_WO, CPPLINT_OUT_SUBDEFECTS, CPPLINT_OUT_DEFECTS, CPPLINT_OUT_TOTAL, tools])
    call_python([STATISTICS, CPP_MERGE_FILE, CPPLINT_OUTPUT_CPP_W, CPPLINT_OUTPUT_CPP_WO, CPPLINT_OUT_CPP_SUBDEFECTS, CPPLINT_OUT_CPP_DEFECTS, CPPLINT_OUT_CPP_TOTAL, tools])
def run_flintpp():
    """Run flint++ over all four ITC suites and record its timing CSV."""
    print("Running flintpp")
    t1 = call_python([FLINTPP, FLINTPP_TEMP_C_W, FLINTPP_TEMP_C_W_TXT, W_C_DEFECTS_DIR, FLINTPP_OUTPUT_C_W, FLINTPP_EXE, FLINTPP_OPTS])
    t2 = call_python([FLINTPP, FLINTPP_TEMP_C_WO, FLINTPP_TEMP_C_WO_TXT, WO_C_DEFECTS_DIR, FLINTPP_OUTPUT_C_WO, FLINTPP_EXE, FLINTPP_OPTS])
    t3 = call_python([FLINTPP, FLINTPP_TEMP_CPP_W, FLINTPP_TEMP_CPP_W_TXT, W_CPP_DEFECTS_DIR, FLINTPP_OUTPUT_CPP_W, FLINTPP_EXE_CPP, FLINTPP_OPTS])
    t4 = call_python([FLINTPP, FLINTPP_TEMP_CPP_WO, FLINTPP_TEMP_CPP_WO_TXT, WO_CPP_DEFECTS_DIR, FLINTPP_OUTPUT_CPP_WO, FLINTPP_EXE_CPP, FLINTPP_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "flintpp", "timing.csv"), "w") as timing:
        print("flintpp", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_flintpp_stats(tools):
    """Aggregate flint++ per-line results into subdefect/defect/total CSVs."""
    # Message fixed: the original printed "Running flintpp", identical to
    # run_flintpp, making the run and stats phases indistinguishable in the log.
    print("Running flintpp stats")
    call_python([STATISTICS, C_MERGE_FILE, FLINTPP_OUTPUT_C_W, FLINTPP_OUTPUT_C_WO, FLINTPP_OUT_SUBDEFECTS, FLINTPP_OUT_DEFECTS, FLINTPP_OUT_TOTAL, tools])
    call_python([STATISTICS, CPP_MERGE_FILE, FLINTPP_OUTPUT_CPP_W, FLINTPP_OUTPUT_CPP_WO, FLINTPP_OUT_CPP_SUBDEFECTS, FLINTPP_OUT_CPP_DEFECTS, FLINTPP_OUT_CPP_TOTAL, tools])
def run_oclint():
    """Run oclint over all four ITC suites and record its timing CSV."""
    print("Running oclint")
    t1 = call_python([OCLINT, OCLINT_TEMP_C_W, W_C_DEFECTS_DIR, OCLINT_OUTPUT_C_W, OCLINT_EXE, OCLINT_OPTS])
    t2 = call_python([OCLINT, OCLINT_TEMP_C_WO, WO_C_DEFECTS_DIR, OCLINT_OUTPUT_C_WO, OCLINT_EXE, OCLINT_OPTS])
    t3 = call_python([OCLINT, OCLINT_TEMP_CPP_W, W_CPP_DEFECTS_DIR, OCLINT_OUTPUT_CPP_W, OCLINT_EXE_CPP, OCLINT_OPTS])
    t4 = call_python([OCLINT, OCLINT_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, OCLINT_OUTPUT_CPP_WO, OCLINT_EXE_CPP, OCLINT_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "oclint", "timing.csv"), "w") as timing:
        print("oclint", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_oclint_stats(tools):
    """Aggregate oclint per-line results into subdefect/defect/total CSVs."""
    # Message fixed: the original printed "Running oclint", identical to
    # run_oclint, making the run and stats phases indistinguishable in the log.
    print("Running oclint stats")
    call_python([STATISTICS, C_MERGE_FILE, OCLINT_OUTPUT_C_W, OCLINT_OUTPUT_C_WO, OCLINT_OUT_SUBDEFECTS, OCLINT_OUT_DEFECTS, OCLINT_OUT_TOTAL, tools])
    call_python([STATISTICS, CPP_MERGE_FILE, OCLINT_OUTPUT_CPP_W, OCLINT_OUTPUT_CPP_WO, OCLINT_OUT_CPP_SUBDEFECTS, OCLINT_OUT_CPP_DEFECTS, OCLINT_OUT_CPP_TOTAL, tools])
def run_framac():
    """Run frama-c over all four ITC suites and record its timing CSV."""
    print("Running framac")
    t1 = call_python([FRAMAC, FRAMAC_TEMP_C_W, W_C_DEFECTS_DIR, FRAMAC_OUTPUT_C_W, FRAMAC_EXE, FRAMAC_OPTS])
    t2 = call_python([FRAMAC, FRAMAC_TEMP_C_WO, WO_C_DEFECTS_DIR, FRAMAC_OUTPUT_C_WO, FRAMAC_EXE, FRAMAC_OPTS])
    t3 = call_python([FRAMAC, FRAMAC_TEMP_CPP_W, W_CPP_DEFECTS_DIR, FRAMAC_OUTPUT_CPP_W, FRAMAC_EXE_CPP, FRAMAC_OPTS])
    t4 = call_python([FRAMAC, FRAMAC_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, FRAMAC_OUTPUT_CPP_WO, FRAMAC_EXE_CPP, FRAMAC_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "framac", "timing.csv"), "w") as timing:
        print("framac", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_framac_stats(tools):
    """Aggregate frama-c per-line results into subdefect/defect/total CSVs."""
    print("Running framac stats")
    c_args = [STATISTICS, C_MERGE_FILE, FRAMAC_OUTPUT_C_W, FRAMAC_OUTPUT_C_WO,
              FRAMAC_OUT_SUBDEFECTS, FRAMAC_OUT_DEFECTS, FRAMAC_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, FRAMAC_OUTPUT_CPP_W, FRAMAC_OUTPUT_CPP_WO,
                FRAMAC_OUT_CPP_SUBDEFECTS, FRAMAC_OUT_CPP_DEFECTS, FRAMAC_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_sparse():
    """Run sparse over all four ITC suites and record its timing CSV."""
    print("Running sparse")
    t1 = call_python([SPARSE, SPARSE_TEMP_C_W, W_C_DEFECTS_DIR, SPARSE_OUTPUT_C_W, SPARSE_EXE, SPARSE_OPTS])
    t2 = call_python([SPARSE, SPARSE_TEMP_C_WO, WO_C_DEFECTS_DIR, SPARSE_OUTPUT_C_WO, SPARSE_EXE, SPARSE_OPTS])
    t3 = call_python([SPARSE, SPARSE_TEMP_CPP_W, W_CPP_DEFECTS_DIR, SPARSE_OUTPUT_CPP_W, SPARSE_EXE_CPP, SPARSE_OPTS])
    t4 = call_python([SPARSE, SPARSE_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, SPARSE_OUTPUT_CPP_WO, SPARSE_EXE_CPP, SPARSE_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "sparse", "timing.csv"), "w") as timing:
        print("sparse", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_sparse_stats(tools):
    """Aggregate sparse per-line results into subdefect/defect/total CSVs."""
    print("Running sparse stats")
    c_args = [STATISTICS, C_MERGE_FILE, SPARSE_OUTPUT_C_W, SPARSE_OUTPUT_C_WO,
              SPARSE_OUT_SUBDEFECTS, SPARSE_OUT_DEFECTS, SPARSE_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, SPARSE_OUTPUT_CPP_W, SPARSE_OUTPUT_CPP_WO,
                SPARSE_OUT_CPP_SUBDEFECTS, SPARSE_OUT_CPP_DEFECTS, SPARSE_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_uno():
    """Run uno over all four ITC suites and record its timing CSV."""
    print("Running uno")
    t1 = call_python([UNO, UNO_TEMP_C_W, W_C_DEFECTS_DIR, UNO_OUTPUT_C_W, UNO_EXE, UNO_OPTS])
    t2 = call_python([UNO, UNO_TEMP_C_WO, WO_C_DEFECTS_DIR, UNO_OUTPUT_C_WO, UNO_EXE, UNO_OPTS])
    t3 = call_python([UNO, UNO_TEMP_CPP_W, W_CPP_DEFECTS_DIR, UNO_OUTPUT_CPP_W, UNO_EXE_CPP, UNO_OPTS])
    t4 = call_python([UNO, UNO_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, UNO_OUTPUT_CPP_WO, UNO_EXE_CPP, UNO_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "uno", "timing.csv"), "w") as timing:
        print("uno", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_uno_stats(tools):
    """Aggregate uno per-line results into subdefect/defect/total CSVs."""
    print("Running uno stats")
    c_args = [STATISTICS, C_MERGE_FILE, UNO_OUTPUT_C_W, UNO_OUTPUT_C_WO,
              UNO_OUT_SUBDEFECTS, UNO_OUT_DEFECTS, UNO_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, UNO_OUTPUT_CPP_W, UNO_OUTPUT_CPP_WO,
                UNO_OUT_CPP_SUBDEFECTS, UNO_OUT_CPP_DEFECTS, UNO_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_flawfinder():
    """Run flawfinder over all four ITC suites and record its timing CSV.

    flawfinder has no separate C++ binary, so FLAWFINDER_EXE is used for all
    four suites.
    """
    print("Running flawfinder")
    t1 = call_python([FLAWFINDER, FLAWFINDER_TEMP_C_W, W_C_DEFECTS_DIR, FLAWFINDER_OUTPUT_C_W, FLAWFINDER_EXE, FLAWFINDER_OPTS])
    t2 = call_python([FLAWFINDER, FLAWFINDER_TEMP_C_WO, WO_C_DEFECTS_DIR, FLAWFINDER_OUTPUT_C_WO, FLAWFINDER_EXE, FLAWFINDER_OPTS])
    t3 = call_python([FLAWFINDER, FLAWFINDER_TEMP_CPP_W, W_CPP_DEFECTS_DIR, FLAWFINDER_OUTPUT_CPP_W, FLAWFINDER_EXE, FLAWFINDER_OPTS])
    t4 = call_python([FLAWFINDER, FLAWFINDER_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, FLAWFINDER_OUTPUT_CPP_WO, FLAWFINDER_EXE, FLAWFINDER_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "flawfinder", "timing.csv"), "w") as timing:
        print("flawfinder", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_flawfinder_stats(tools):
    """Aggregate flawfinder per-line results into subdefect/defect/total CSVs."""
    print("Running flawfinder stats")
    c_args = [STATISTICS, C_MERGE_FILE, FLAWFINDER_OUTPUT_C_W, FLAWFINDER_OUTPUT_C_WO,
              FLAWFINDER_OUT_SUBDEFECTS, FLAWFINDER_OUT_DEFECTS, FLAWFINDER_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, FLAWFINDER_OUTPUT_CPP_W, FLAWFINDER_OUTPUT_CPP_WO,
                FLAWFINDER_OUT_CPP_SUBDEFECTS, FLAWFINDER_OUT_CPP_DEFECTS, FLAWFINDER_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_splint():
    """Run splint over all four ITC suites and record its timing CSV.

    splint has no separate C++ binary, so SPLINT_EXE is used for all four
    suites.
    """
    print("Running splint")
    t1 = call_python([SPLINT, SPLINT_TEMP_C_W, W_C_DEFECTS_DIR, SPLINT_OUTPUT_C_W, SPLINT_EXE, SPLINT_OPTS])
    t2 = call_python([SPLINT, SPLINT_TEMP_C_WO, WO_C_DEFECTS_DIR, SPLINT_OUTPUT_C_WO, SPLINT_EXE, SPLINT_OPTS])
    t3 = call_python([SPLINT, SPLINT_TEMP_CPP_W, W_CPP_DEFECTS_DIR, SPLINT_OUTPUT_CPP_W, SPLINT_EXE, SPLINT_OPTS])
    t4 = call_python([SPLINT, SPLINT_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, SPLINT_OUTPUT_CPP_WO, SPLINT_EXE, SPLINT_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "splint", "timing.csv"), "w") as timing:
        print("splint", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_splint_stats(tools):
    """Aggregate splint per-line results into subdefect/defect/total CSVs."""
    print("Running splint stats")
    c_args = [STATISTICS, C_MERGE_FILE, SPLINT_OUTPUT_C_W, SPLINT_OUTPUT_C_WO,
              SPLINT_OUT_SUBDEFECTS, SPLINT_OUT_DEFECTS, SPLINT_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, SPLINT_OUTPUT_CPP_W, SPLINT_OUTPUT_CPP_WO,
                SPLINT_OUT_CPP_SUBDEFECTS, SPLINT_OUT_CPP_DEFECTS, SPLINT_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_clang_core():
    """Run the clang static analyzer (core checkers) over all four ITC suites.

    Uses the clang wrapper for C and the clang++ wrapper for C++, then records
    the timing CSV.
    """
    print("Running clangcore")
    t1 = call_python([CLANG_CORE, CLANGCORE_TEMP_C_W, W_C_DEFECTS_DIR, CLANG_CORE_OUTPUT_C_W, CLANG_CORE_EXE, CLANG_CORE_OPTS])
    t2 = call_python([CLANG_CORE, CLANGCORE_TEMP_C_WO, WO_C_DEFECTS_DIR, CLANG_CORE_OUTPUT_C_WO, CLANG_CORE_EXE, CLANG_CORE_OPTS])
    t3 = call_python([CLANG_CORE_PP, CLANGCORE_TEMP_CPP_W, W_CPP_DEFECTS_DIR, CLANG_CORE_OUTPUT_CPP_W, CLANG_CORE_EXE_CPP, CLANG_CORE_OPTS])
    t4 = call_python([CLANG_CORE_PP, CLANGCORE_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, CLANG_CORE_OUTPUT_CPP_WO, CLANG_CORE_EXE_CPP, CLANG_CORE_OPTS])
    # Write the timing row directly; the original re-bound sys.stdout to an
    # open() result and never closed it, leaking a handle per tool run.
    with open(os.path.join(rep_directory, "clangcore", "timing.csv"), "w") as timing:
        print("clangcore", ", ", t1 + t3, ", ", t2 + t4, file=timing)
def run_clang_core_stats(tools):
    """Aggregate clangcore per-line results into subdefect/defect/total CSVs."""
    print("Running clangcore stats")
    c_args = [STATISTICS, C_MERGE_FILE, CLANG_CORE_OUTPUT_C_W, CLANG_CORE_OUTPUT_C_WO,
              CLANG_CORE_OUT_SUBDEFECTS, CLANG_CORE_OUT_DEFECTS, CLANG_CORE_OUT_TOTAL, tools]
    cpp_args = [STATISTICS, CPP_MERGE_FILE, CLANG_CORE_OUTPUT_CPP_W, CLANG_CORE_OUTPUT_CPP_WO,
                CLANG_CORE_OUT_CPP_SUBDEFECTS, CLANG_CORE_OUT_CPP_DEFECTS, CLANG_CORE_OUT_CPP_TOTAL, tools]
    call_python(c_args)
    call_python(cpp_args)
def run_clang_core_alpha():
    """Run the clang analyzer with core+alpha checkers over the four benchmark
    directories and record C/C++ timing totals in clangcorealpha/timing.csv."""
    print("Running clangcorealpha")
    t1 = call_python([CLANG_CORE_ALPHA, CLANGCOREALPHA_TEMP_C_W, W_C_DEFECTS_DIR, CLANG_CORE_ALPHA_OUTPUT_C_W, CLANG_CORE_ALPHA_EXE, CLANG_CORE_ALPHA_OPTS])
    t2 = call_python([CLANG_CORE_ALPHA, CLANGCOREALPHA_TEMP_C_WO, WO_C_DEFECTS_DIR, CLANG_CORE_ALPHA_OUTPUT_C_WO, CLANG_CORE_ALPHA_EXE, CLANG_CORE_ALPHA_OPTS])
    t3 = call_python([CLANG_CORE_ALPHA_PP, CLANGCOREALPHA_TEMP_CPP_W, W_CPP_DEFECTS_DIR, CLANG_CORE_ALPHA_OUTPUT_CPP_W, CLANG_CORE_ALPHA_EXE_CPP, CLANG_CORE_ALPHA_OPTS])
    t4 = call_python([CLANG_CORE_ALPHA_PP, CLANGCOREALPHA_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, CLANG_CORE_ALPHA_OUTPUT_CPP_WO, CLANG_CORE_ALPHA_EXE_CPP, CLANG_CORE_ALPHA_OPTS])
    # Fix: avoid the sys.stdout-rebinding pattern (leaked file handle,
    # stdout left redirected on error); write through an explicit handle.
    with open(os.path.join(rep_directory, "clangcorealpha", "timing.csv"), "w") as timing_file:
        print("clangcorealpha", ", ", t1 + t3, ", ", t2 + t4, file=timing_file)
def run_clang_core_alpha_stats(tools):
    """Compute statistics for the clang core+alpha checker configuration.

    :param tools: comma-separated names of the other tools (unique-defect counting)
    """
    # Fix: the log line previously said "Running clangcore stats" — a
    # copy-paste from run_clang_core_stats that made the logs ambiguous.
    print("Running clangcorealpha stats")
    call_python([STATISTICS, C_MERGE_FILE, CLANG_CORE_ALPHA_OUTPUT_C_W, CLANG_CORE_ALPHA_OUTPUT_C_WO, CLANG_CORE_ALPHA_OUT_SUBDEFECTS, CLANG_CORE_ALPHA_OUT_DEFECTS, CLANG_CORE_ALPHA_OUT_TOTAL, tools])
    call_python([STATISTICS, CPP_MERGE_FILE, CLANG_CORE_ALPHA_OUTPUT_CPP_W, CLANG_CORE_ALPHA_OUTPUT_CPP_WO, CLANG_CORE_ALPHA_OUT_CPP_SUBDEFECTS, CLANG_CORE_ALPHA_OUT_CPP_DEFECTS, CLANG_CORE_ALPHA_OUT_CPP_TOTAL, tools])
def run_clang_alpha():
    """Run the clang analyzer (alpha checker set) over the four benchmark
    directories and record C/C++ timing totals in clangalpha/timing.csv."""
    print("Running clangalpha")
    t1 = call_python([CLANG_ALPHA, CLANGALPHA_TEMP_C_W, W_C_DEFECTS_DIR, CLANG_ALPHA_OUTPUT_C_W, CLANG_ALPHA_EXE, CLANG_ALPHA_OPTS])
    t2 = call_python([CLANG_ALPHA, CLANGALPHA_TEMP_C_WO, WO_C_DEFECTS_DIR, CLANG_ALPHA_OUTPUT_C_WO, CLANG_ALPHA_EXE, CLANG_ALPHA_OPTS])
    t3 = call_python([CLANG_ALPHA_PP, CLANGALPHA_TEMP_CPP_W, W_CPP_DEFECTS_DIR, CLANG_ALPHA_OUTPUT_CPP_W, CLANG_ALPHA_EXE_CPP, CLANG_ALPHA_OPTS])
    t4 = call_python([CLANG_ALPHA_PP, CLANGALPHA_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, CLANG_ALPHA_OUTPUT_CPP_WO, CLANG_ALPHA_EXE_CPP, CLANG_ALPHA_OPTS])
    # Fix: avoid the sys.stdout-rebinding pattern (leaked file handle,
    # stdout left redirected on error); write through an explicit handle.
    with open(os.path.join(rep_directory, "clangalpha", "timing.csv"), "w") as timing_file:
        print("clangalpha", ", ", t1 + t3, ", ", t2 + t4, file=timing_file)
def run_clang_alpha_stats(tools):
    """Compute statistics for the clang alpha-checkers configuration (C and C++).

    :param tools: comma-separated names of the other tools (unique-defect counting)
    """
    print("Running clangalpha stats")
    stat_jobs = (
        (C_MERGE_FILE, CLANG_ALPHA_OUTPUT_C_W, CLANG_ALPHA_OUTPUT_C_WO,
         CLANG_ALPHA_OUT_SUBDEFECTS, CLANG_ALPHA_OUT_DEFECTS, CLANG_ALPHA_OUT_TOTAL),
        (CPP_MERGE_FILE, CLANG_ALPHA_OUTPUT_CPP_W, CLANG_ALPHA_OUTPUT_CPP_WO,
         CLANG_ALPHA_OUT_CPP_SUBDEFECTS, CLANG_ALPHA_OUT_CPP_DEFECTS, CLANG_ALPHA_OUT_CPP_TOTAL),
    )
    for merge_file, w_csv, wo_csv, sub_out, def_out, total_out in stat_jobs:
        call_python([STATISTICS, merge_file, w_csv, wo_csv, sub_out, def_out, total_out, tools])
def run_infer():
    """Run Facebook Infer over the four benchmark directories and record
    C/C++ timing totals in infer/timing.csv."""
    print("Running infer")
    t1 = call_python([INFER, INFER_TEMP_C_W, W_C_DEFECTS_DIR, INFER_OUTPUT_C_W, INFER_EXE])
    t2 = call_python([INFER, INFER_TEMP_C_WO, WO_C_DEFECTS_DIR, INFER_OUTPUT_C_WO, INFER_EXE])
    t3 = call_python([INFER, INFER_TEMP_CPP_W, W_CPP_DEFECTS_DIR, INFER_OUTPUT_CPP_W, INFER_EXE])
    t4 = call_python([INFER, INFER_TEMP_CPP_WO, WO_CPP_DEFECTS_DIR, INFER_OUTPUT_CPP_WO, INFER_EXE])
    # Fix: avoid the sys.stdout-rebinding pattern (leaked file handle,
    # stdout left redirected on error); write through an explicit handle.
    with open(os.path.join(rep_directory, "infer", "timing.csv"), "w") as timing_file:
        print("infer", ", ", t1 + t3, ", ", t2 + t4, file=timing_file)
def run_infer_stats(tools):
    """Compute sub-defect/defect/total statistics for Infer (C and C++).

    :param tools: comma-separated names of the other tools (unique-defect counting)
    """
    print("Running infer stats")
    stat_jobs = (
        (C_MERGE_FILE, INFER_OUTPUT_C_W, INFER_OUTPUT_C_WO,
         INFER_OUT_SUBDEFECTS, INFER_OUT_DEFECTS, INFER_OUT_TOTAL),
        (CPP_MERGE_FILE, INFER_OUTPUT_CPP_W, INFER_OUTPUT_CPP_WO,
         INFER_OUT_CPP_SUBDEFECTS, INFER_OUT_CPP_DEFECTS, INFER_OUT_CPP_TOTAL),
    )
    for merge_file, w_csv, wo_csv, sub_out, def_out, total_out in stat_jobs:
        call_python([STATISTICS, merge_file, w_csv, wo_csv, sub_out, def_out, total_out, tools])
def run_clanalyze():
    """Run MSVC `cl /analyze` over the four benchmark directories (called
    in-process via python.clanalyze rather than call_python) and record
    timing totals in clanalyze/timing.csv."""
    print("Running cl /analyze")
    t1 = python.clanalyze.clanalyze(W_C_DEFECTS_DIR, CLANALYZE_TEMP_C_W, CLANALYZE_OUTPUT_C_W, CLANALYZE_EXE, CLANALYZE_OPTS)
    t2 = python.clanalyze.clanalyze(WO_C_DEFECTS_DIR, CLANALYZE_TEMP_C_WO, CLANALYZE_OUTPUT_C_WO, CLANALYZE_EXE, CLANALYZE_OPTS)
    t3 = python.clanalyze.clanalyze(W_CPP_DEFECTS_DIR, CLANALYZE_TEMP_CPP_W, CLANALYZE_OUTPUT_CPP_W, CLANALYZE_EXE, CLANALYZE_OPTS)
    t4 = python.clanalyze.clanalyze(WO_CPP_DEFECTS_DIR, CLANALYZE_TEMP_CPP_WO, CLANALYZE_OUTPUT_CPP_WO, CLANALYZE_EXE, CLANALYZE_OPTS)
    # Fix: avoid the sys.stdout-rebinding pattern (leaked file handle,
    # stdout left redirected on error); write through an explicit handle.
    # NOTE: unlike the other tools this row has no ", " separator fields;
    # kept as-is so downstream parsing of timing.csv is unchanged.
    with open(os.path.join(rep_directory, "clanalyze", "timing.csv"), "w") as timing_file:
        print("clanalyze", t1 + t3, t2 + t4, file=timing_file)
def run_clanalyze_stats(tools):
    """Compute sub-defect/defect/total statistics for cl /analyze (C and C++).

    :param tools: comma-separated names of the other tools (unique-defect counting)
    """
    print("Running cl /analyze")
    stat_jobs = (
        (C_MERGE_FILE, CLANALYZE_OUTPUT_C_W, CLANALYZE_OUTPUT_C_WO,
         CLANALYZE_OUT_SUBDEFECTS, CLANALYZE_OUT_DEFECTS, CLANALYZE_OUT_TOTAL),
        (CPP_MERGE_FILE, CLANALYZE_OUTPUT_CPP_W, CLANALYZE_OUTPUT_CPP_WO,
         CLANALYZE_OUT_CPP_SUBDEFECTS, CLANALYZE_OUT_CPP_DEFECTS, CLANALYZE_OUT_CPP_TOTAL),
    )
    for merge_file, w_csv, wo_csv, sub_out, def_out, total_out in stat_jobs:
        call_python([STATISTICS, merge_file, w_csv, wo_csv, sub_out, def_out, total_out, tools])
def generate_main_itc_csvs():
    """Build the ground-truth CSVs for the ITC benchmark: gather expected
    errors per line for each of the four source trees, then merge the
    with/without-defect pairs into the C and C++ merge files."""
    print("Gather errors from the main itc-benchmark...")
    gather_jobs = (
        (W_C_DEFECTS_DIR, C_ERRORS_PER_LINE_FILE, "0"),
        (WO_C_DEFECTS_DIR, C_WO_ERRORS_PER_LINE_FILE, "1"),
        (W_CPP_DEFECTS_DIR, CPP_ERRORS_PER_LINE_FILE, "0"),
        (WO_CPP_DEFECTS_DIR, CPP_WO_ERRORS_PER_LINE_FILE, "1"),
    )
    for defects_dir, out_csv, wo_flag in gather_jobs:
        call_bash([GATHER_ERRORS, defects_dir, out_csv, wo_flag])
    call_bash([MERGE_EXE, C_ERRORS_PER_LINE_FILE, C_WO_ERRORS_PER_LINE_FILE, C_MERGE_FILE])
    call_bash([MERGE_EXE, CPP_ERRORS_PER_LINE_FILE, CPP_WO_ERRORS_PER_LINE_FILE, CPP_MERGE_FILE])
def all_tools():
    """Return the list of analyzers included in the aggregate reports."""
    # Earlier configurations, kept for reference:
    # ['cppcheck', 'sparse', 'uno', 'infer', 'splint', 'flawfinder', 'clangcore', 'clangalpha', 'framac', 'oclint', 'flintpp', 'clanalyze']
    # ['cppcheck', 'infer', 'flawfinder', 'clangcorealpha', 'oclint', 'flintpp']
    return [
        'cppcheck', 'sparse', 'uno', 'infer', 'splint', 'flawfinder',
        'clangcorealpha', 'framac', 'oclint', 'flintpp', 'clanalyze',
    ]
import glob
def clean_temp_stats():
    """Delete every tool's cached statistics CSVs (c*.csv) under the report
    directory so the next 'stat'/'total' run recomputes them."""
    for tool_name in all_tools():
        pattern = os.path.join(rep_directory, tool_name, "c*.csv")
        for stale_csv in glob.glob(pattern):
            print("Removing", stale_csv)
            os.remove(stale_csv)
def run_stats(tools):
    """Dispatch to the per-tool *_stats routine.

    NOTE(review): the tool name is read from the module-level global ``tool``
    (set by the command-line handling below), not from a parameter — the
    ``tools`` argument only carries the comma-separated list of *other*
    tools used for unique-defect counting.
    """
    handlers = {
        'cppcheck': run_cppcheck_stats,
        'clanalyze': run_clanalyze_stats,
        'sparse': run_sparse_stats,
        'uno': run_uno_stats,
        'infer': run_infer_stats,
        'splint': run_splint_stats,
        'flawfinder': run_flawfinder_stats,
        'clangcore': run_clang_core_stats,
        'clangcorealpha': run_clang_core_alpha_stats,
        'clangalpha': run_clang_alpha_stats,
        'framac': run_framac_stats,
        'cpplint': run_cpplint_stats,
        'oclint': run_oclint_stats,
        'flintpp': run_flintpp_stats,
    }
    handler = handlers.get(tool)
    if handler is None:
        print("Unknown tool", tool)
    else:
        handler(tools)
# ---------------------------------------------------------------------------
# Command-line entry point.  argv[2] selects the action; argv[3]/argv[4]
# (when present) name the tool and the comparison-tool list.
# NOTE(review): argv[1] is consumed earlier in the file (presumably bound to
# `rep_directory`) — confirm against the file header.
action = sys.argv[2]
if action == 'setup':
    # One-time setup: create directory layout and build the ground-truth CSVs.
    prepare_dirs()
    generate_main_itc_csvs()
elif action == 'prepare_dirs':
    prepare_dirs()
elif action == "clean":
    # Drop cached per-tool statistics CSVs.
    clean_temp_stats()
elif action == 'run':
    # Run a single analyzer (named in argv[3]) over the benchmark.
    tool = sys.argv[3]
    if tool == 'cppcheck':
        run_cppcheck()
    elif tool == 'clanalyze':
        run_clanalyze()
    elif tool == 'sparse':
        run_sparse()
    elif tool == "uno":
        run_uno()
    elif tool == 'infer':
        run_infer()
    elif tool == 'splint':
        run_splint()
    elif tool == "flawfinder":
        run_flawfinder()
    elif tool == 'clangcore':
        run_clang_core()
    elif tool == 'clangcorealpha':
        run_clang_core_alpha()
    elif tool == "clangalpha":
        run_clang_alpha()
    elif tool == 'framac':
        run_framac()
    elif tool == 'cpplint':
        run_cpplint()
    elif tool == 'oclint':
        run_oclint()
    elif tool == 'flintpp':
        run_flintpp()
    else:
        print("Unknown tool", tool)
elif action == 'stat':
    # Statistics for a single tool; argv[4] optionally lists the other
    # tools so uniquely-detected defects can be counted.
    tool = sys.argv[3]
    tools = ""
    if len(sys.argv) > 4: # handle 'unique stat' tool list
        tools = sys.argv[4]
    run_stats(tools)
elif action == 'total':
    # Recompute stats for every tool (each compared against all the others),
    # then emit the aggregate LaTeX table.
    tool_list = all_tools()
    for tool in tool_list:
        tools = ",".join(list(filter(lambda x : x != tool, tool_list)))
        run_stats(tools)
    # generate main latex table
    latex_dir = os.path.join(os.path.dirname(rep_directory), "latex")
    python.latex.total("total.tex", rep_directory, latex_dir, all_tools())
elif action == 'defects':
    # Per-defect-type LaTeX tables (detection rate, FP rate, productivity,
    # robust detection rate, unique detections).
    tool_list = all_tools()
    for tool in tool_list:
        tools = ",".join(list(filter(lambda x : x != tool, tool_list)))
        run_stats(tools)
    latex_dir = os.path.join(os.path.dirname(rep_directory), "latex")
    python.latex.defects_dr("defects_dr.tex", rep_directory, latex_dir, all_tools())
    python.latex.defects_fpr("defects_fpr.tex", rep_directory, latex_dir, all_tools())
    python.latex.defects_pr("defects_pr.tex", rep_directory, latex_dir, all_tools())
    python.latex.defects_rdr("defects_rdr.tex", rep_directory, latex_dir, all_tools())
    python.latex.defects_unique("defects_unique.tex", rep_directory, latex_dir, all_tools())
elif action == 'subdefects':
    # Per-sub-defect LaTeX tables.
    tool_list = all_tools()
    for tool in tool_list:
        tools = ",".join(list(filter(lambda x : x != tool, tool_list)))
        run_stats(tools)
    latex_dir = os.path.join(os.path.dirname(rep_directory), "latex")
    python.latex.subdefects_pr("subdefects_pr.tex", rep_directory, latex_dir, all_tools())
    python.latex.subdefects_rdr("subdefects_rdr.tex", rep_directory, latex_dir, all_tools())
    python.latex.subdefects_unique("subdefects_unique.tex", rep_directory, latex_dir, all_tools())
    python.latex.subdefects_all("subdefects_detected_by.tex", rep_directory, latex_dir, all_tools())
else:
    print("Action ", action, " not supported or incomplete.\n")
| {"/python/clanalyze.py": ["/python/system.py"], "/benchmark.py": ["/python/system.py", "/python/clanalyze.py", "/python/latex.py"]} |
65,566 | wubozhi/itc-testing-tools | refs/heads/master | /python/system.py |
import shlex
from subprocess import Popen, PIPE
import time
# simply run a system command and return the needed info
def system_call(cmd, dir="."):
    """Execute *cmd* in directory *dir*, capturing output and timing.

    :param cmd: command line; split with shlex before execution
    :param dir: working directory for the child process (default ".")
    :return: tuple (stdout_bytes, stderr_bytes, exit_code, elapsed_seconds)
    """
    print("[CMD]:", cmd)
    started = time.time()
    child = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=dir)
    (output, err) = child.communicate()
    exit_code = child.wait()
    if exit_code == 0:
        print("[DONE]", "\n")
    else:
        print("[FAILED]:", err.decode("utf-8"))
        print("[EXIT CODE]:", exit_code)
    # print("[OUTPUT]:", output.decode("utf-8"))
    elapsed = round(time.time() - started, 3)
    return (output, err, exit_code, elapsed)
| {"/python/clanalyze.py": ["/python/system.py"], "/benchmark.py": ["/python/system.py", "/python/clanalyze.py", "/python/latex.py"]} |
65,567 | wubozhi/itc-testing-tools | refs/heads/master | /python/statistics.py | import sys
import os
from collections import defaultdict
from math import sqrt
# input files (all paths come from the command line; this script is invoked
# per tool by benchmark.py)
merged_csv_filename=sys.argv[1]      # merged ground-truth CSV (with + without defect lines)
tool_filename_w_defects=sys.argv[2]  # tool results on the "with defects" sources
tool_filename_wo_defects=sys.argv[3] # tool results on the "without defects" sources
tool_out_subdefects=sys.argv[4]      # output: per-file (sub-defect) statistics CSV
tool_out_defects=sys.argv[5]         # output: per-defect-type statistics CSV
tool_out_total=sys.argv[6]           # output: overall totals CSV
# handle list of tools to detect unique bugs
tools = []
if len(sys.argv) > 7:
    tools = sys.argv[7].split(",")
# print(merged_csv_filename)
# print(tool_filename_w_defects)
# print(tool_filename_wo_defects)
# print(tool_out_subdefects)
# print(tool_out_defects)
# print(tool_out_total)
# exit(1)
def get_defects_reported_by_tool(tool_filename_w_defects, tool_filename_wo_defects):
    """Read a tool's two result CSVs into per-file line-number lists.

    Each CSV has a header row followed by ``filename, line, ...`` rows.

    :param tool_filename_w_defects: CSV of findings on the "with defects" sources
    :param tool_filename_wo_defects: CSV of findings on the "without defects" sources
    :return: pair of defaultdicts mapping filename -> list of flagged line
        numbers (missing files yield an empty list)
    """
    def _read_report(csv_path):
        # Parse one findings CSV; the first row is a header.  (The original
        # duplicated this loop twice and reused `line` for both the raw row
        # and the parsed line number.)
        found = defaultdict(list)
        with open(csv_path, "r") as report:
            next(report, None)  # skip header row
            for row in report:
                fields = row.split(",")
                found[fields[0].strip()].append(int(fields[1].strip()))
        return found

    return (_read_report(tool_filename_w_defects), _read_report(tool_filename_wo_defects))
def get_filename_w_defects(tool):
    """Locate another *tool*'s "with defects" CSV by rebasing this tool's
    result path onto ``../../<tool>/temp/`` with the same file name."""
    resolved = os.path.realpath(tool_filename_w_defects)
    csv_name = os.path.basename(resolved)
    parent_dir = os.path.dirname(resolved)
    return os.path.join(parent_dir, "..", "..", tool, "temp", csv_name)
def get_filename_wo_defects(tool):
    """Locate another *tool*'s "without defects" CSV by rebasing this tool's
    result path onto ``../../<tool>/temp/`` with the same file name."""
    resolved = os.path.realpath(tool_filename_wo_defects)
    csv_name = os.path.basename(resolved)
    parent_dir = os.path.dirname(resolved)
    return os.path.join(parent_dir, "..", "..", tool, "temp", csv_name)
# get defects for current tool
(w_defects_found, wo_defects_found) = get_defects_reported_by_tool(tool_filename_w_defects, tool_filename_wo_defects)
# get defects for other tools (used to decide which detections are unique
# to the current tool)
defects_by_tool = {}
for tool in tools:
    w_defects = get_filename_w_defects(tool)
    wo_defects = get_filename_wo_defects(tool)
    defects_by_tool[tool] = get_defects_reported_by_tool(w_defects, wo_defects)
# print(defects_by_tool)
### statistics
# read merged file into datastructures.
# Each merged row describes one defect variation:
#   filename, line-with-defect, line-without-defect, defect type, sub-type.
# For every variation we record whether the current tool flagged the
# defective line (x = true positive) and the corrected line (y = false
# positive), plus the sets of *other* tools that did the same.
defect_dict = {}                                 # filename -> defect type
subdefect_dict = {}                              # filename -> defect sub-type
line_dict = {}                                   # filename -> defective line numbers
line_wo_dict = {}                                # filename -> corrected line numbers
variations = []                                  # all variation tuples, in file order
variations_by_filename = defaultdict(lambda: [])
filenames_by_defect = defaultdict(lambda: set())
firstLine = True
with open(merged_csv_filename, "r") as merged_file:
    for line in merged_file:
        if (firstLine):
            # skip the CSV header row
            firstLine = False
            continue
        a = line.split(",")
        filename = a[0].strip()
        line_w_def = int(a[1].strip())
        line_wo_def = int(a[2].strip())
        def_type = a[3].strip()
        def_subtype = a[4].strip()
        filenames_by_defect[def_type].add(filename)
        if (not (filename in defect_dict.keys())):
            # first variation seen for this file: initialise its entries
            defect_dict[filename] = def_type
            subdefect_dict[filename] = def_subtype
            line_dict[filename] = []
            line_wo_dict[filename] = []
        line_dict[filename].append(line_w_def)
        line_wo_dict[filename].append(line_wo_def)
        # x: current tool flagged the defective line (true positive)
        # y: current tool flagged the corrected line (false positive)
        x = line_w_def in w_defects_found[filename]
        y = line_wo_def in wo_defects_found[filename]
        detected_by_w = set()
        detected_by_wo = set()
        for tool in tools:
            (tool_w_defects_found,tool_wo_defects_found) = defects_by_tool[tool]
            if line_w_def in tool_w_defects_found[filename]:
                detected_by_w.add(tool)
            if line_wo_def in tool_wo_defects_found[filename]:
                detected_by_wo.add(tool)
        tup = (filename, line_w_def, line_wo_def, x, y, detected_by_w, detected_by_wo)
        variations.append(tup)
        variations_by_filename[filename].append(tup)
# for filename in defect_dict.keys():
#     print(filename)
#     print(defect_dict[filename])
#     print(subdefect_dict[filename])
#     print(line_dict[filename])
#     print(line_wo_dict[filename])
#     print()
#     print("=============")
# Debug trace: append every variation tuple to temp.txt.
sys.stdout = open("temp.txt", "a")
for variation in variations:
    print(variation)
sys.stdout = sys.__stdout__
# Sub-defects stats: one row per benchmark file (each file represents one
# sub-defect).  Metrics per file:
#   dr   = detection rate (TP / variations)
#   fpr  = false-positive rate (FP / variations)
#   prod = productivity, sqrt(dr * (100 - fpr))  (geometric mean)
#   robustness = variations detected without a matching false positive
#   unique = robust detections no other tool made (diff of detector sets empty)
sys.stdout = open(tool_out_subdefects, 'w')
print("Filename, Defect, Subdefect, TP, FP, Variations, Detection rate, False pos rate, Productivity, Robust detection count, Robust detection rate", ",", "Unique")
for filename in defect_dict.keys():
    count_tp = 0
    count_fp = 0
    robust_counter = 0
    count_total = len(variations_by_filename[filename])
    unique = 0
    for variation in variations_by_filename[filename]:
        # variation = (filename, line_w, line_wo, tp?, fp?, others_w, others_wo)
        if (variation[3]):
            count_tp = count_tp + 1
        if (variation[4]):
            count_fp = count_fp + 1
        if (variation[3] and not variation[4]):
            robust_counter = robust_counter + 1
        # tools that flagged the defective line but not the corrected one
        diff = variation[5] - variation[6]
        if (variation[3] and (not variation[4]) and len(diff) == 0):
            unique = unique + 1
    dr = (count_tp * 100) / count_total
    fpr = (count_fp * 100) / count_total
    prod = sqrt(dr * (100 - fpr))
    robustness = (robust_counter * 100) / count_total
    print(filename,",", defect_dict[filename],",", subdefect_dict[filename],",", count_tp,",", count_fp,",", count_total, ",", round(dr,2), ",", round(fpr,2), ",", round(prod,2), ",", robust_counter , ",", round(robustness,2), ",", unique)
# Defects stats: the same metrics aggregated over all files sharing a
# defect type.
sys.stdout = open(tool_out_defects, 'w')
print("Defect, TP, FP, Variations, Detection rate, False pos rate, Productivity, Robust detection count, Robust detection rate, Unique")
for defect in filenames_by_defect.keys():
    count_tp = 0
    count_fp = 0
    count_total = 0
    robust_counter = 0
    unique = 0
    for filename in filenames_by_defect[defect]:
        count_total = count_total + len(variations_by_filename[filename])
        for variation in variations_by_filename[filename]:
            if (variation[3]):
                count_tp = count_tp + 1
            if (variation[4]):
                count_fp = count_fp + 1
            if (variation[3] and not variation[4]):
                robust_counter = robust_counter + 1
            diff = variation[5] - variation[6]
            if (variation[3] and (not variation[4]) and len(diff) == 0):
                unique = unique + 1
    dr = (count_tp * 100) / count_total
    fpr = (count_fp * 100) / count_total
    prod = sqrt(dr * (100 - fpr))
    robustness = (robust_counter * 100) / count_total
    print(defect,",", count_tp,",", count_fp,",", count_total, ",", round(dr,2), ",", round(fpr,2), ",", round(prod,2), ",", robust_counter, ",", round(robustness,2), ",", unique)
# Total stats: the same metrics aggregated over the whole benchmark.
sys.stdout = open(tool_out_total, 'w')
count_tp = 0
count_fp = 0
count_total = 0
robust_counter = 0
unique = 0
print("TP, FP, Variations, Detection rate, False pos rate, Productivity, Robust detection count, Robust detection rate, Unique")
for filename in defect_dict.keys():
    count_total = count_total + len(variations_by_filename[filename])
    for variation in variations_by_filename[filename]:
        if (variation[3]):
            count_tp = count_tp + 1
        if (variation[4]):
            count_fp = count_fp + 1
        if (variation[3] and not variation[4]):
            robust_counter = robust_counter + 1
        diff = variation[5] - variation[6]
        if (variation[3] and (not variation[4]) and len(diff) == 0):
            unique = unique + 1
dr = (count_tp * 100) / count_total
fpr = (count_fp * 100) / count_total
prod = sqrt(dr * (100 - fpr))
robustness = (robust_counter * 100) / count_total
print(count_tp,",", count_fp,",", count_total, ",", round(dr,2), ",", round(fpr,2), ",", round(prod,2), ",", robust_counter, ",", round(robustness,2), ",", unique)
| {"/python/clanalyze.py": ["/python/system.py"], "/benchmark.py": ["/python/system.py", "/python/clanalyze.py", "/python/latex.py"]} |
65,568 | wubozhi/itc-testing-tools | refs/heads/master | /python/flawfinder.py | import sys
import os.path
import system
import dirutils
import tempfile
import shutil
from pathlib import Path
temp_path = os.path.abspath(sys.argv[1])   # log of tool invocations
directory = os.path.abspath(sys.argv[2])   # benchmark sources to analyze
csv = os.path.abspath(sys.argv[3])         # output CSV (filename, line, error)
exe = sys.argv[4]                          # flawfinder executable
# create temporary dir to run the analyzer
# NOTE(review): tempfile._get_candidate_names() is a private CPython API —
# consider tempfile.mkdtemp() instead.
tmpdir_path = os.path.join(str(Path.home()),"tmp", "flawfinder-" + next(tempfile._get_candidate_names()))
shutil.copytree(directory, tmpdir_path)
print("======[FLAWFINDER]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
# analyze every C/C++ source in the copied tree
source_files = dirutils.list_files(tmpdir_path, '.c') + dirutils.list_files(tmpdir_path, '.cpp')
dirutils.file_line_error_header(csv)  # write the CSV header row
dirutils.reset_file(temp_path)        # truncate the invocation log
for source_file in source_files:
    # main.c and the extern helper are never analyzed on their own.
    if source_file.endswith("main.c"):
        continue
    if source_file.endswith("invalid_extern_1.c"):
        continue
    if source_file.endswith("invalid_extern.c"):
        # invalid_extern needs its companion translation unit on the same command line
        source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
    flawfinder = exe + " " + source_file
    (output, err, exit_code, elapsed) = system.system_call(flawfinder, tmpdir_path)
    dirutils.tool_exec_log(temp_path, flawfinder, output, err, exit_code)
    # Collect report lines between "FINAL RESULTS:" and "ANALYSIS SUMMARY:".
    # line_codes[i] is True for "file:line:message" hit lines and False for
    # wrapped continuation text belonging to the previous hit.
    all_lines = output.splitlines()
    lines = []
    line_codes = []
    collect_flag = False
    for raw_line in all_lines:
        dec = raw_line.decode("utf-8").strip()
        if collect_flag:
            lines.append(dec)
            line_codes.append(len(dec.split(":")) >= 3)
        if dec == "FINAL RESULTS:":
            collect_flag = True
        if dec == "ANALYSIS SUMMARY:":
            break
    # Fix: write through an explicit handle instead of rebinding sys.stdout,
    # which leaked one file object per analyzed source.
    with open(csv, "a") as csv_out:
        for i in range(len(lines)):
            if not line_codes[i]:
                continue
            a = lines[i].split(":")
            filename = os.path.basename(a[0])
            line_no = a[1]
            # Re-join the message portion (it may itself contain ':').
            error_message = ""
            for part in a[2:]:
                error_message = error_message + ":" + part
            # BUG FIX: the original did `error_message += error_message + ...`
            # here, doubling the accumulated text for every continuation line.
            j = i + 1
            while j < len(lines) and not line_codes[j]:
                error_message = error_message + " " + lines[j].strip()
                j = j + 1
            print(filename, ",", line_no, ",", "\"" + error_message + "\"", file=csv_out)
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH FLAWFINDER]=======")
65,569 | wubozhi/itc-testing-tools | refs/heads/master | /python/clang++.py | import sys
import os.path
import system
import dirutils
import tempfile
import platform
import shutil
import re
from pathlib import Path
temp_path = os.path.abspath(sys.argv[1])   # log of tool invocations
directory = os.path.abspath(sys.argv[2])   # benchmark sources to analyze
csv = os.path.abspath(sys.argv[3])         # output CSV (filename, line, error)
exe = sys.argv[4]                          # clang++ executable
opts = sys.argv[5]                         # extra clang++ options
def detect_clang_compilation_opts(source_files):
    """Probe-compile candidates until one succeeds, then harvest the system
    include flags from its verbose log.

    :param source_files: candidate source files to try compiling
    :return: space-joined resource-dir/isystem flags, or "" if nothing compiles
    """
    for candidate in source_files:
        probe_cmd = exe + " -c " + candidate + " -v"
        (out, er, ex, t) = system.system_call(probe_cmd, ".")
        if ex == 0:
            # the interesting flags appear on stderr of the verbose compile
            return extract_opts(er.decode('utf-8'))
    return ""
def extract_opts(log):
    """Extract `-resource-dir` and `-internal-*isystem` flags (with their
    values) from a verbose clang compile log.

    :param log: decoded stderr of a `clang -v` invocation
    :return: the matched flag/value pairs joined by single spaces ("" if none)
    """
    # Fix: use raw strings — the original wrote "\-" in plain string
    # literals, an invalid escape sequence (DeprecationWarning today, a
    # SyntaxError in future Python); '-' needs no escaping in a regex.
    res = re.findall(r"(-resource-dir\s+[^\s]*?)\s", log)
    incl = re.findall(r"(-internal[^\s]*isystem\s+[^\s]*?)\s", log)
    return " ".join(res + incl)
# create temporary dir to run the analyzer
# NOTE(review): tempfile._get_candidate_names() is a private CPython API.
tmpdir_path = os.path.join(str(Path.home()),"tmp", "clang-" + next(tempfile._get_candidate_names()))
shutil.copytree(directory, tmpdir_path)
print("\n======[CLANG++]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
print("[EXE OPTIONS]:", opts)
source_files = dirutils.list_files(tmpdir_path, '.cpp')
# system include flags recovered from a successful verbose compile
sys_opts = detect_clang_compilation_opts(source_files)
dirutils.file_line_error_header(csv)  # write the CSV header row
dirutils.reset_file(temp_path)        # truncate the invocation log
for source_file in source_files:
    # the .c exclusions below are inherited from the C variant of this
    # script; only .cpp files are listed here, so they never match
    if source_file.endswith("main.c"):
        continue
    if source_file.endswith("invalid_extern_1.c"):
        continue
    if source_file.endswith("invalid_extern.c"):
        source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
    clang = exe + " " + opts + " " + sys_opts + " " + source_file
    (output, err, exit, time) = system.system_call(clang, ".")
    dirutils.tool_exec_log(temp_path, clang, output, err, exit)
    sys.stdout = open(csv, "a")
    lines = err.splitlines()
    for line in lines:
        # clang diagnostics look like: file:line:col: severity: message
        # NOTE(review): parsed[4] is read while only len >= 4 is checked —
        # assumes diagnostics always carry a message field; confirm.
        parsed = line.decode("utf-8").strip().split(":")
        if (len(parsed) >= 4 and parsed[0] == source_file and not parsed[3].endswith('note')):
            print(os.path.basename(parsed[0]), ",", parsed[1], ",", parsed[3] + ":" + parsed[4])
    sys.stdout = sys.__stdout__
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH CLANG++]=======")
65,570 | wubozhi/itc-testing-tools | refs/heads/master | /python/infer.py | import json
import sys
import os.path
import system
import shutil
import tempfile
import dirutils
from pathlib import Path
temp_path = os.path.abspath(sys.argv[1])   # log of tool invocations
directory = os.path.abspath(sys.argv[2])   # benchmark sources to analyze
csv = os.path.abspath(sys.argv[3])         # output CSV (filename, line, error)
exe = sys.argv[4]                          # infer executable
# create temporary dir to run the analyzer
# NOTE(review): tempfile._get_candidate_names() is a private CPython API.
tmpdir_path = os.path.join(str(Path.home()),"tmp", "infer-" + next(tempfile._get_candidate_names()))
shutil.copytree(directory, tmpdir_path)
print("======[INFER]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
source_files = dirutils.list_files(tmpdir_path, '.c') + dirutils.list_files(tmpdir_path, '.cpp')
dirutils.file_line_error_header(csv)  # write the CSV header row
dirutils.reset_file(temp_path)        # truncate the invocation log
for source_file in source_files:
    # main.c and the extern helper are never analyzed on their own
    if source_file.endswith("main.c"):
        continue
    if source_file.endswith("invalid_extern_1.c"):
        continue
    if source_file.endswith("invalid_extern.c"):
        source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
    infer = exe + " run -- gcc -c " + source_file
    (output, err, exit, time) = system.system_call(infer, tmpdir_path)
    dirutils.tool_exec_log(temp_path, infer, output, err, exit)
    sys.stdout = open(csv, "a")
    # infer writes its findings as JSON under infer-out/report.json
    report_file = os.path.join(tmpdir_path, "infer-out", "report.json")
    if (os.path.exists(report_file)):
        with open(report_file) as json_report_file:
            data = json.load(json_report_file)
            for d in data:
                print(d['file'].strip(), ",", str(d['line']), ",", "\"" + d['qualifier'] + "\"")
        # NOTE(review): stdout is only restored when a report exists — if
        # infer produced none, stdout stays redirected to the CSV; confirm
        # whether this is intentional.
        sys.stdout = sys.__stdout__
        shutil.rmtree(os.path.dirname(report_file))  # drop infer-out between runs
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH INFER]=======")
65,571 | wubozhi/itc-testing-tools | refs/heads/master | /python/uno.py | import sys
import os.path
import system
import dirutils
import tempfile
from itertools import takewhile
import shutil
from pathlib import Path
temp_path = os.path.abspath(sys.argv[1])   # log of tool invocations
directory = os.path.abspath(sys.argv[2])   # benchmark sources to analyze
csv = os.path.abspath(sys.argv[3])         # output CSV (filename, line, error)
exe = sys.argv[4]                          # uno executable
# extra uno options are optional
if (len(sys.argv) > 5):
    opts = sys.argv[5]
else:
    opts = ""
# create temporary dir to run the analyzer
# NOTE(review): tempfile._get_candidate_names() is a private CPython API.
tmpdir_path = os.path.join(str(Path.home()),"tmp", "uno-" + next(tempfile._get_candidate_names()))
shutil.copytree(directory, tmpdir_path)
print("======[UNO]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
print("[EXE OPTIONS]:", opts)
dirutils.file_line_error_header(csv)  # write the CSV header row
dirutils.reset_file(temp_path)        # truncate the invocation log
source_files = dirutils.list_files(tmpdir_path, '.c') + dirutils.list_files(tmpdir_path, '.cpp')
for source_file in source_files:
    # main.c and the extern helper are never analyzed on their own
    if source_file.endswith("main.c"):
        continue
    if source_file.endswith("invalid_extern_1.c"):
        continue
    if source_file.endswith("invalid_extern.c"):
        source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
    uno = exe + " " + opts + " " + source_file
    (output, err, exit, time) = system.system_call(uno, tmpdir_path)
    dirutils.tool_exec_log(temp_path, uno, output, err, exit)
    lines = output.splitlines()
    sys.stdout = open(csv, "a")
    for line in lines:
        # uno diagnostics look like: uno:file:line:message
        a = line.decode("utf-8").strip().split(":")
        if (len(a) >= 4) and (a[0] == 'uno'):
            if len(a[2]) > 10: # hack to work around uno printing a wrong array index: take leading digits as the line number
                print(os.path.basename(a[1]), ",", ''.join(takewhile(str.isdigit, a[2].strip())), ",", a[2])
            else:
                print(os.path.basename(a[1]), ",", a[2], ",", a[3])
    sys.stdout = sys.__stdout__
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH UNO]=======")
65,618 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/preprocessing.py | import pandas as pd
import math
import numpy as np
import random as rand
from recordclass import recordclass
from statsmodels import robust
class PreprocessedData:
    """Voltammetry data preprocessed into flattened train/test partitions.

    Exposes ``self.training`` and ``self.testing`` as produced by
    flatten_data (vgrams stacked sweep-wise, labels stacked row-wise).
    """

    def __init__(self, voltammogram_data, muLabels, window_size=1500, trainingSampleSize=500, corr_over=False):
        """
        :param voltammogram_data: 3-D array (sample, sweep, experiment)
        :param muLabels: label object; its ``.labels`` attribute holds one
            label row per experiment
        :param window_size: sweeps in the stable window passed to
            find_stable_section (default 1500)
        :param trainingSampleSize: sweeps sampled per experiment for training
        :param corr_over: when True, fix ADC sign-overflow artifacts first
        """
        if corr_over:
            print("Correcting overflow negative values")
            voltammogram_data = correct_overflow(Voltammogram=voltammogram_data)
        # Fix: the original printed 'Start partition' separately in both the
        # if and else branches; hoisted into a single unconditional print.
        print('Start partition')
        print("Finding stable section with window size", window_size)
        [good_window, exclude_ix] = find_stable_section(voltammogram_data, window_size)
        print("Partitioning data with training sample size", trainingSampleSize)
        [training_part, testing_part] = partition_data(voltammogram_data, muLabels.labels, good_window, exclude_ix,
                                                       trainingSampleSize)
        print("Flattening Data")
        [self.training, self.testing] = flatten_data(training_part, testing_part)
        print("PRE-PROCESSING COMPLETE!!!!")
def correct_overflow(Voltammogram):
    """Fix sign-overflow artifacts in raw voltammograms, in place.

    Walks each sweep sample-by-sample; whenever the step to the next sample
    drops by more than 3000 counts, the next sample is assumed to have
    wrapped negative and its sign is flipped.  The scan is deliberately
    sequential: flipping a sample changes the step seen by the following
    comparison.

    :param Voltammogram: 3-D array (sample, sweep, experiment); modified in place
    :return: the same array, corrected
    """
    sample_wise = 0
    sweep_wise = 1
    exp_wise = 2
    n_samples = np.shape(Voltammogram)[sample_wise]
    for experiment in range(np.shape(Voltammogram)[exp_wise]):
        for ix in range(np.shape(Voltammogram)[sweep_wise]):
            # Perf fix: compare neighbours directly instead of recomputing
            # np.diff over the whole sweep on every element (was O(n^2) per
            # sweep). Reading the current (possibly already-flipped) values
            # preserves the original sequential semantics exactly.
            for elem in range(n_samples - 1):
                if Voltammogram[elem + 1, ix, experiment] - Voltammogram[elem, ix, experiment] < -3000:
                    Voltammogram[elem + 1, ix, experiment] = -Voltammogram[elem + 1, ix, experiment]
    return Voltammogram
def find_stable_section(Voltammogram, window_size=150):
    """
    Select, per experiment, the window of sweeps whose median waveform is
    most stable (lowest mean RMS deviation from the rolling median).

    :param Voltammogram: array, Voltammetry data (sample x sweep x experiment)
    :param window_size: int, number of sweeps in window (default = 150;
        NOTE(review): PreprocessedData passes 1500 — confirm which default
        is intended)
    :return: good_window: (window_size x num_experiments) int array of the
        selected sweep indices per experiment
    :return: exclude_ix: list, per experiment, of outlier indices within the
        selected window (np.where output from mad_outlier)
    """
    sample_wise = 0
    sweep_wise = 1
    num_experiments = np.shape(Voltammogram)[2]
    # sample_num = np.shape(Voltammogram)[sample_wise]
    good_window = np.zeros((window_size, num_experiments))
    exclude_ix = []
    for i in range(num_experiments):
        vgrams = Voltammogram[:, :, i]
        n_sweeps = np.shape(vgrams)[sweep_wise]
        # Calculate "before" and "after" window sizes based on move
        window_head = math.floor(window_size / 2)
        window_tail = math.ceil(window_size / 2)
        # Step 1: Find median waveform for the window centered at each step
        vgram_df = pd.DataFrame(vgrams)
        vgram_median = vgram_df.rolling(window_size, center=True, axis=1).median()
        # Step 2: Find the "difference" by subtracting the window median from each sweep
        diff_df_median = vgram_df - vgram_median
        # Step 3: Find the RMS (sample_wise) of the difference
        r = (diff_df_median ** 2).mean(axis=sample_wise) ** 0.5
        # Step 4: Find the mean RMS of the window centered on each sweep
        # NOTE(review): this rolling mean is right-aligned (no center=True),
        # unlike Step 1 — confirm the asymmetry is intentional.
        q = r.rolling(window_size).mean()
        # Step 5: Find the window centered on the sweep with the lowest q value in the
        # searchable range (the slice walks backwards from near the end toward
        # the middle of the recording)
        half_index = math.floor(n_sweeps / 2)
        start_index = half_index + window_head
        end_index = n_sweeps - window_tail - 1
        best_win_center = int(pd.Series.idxmin(q[end_index:start_index:-1]))
        good_window[:, i] = np.array(range(best_win_center - window_head, best_win_center + window_tail))
        # Step 6: mark any sweeps where the RMS of the difference is an outlier
        ex = mad_outlier(r.loc[good_window[:, i]])
        exclude_ix.append(ex)
    good_window = good_window.astype(int)
    return [good_window, exclude_ix]
def partition_data(voltammograms, labels, good_window, exclude_ix, trainingSampleSize=50):
    """
    Partition data into "training" and testing

    Sweeps in each experiment's stable window (minus outliers) are split by
    random sampling with a fixed seed, so partitions are reproducible.

    :param voltammograms: array of voltammetry data (sample x sweep x experiment)
    :param labels: Data frame of experiment labels (one row per experiment)
    :param good_window: array, window of region with stable median value
    :param exclude_ix: list, indices of outliers (per experiment)
    :param trainingSampleSize: int, number of sweeps in training
    :return: training: structure with training data
    :return: testing: structure with testing data
    """
    rand.seed(0)  # random sampling reproducible
    num_experiments = voltammograms.shape[2]  # Number of concentrations
    # num_samplePoints = voltammograms.shape[0]  # Number in points per voltammogram
    num_sweeps = good_window.shape[0]  # Number of sweeps in window
    # num_Chems = labels.shape[1]  # Number of chemicals/columns in label variable
    # initialize training and testing structures
    # NOTE(review): recordclass(...) returns a *class*; it is used here as a
    # plain attribute holder (class-level attributes), never instantiated —
    # consider types.SimpleNamespace for clarity.
    training = recordclass('training', 'sampleSize, index, vgrams, labels, experiment')
    testing = recordclass('testing', 'sampleSize, index, vgrams, labels, experiment')
    # Partition each experiment
    training.sampleSize = trainingSampleSize
    testing_sample_size = num_sweeps - trainingSampleSize
    testing.sampleSize = testing_sample_size
    # training partition (one slot per experiment)
    training.index = [None] * num_experiments
    training.vgrams = [None] * num_experiments
    training.labels = [None] * num_experiments
    training.experiment = [None] * num_experiments
    # testing partition (one slot per experiment)
    testing.index = [None] * num_experiments
    testing.vgrams = [None] * num_experiments
    testing.labels = [None] * num_experiments
    testing.experiment = [None] * num_experiments
    # Build training and testing structures
    for i in range(num_experiments):
        vgrams = pd.DataFrame(voltammograms[:, good_window[:, i], i])
        labs = np.array(labels)[i]
        # drop outlier sweeps before sampling
        pop = range(num_sweeps)
        population = list(np.delete(pop, exclude_ix[i]))
        sample = rand.sample(population, training.sampleSize)
        # boolean mask over the (outlier-free) population: True -> training
        index = []
        for j in population:
            index.append(j in sample)
        # assign training data
        training_index = np.where(index)
        training.index[i] = np.array(training_index[0])
        training.vgrams[i] = vgrams.loc[:, training_index[0]]
        training.labels[i] = np.tile(labs, (len(training_index[0]), 1))
        # NOTE(review): the experiment tag is the constant num_experiments + 1
        # for every experiment — presumably meant to be i (or i + 1); verify
        # against consumers of `.experiment`.
        training.experiment[i] = [num_experiments + 1] * trainingSampleSize
        # assign testing data (complement of the training mask)
        testing_index = np.where(~np.array(index))
        testing.index[i] = np.array(testing_index[0])
        testing.vgrams[i] = vgrams.loc[:, testing_index[0]]
        testing.labels[i] = np.tile(labs, (len(testing_index[0]), 1))
        testing.experiment[i] = [num_experiments + 1] * testing_sample_size
    return [training, testing]
def flatten_data(training, testing):
    """
    Flatten the per-experiment lists inside both partitions into single arrays
    shaped for cvglmnet (rows = sweeps, cols = features/labels).

    :param training: structure whose index/vgrams/labels/experiment are lists, one entry per experiment
    :param testing: structure of the same shape for the held-out sweeps
    :return: [training, testing], each mutated in place with flattened arrays and a sweep count ``n``
    """
    # Both partitions receive the identical transformation, so apply it in one pass.
    for partition in (training, testing):
        # hstack concatenates the per-experiment index arrays; "> -1" is always
        # true for valid indices, so this renumbers sweeps 0..N-1 consecutively.
        partition.index = np.where(np.hstack(partition.index) > -1)[0]
        # Voltammograms are stored column-per-sweep; transpose to row-per-sweep.
        partition.vgrams = np.hstack(partition.vgrams).transpose()
        partition.labels = np.vstack(partition.labels)
        partition.experiment = np.vstack(partition.experiment)
        partition.n = np.shape(partition.index)
    return [training, testing]
def mad_outlier(data, thresh=3):
    """
    Flag outliers using the Median Absolute Deviation rule (MATLAB's default).

    :param data: voltammetry data; must support ``.median()`` -- presumably a
        pandas Series, verify against callers
    :param thresh: number of MADs from the median beyond which a point is flagged
    :return: exclude_index: np.where-style tuple of outlier positions
    """
    # MAD is computed over finite values only, but the thresholds are applied
    # to the full series.
    mad_value = robust.mad(data[np.isfinite(data)])
    center = data.median()
    too_low = data < center - thresh * mad_value
    too_high = data > center + thresh * mad_value
    return np.where(too_low | too_high)
def leave_out_concentration(training, chemIx=0, leave_out_concentration=0):
    """
    Drop every training sweep whose label for one analyte equals the left-out value.

    :param training: structure with flattened ``labels`` and ``vgrams`` arrays
    :param chemIx: column of ``labels`` holding the analyte of interest
    :param leave_out_concentration: concentration to exclude (coerced to float,
        so string command-line values are accepted)
    :return: training, mutated in place with the matching rows removed
    """
    column = np.array(training.labels[:, chemIx], dtype=float)
    excluded = np.asarray(leave_out_concentration, dtype=float)
    keep_rows = np.not_equal(column, excluded).nonzero()[0]
    training.labels = training.labels[keep_rows][:]
    training.vgrams = training.vgrams[keep_rows][:]
    return training
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
import voltammetry
import sys
import matplotlib.pyplot as plt
import numpy as np
import random as rand

# Calibration run that assigns cross-validation folds per unique label tuple,
# so every sweep at the same concentration combination lands in the same fold.
plt.style.use('ggplot')
plt.rcParams['axes.facecolor'] = [1, 1, 1]
plt.rcParams['axes.edgecolor'] = 'k'
random_seed = 0
rand.seed(random_seed)

abfpath = sys.argv[1]  # directory containing the ABF recording and run.csv
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
data = voltammetry.PreprocessedData(vg.Voltammogram, labels, trainingSampleSize=125)

# find unique label tuples; one fold id per tuple
unique_tuples = np.unique(data.training.labels, axis=0)
numC = len(unique_tuples)
# set up folds
nfolds = int(7)
cPerF = int(numC / nfolds)  # tuples per fold (floor division)
folds = np.zeros((numC))
# Was hard-coded `folds[0:77]`, which only worked when numC == 77 (7 folds x 11
# tuples); use the computed count so other concentration grids work too.
folds[0:nfolds * cPerF] = np.random.permutation(np.repeat(np.arange(nfolds), cPerF))
foldid = np.zeros((len(data.training.labels), 1), dtype=np.int8)
for i, tup in enumerate(unique_tuples):
    # assumes labels have exactly 3 columns -- TODO confirm for other runs
    idx = np.where((data.training.labels[:,0] == tup[0]) & (data.training.labels[:,1] == tup[1]) & (data.training.labels[:,2] == tup[2]))
    foldid[idx] = folds[int(i)]
# find best alpha (fixed LASSO here; uncomment to search instead)
bestAlpha = 1.0
# bestAlpha = voltammetry.best_alpha(data.training)
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha, nfolds=nfolds, foldid=foldid, parallel=8)
predictions = voltammetry.test_analyte(data.testing, cvFit)
for chemIx in range(len(labels.targetAnalyte)):
    stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
    # plot_Calibration returns (fig, ax1, ax2, ax3); take the figure before
    # titling it (the old code called .suptitle on the tuple, which raises).
    calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, chemIx, stats)[0]
    calFig.suptitle(vg.name)
    plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
import voltammetry
import sys
import matplotlib.pyplot as plt
import numpy as np

# Plot the RMS amplitude of every testing sweep, with the trace broken
# (NaN-ed) at each concentration-step boundary so steps are visible.
abf_dir = sys.argv[1]
recording = voltammetry.Data(abf_dir)
run_labels = voltammetry.Mulabels(abf_dir, 'run.csv')
prepped = voltammetry.PreprocessedData(recording.Voltammogram, run_labels, trainingSampleSize=125)
# Root-mean-square over the points of each sweep.
rms_per_sweep = np.sqrt(np.mean(prepped.testing.vgrams ** 2, axis=1))
# A change in the summed label row marks a concentration step.
step_breaks = np.where(np.diff(np.sum(prepped.testing.labels, axis=1)))[0]
rms_per_sweep[step_breaks] = np.nan
plt.plot(rms_per_sweep)
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
from __future__ import absolute_import
import os
import sys

from .abfConvert import *
from .LabelData import *
from .preprocessing import *
from .calibrate import *
from .save_output import *

# Ensure the package directory itself is importable. `os` is now imported
# explicitly; the old code relied on it leaking in from a star import above.
sys.path.append(os.path.join(os.path.dirname(__file__)))
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
import voltammetry
import sys
import matplotlib.pyplot as plt
import numpy as np

# Leave-one-concentration-out cross validation: train with every sweep at the
# chosen concentration removed, then evaluate on the held-out partition.
abfpath = sys.argv[1]  # directory containing the ABF recording and run.csv
loc = sys.argv[2]  # concentration to leave out (string; coerced to float downstream)
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
data = voltammetry.PreprocessedData(vg.Voltammogram, labels, trainingSampleSize=125, window_size=1500)
data.training = voltammetry.leave_out_concentration(data.training, leave_out_concentration=loc)
bestAlpha = voltammetry.best_alpha(data.training)
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha)
predictions = voltammetry.test_analyte(data.testing, cvFit)
for chemIx in range(len(labels.targetAnalyte)):
    # (removed an unused `loc_idx = int(np.where(labels.labels == loc)[chemIx])`
    # dead store, which was never read and raises on multi-element matches)
    stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
    # plot_Calibration returns (fig, ax1, ax2, ax3); take the figure before
    # titling it (the old code called .suptitle on the tuple, which raises).
    calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, chemIx, stats)[0]
    calFig.suptitle(''.join(("Train Excluding ", str(loc), " nM")))
    plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,623 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/calibrate.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from recordclass import recordclass
import scipy.io
import random as rand
import time
from glmnet_python import cvglmnet, cvglmnetPredict
from joblib import Parallel, delayed
def best_alpha(training, nAlphas=11, family='mgaussian', ptype='mse', nfolds=10, parallel=True, keep=False,
               grouped=True, random_seed=0, fnY=lambda x: np.diff(x), foldid=np.empty(0), standardize_resp=True):
    """
    Find best alpha based on minimum cross-validated error.

    :param training: voltammogram data structure
    :param nAlphas: int, number of alphas to test between 0 and 1 (default = 11)
    :param family: string, cvglmnet "family" option (default="mgaussian", mixed gaussian)
    :param ptype: string, penalty type
    :param nfolds: float, number of cross validation folds (default = 10)
    :param parallel: boolean, use multiple cores for training (default = True)
    :param keep: boolean, cvglmnet parameter (default = False)
    :param grouped: boolean, cvglmnet parameter (default = True)
    :param random_seed: integer, specifies random seed (default = 0)
    :param fnY: function applied to voltammogram (default = lambda x: np.diff(x), first derivative)
    :param foldid: optional pre-computed fold assignment; drawn randomly when empty
    :param standardize_resp: accepted for API compatibility; currently unused
    :return: bestAlpha: float, optimum alpha based on cv error
    """
    alphaRange = np.linspace(0, 1, nAlphas)
    cvFitList = [None] * nAlphas
    # The design matrix and responses do not depend on alpha: build them once,
    # outside the loop (the old code rebuilt them every iteration).
    X = fnY(training.vgrams).astype(float)
    Y = np.array(training.labels).astype(float)
    # Seed the RNG that actually draws the folds. The old code seeded Python's
    # `random` module but drew folds with `scipy.random.choice` -- a deprecated
    # alias of numpy.random removed in SciPy 1.9 -- so the seed had no effect
    # and the call breaks on current SciPy.
    np.random.seed(random_seed)
    if not foldid.any():
        foldid = np.random.choice(nfolds, training.vgrams.shape[0], replace=True)
    t = time.time()
    for i in range(nAlphas):
        cvFitList[i] = cvglmnet(x=X, y=Y, family=family, alpha=alphaRange[i], ptype=ptype, nfolds=nfolds,
                                foldid=foldid, parallel=parallel, keep=keep, grouped=grouped)
    elapsed = time.time() - t
    print('TRAINING COMPLETE', '{:.3f}'.format(elapsed), ' seconds. \n')
    # collect mean cross-validated error for each fit at its best lambda
    cvm = pd.Series(np.empty(nAlphas))
    for i in range(nAlphas):
        fit = cvFitList[i]
        cvm[i] = fit['cvm'][fit['lambdau'] == fit['lambda_min']]
    bestCvm = cvm.min()
    bestAlphaIx = cvm.idxmin()
    bestAlpha = alphaRange[bestAlphaIx]
    print('Best alpha = ', '{:.1f}'.format(bestAlpha), ' (error = ', '{:.2f}'.format(bestCvm), ')\n')
    return bestAlpha
def train_analyte(training, family='mgaussian', alpha=1, ptype='mse', nfolds=10, parallel=True, keep=False,
                  grouped=True, random_seed=0, fnY=lambda x: np.diff(x), foldid=np.empty(0), standardize_resp=True):
    """
    Cross validation training to generate elastic net model.

    :param training: Voltamogram_data structure with training data
    :param family: string, cvglmnet "family" option (default="mgaussian", mixed gaussian)
    :param alpha: float [0,1] for elastic net (default = 1, LASSO)
    :param ptype: string, penalty type
    :param nfolds: double, number of cross validation folds (default = 10)
    :param parallel: boolean, use multiple cores for training (default = True)
    :param keep: boolean, cvglmnet parameter (default = False)
    :param grouped: boolean, cvglmnet parameter (default = True)
    :param random_seed: integer, specifies random seed (default = 0)
    :param fnY: function applied to voltammogram (default = lambda x: np.diff(x), first derivative)
    :param foldid: optional pre-computed fold assignment; drawn randomly when empty
    :param standardize_resp: accepted for API compatibility; currently unused
    :return: cvFit: cvfit object, model based on training data
    """
    # Seed the RNG used for fold assignment. The old code seeded Python's
    # `random` but drew folds via `scipy.random.choice` (deprecated numpy.random
    # alias, removed in SciPy 1.9), so runs were not reproducible and the call
    # breaks on current SciPy.
    np.random.seed(random_seed)
    if not foldid.any():
        foldid = np.random.choice(nfolds, training.vgrams.shape[0], replace=True)
    x = fnY(training.vgrams).astype(float)
    y = np.array(training.labels).astype(float)
    [r, c] = y.shape  # GLMnet has issue with 1 vector labels, add vector of zeros
    if c == 1:
        y = np.concatenate((y, np.zeros((r, 1))), axis=1)
    t = time.time()
    cvFit = cvglmnet(x=x, y=y, family=family, alpha=alpha, ptype=ptype, nfolds=nfolds, foldid=foldid, parallel=parallel,
                     keep=keep, grouped=grouped)
    elapsed = time.time() - t
    print('TRAINING COMPLETE ', '{:.3f}'.format(elapsed), ' seconds. \n')
    return cvFit
def test_analyte(testing, cvFit, fnY=lambda x: np.diff(x), s='lambda_min'):
    """
    Apply a trained elastic-net model to the testing partition.

    :param testing: Voltammogram data structure with testing data
    :param cvFit: cvFit object produced by train_analyte
    :param fnY: feature transform applied to the voltammograms
        (default: first derivative along the sweep)
    :param s: lambda selection rule passed to cvglmnetPredict
        (default = 'lambda_min', lambda of minimum MSE)
    :return: array of predictions based on cvFit
    """
    features = fnY(testing.vgrams)
    return cvglmnetPredict(cvFit, features, s)
def calcStepStats(chemIx, predictions, labels):
    """
    Calculate statistics of model predictions, per concentration step and overall.

    :param chemIx: int, index of target analyte
    :param predictions: ndarray, predictions from testing (squeezed on axis 2)
    :param labels: array, all test labels (rows = sweeps, cols = chemicals)
    :return: stats: record-like class with per-step arrays (labels,
             prediction_RMSE, prediction_SNR, prediction_SNRE, mean, sd, n, sem)
             and whole-series scalars (fullRMSE, fullSNR, fullSNRE)
    """
    # Unique concentration steps for this analyte (NaN labels dropped).
    muList = np.unique(labels[:, chemIx])
    muList = muList[~np.isnan(muList)]
    nSteps = muList.size
    # Initialize stats structure; attributes are assigned on the class below.
    stats = recordclass('stats', 'labels, prediction_RMSE, prediction_SNR, prediction_SNRE, mean, sd, n, sem')
    # SNR and SNRE calculations divide by 0: silence those warnings but remember
    # the caller's settings. (The old epilogue called np.seterr(divide=None,
    # invalid=None), which numpy defines as "leave unchanged", so the 'ignore'
    # state leaked to the rest of the process.)
    old_err = np.seterr(divide='ignore', invalid='ignore')
    # initialize variables for calculating stats
    signal = np.squeeze(predictions, axis=2)[:, chemIx]
    signal_len = signal.shape[0]
    truth = np.array(labels[:, chemIx])
    noise = np.array(signal - truth)
    estimate = np.empty(signal_len)  # per-step mean prediction, broadcast per sweep
    noiseEst = np.empty(signal_len)  # deviation from the per-step mean
    stats.labels = np.empty(nSteps)
    stats.prediction_RMSE = np.empty(nSteps)
    stats.prediction_SNR = np.empty(nSteps)
    stats.prediction_SNRE = np.empty(nSteps)
    stats.mean = np.empty(nSteps)
    stats.sd = np.empty(nSteps)
    stats.n = np.empty(nSteps)
    stats.sem = np.empty(nSteps)
    stats.fullRMSE = np.nan
    stats.fullSNR = np.nan
    stats.fullSNRE = np.nan
    # Calculate stats for each step
    for ix in range(nSteps):
        selectIx = np.where(labels[:, chemIx] == muList[ix])[0]
        stepSize = len(selectIx)
        estimate[selectIx] = np.tile(signal[selectIx].mean(), (stepSize))
        noiseEst[selectIx] = signal[selectIx] - estimate[selectIx]
        stats.labels[ix] = truth[selectIx[0]]
        stats.prediction_RMSE[ix] = np.sqrt(np.square(noise[selectIx]).mean())
        stats.prediction_SNR[ix] = calculate_SNR(signal[selectIx], noise[selectIx])
        stats.prediction_SNRE[ix] = calculate_SNR(signal[selectIx], noiseEst[selectIx])
        stats.mean[ix] = np.mean(signal[selectIx])
        stats.sd[ix] = np.std(signal[selectIx])
        stats.n[ix] = np.size(signal[selectIx])
        stats.sem[ix] = stats.sd[ix] / np.sqrt(stats.n[ix])
    # Calculate full data statistics
    stats.fullRMSE = np.sqrt((noise ** 2).mean(axis=0))
    stats.fullSNR = calculate_SNR(signal, noise)
    stats.fullSNRE = calculate_SNR(signal, noiseEst)
    np.seterr(**old_err)  # restore the caller's floating-point error handling
    return stats
def plot_Calibration(time_array, predictions, labels, targetAnalyte, chemIx, stats):
    """
    Plot fits with labels, as well as RMSE and SNR for a given analyte.
    :param time_array: array, time used in x axis (currently unused: the x axis
        is the sweep index -- see the commented-out ``X = time_array`` below)
    :param predictions: array, predictions from cvglmnetPredict
    :param labels: array, label concentrations
    :param targetAnalyte: str array, list of analytes
    :param chemIx: int, index of chemical being modeled
    :param stats: stats structure, structure of calculated statistics for variable
    :return: fig, ax1, ax2, ax3: figure handle plus the prediction, RMSE, and SNR axes
    """
    #X = time_array
    X = np.arange(len(predictions))  # x axis is sweep number, not time
    Y = np.array(predictions)/1000  # scaled by 1000 -- presumably nM -> uM; TODO confirm units
    L = np.array(labels)/1000
    # NaN out the rows at concentration-step boundaries so the plotted label
    # trace breaks between steps instead of drawing vertical connectors.
    L[np.where(np.diff(L[:,chemIx]))] = np.nan
    chemLabel = targetAnalyte[chemIx]
    # Default label styling; overridden per analyte below.
    labColor = 'k'
    units = ''
    if chemLabel == 'NE':
        chemLabel = 'NE'
        units = '(nM)'
        labColor = '#b4531f'
    if (chemLabel == 'Dopamine') | (chemLabel == 'DA'):
        chemLabel = 'DA'
        units = '(nM)'
        labColor = '#1f77b4'
    if (chemLabel == 'Serotonin') | (chemLabel == '5HT'):
        chemLabel = '5HT'
        units = '(nM)'
        labColor = '#b49e1f'
    if chemLabel == '5HIAA':
        chemLabel = '5HIAA'
        units = '(nM)'
        labColor = '#871fb4'
    if chemLabel == 'pH':
        chemLabel = 'pH'
        units = ''
        labColor = '#3ebd30'
    fig = plt.figure()
    muLabel = ''.join([chemLabel, units])
    gs = GridSpec(7, 5)  # 7x5 layout: predictions on top, RMSE / SNR panels below
    # Plot Predictions
    ax1 = plt.subplot(gs[1:4, 0:5])
    hPred = plt.scatter(X, Y[:, chemIx], marker='.', color=labColor)
    #plt.title(chemLabel)
    #plt.xlabel('Time (s)')
    plt.xlabel('sweep')
    plt.ylabel(muLabel)
    # Plot actual concentrations
    # hAct = plt.scatter(X, L[:, chemIx], color='k', marker='.', linewidth=0.5)
    hAct, = ax1.plot(X, L[:, chemIx], color='k', linewidth=2.0)
    ax1.legend((hPred, hAct), ('prediction', 'known value'))
    plt.axis('tight')
    # Plot per-step RMSE, with the full-data RMSE as a dashed reference line
    ax2 = plt.subplot(gs[5:7, 0:2])
    y = stats.prediction_RMSE/1000
    x = stats.labels/1000
    ax2.scatter(x, y, color=labColor)
    ax2.plot(plt.xlim(), [stats.fullRMSE/1000, stats.fullRMSE/1000], linestyle='--', markersize=1,
             color=labColor)
    plt.title('RMSE')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.xlabel(muLabel)
    plt.ylabel(''.join(['RMSE', units]))
    plt.grid()
    plt.axis('tight')
    # Plot per-step SNR, with the full-data SNR as a dashed reference line
    ax3 = plt.subplot(gs[5:7, 3:5])
    y = stats.prediction_SNR
    x = stats.labels/1000
    ax3.scatter(x, y, color=labColor)
    ax3.plot(plt.xlim(), [stats.fullSNR, stats.fullSNR], linestyle='--', markersize=1, color=labColor)
    plt.title('SNR')
    plt.xlabel(muLabel)
    plt.ylabel('SNR (dB)')
    plt.grid()
    return fig, ax1, ax2, ax3
def calculate_SNR(sig, noise):
    """
    Calculate SNR in decibels.

    :param sig: array of signal
    :param noise: array of noise
    :return: |10*log10(|P_signal - P_noise| / P_noise)|, or NaN when the noise
        power is zero (avoids division by zero)
    """
    power_signal = (sig ** 2).sum(axis=0) / len(sig)
    power_noise = (noise ** 2).sum(axis=0) / len(noise)
    if power_noise == 0:
        return np.nan  # undefined when there is no noise power
    return np.abs(10 * np.log10(np.abs(power_signal - power_noise) / power_noise))
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
import os
import pyabf
import glob
import sys
import h5py
import numpy as np

# Split dual-probe ABF recordings (4 hardware channels: two data/CMD pairs)
# into one HDF5 file per channel per ABF file.
abf_path = sys.argv[1]  # path of directory containing abf data files
replace_str = sys.argv[2]  # substring of the directory name replaced by each channel name
ch1 = sys.argv[3]  # first channel name
ch2 = sys.argv[4]  # second channel name
out_path = os.path.split(abf_path)[0]  # name of output path
abf_name = os.path.basename(abf_path)  # name of experiment file name
channels = [ch1, ch2]  # make list of channel names
abf_glob = sorted(glob.glob(abf_path + "/*.abf"))  # collection of files in directory
num_files = len(abf_glob)  # number of abf files in directory
# Read acquisition geometry from the first file (assumed uniform across files -- TODO confirm)
abf_0 = pyabf.ABF(abf_glob[0])
sweep_count = abf_0.sweepCount  # Number of sweeps (max 10000)
sweep_point_count = abf_0.sweepPointCount  # Number of points in sweep (97 Hz = 1032)
sampling_rate = abf_0.dataRate
channel_count = abf_0.channelCount
# Check to make sure that directory name has substring to be replaced
if replace_str not in abf_name:
    # (message fixed: missing space after '>' in the original text)
    print('Unable to rename files. Check if <' + replace_str + '> is in the directory name.')
    sys.exit(3)  # sys.exit is reliable even when site builtins are stripped
# Check if the data only has one channel (one data/CMD pair)
if channel_count == 2:
    print('Only one channel in file. Nothing to do.')
    sys.exit(2)
# Split channels
elif channel_count == 4:
    print('Splitting channels.')
    for i in range(2):
        # Make output directories, making sure they don't already exist
        ch_name = abf_name.replace(replace_str, channels[i])
        ch_dir = os.path.join(out_path, ch_name)
        if not os.path.isdir(ch_dir):
            os.makedirs(ch_dir)
        else:
            if os.listdir(ch_dir):
                print(ch_dir + ' Directory not empty. Channels already split.')
                sys.exit(1)
    # (removed a dead `stTime = np.empty(num_files)` pre-allocation; stTime is
    # re-bound to a scalar inside the loop before it is ever read)
    for j in range(num_files):
        abf = pyabf.ABF(abf_glob[j])
        for i in range(2):
            ch_name = abf_name.replace(replace_str, channels[i])
            ch_dir = os.path.join(out_path, ch_name)
            Voltammogram = np.empty((sweep_point_count, sweep_count))
            CMD = np.empty((sweep_point_count, sweep_count))
            abf_file_name = os.path.splitext(os.path.basename(abf.abfFilePath).replace(replace_str,channels[i]))[0]
            stTime = abf._headerV2.uFileStartTimeMS  # acquisition start time (ms)
            # Channel i uses rows i*2 (data) and i*2+1 (command waveform);
            # reshape the continuous trace into (points-per-sweep, sweeps).
            Voltammogram[:, :] = np.asarray(np.reshape(abf.data[i*2, :], (sweep_point_count, -1), order='F'))
            CMD[:, :] = np.asarray(np.reshape(abf.data[i*2+1, :], (sweep_point_count, -1), order='F'))
            print(os.path.join(ch_dir, abf_file_name + '.h5'))
            with h5py.File(os.path.join(ch_dir, abf_file_name + '.h5'), 'w') as f:
                dset_vgram = f.create_dataset("Voltammogram", data=Voltammogram)
                dset_cmd = f.create_dataset("CMD", data=CMD)
                f.attrs["stTimeMS"] = stTime
                f.attrs['samplingRate'] = abf.dataRate
                f.attrs['sweepCount'] = abf.sweepCount
                f.attrs['sweepPointCount'] = abf.sweepPointCount
                f.attrs['expName'] = abf_name
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,625 | amnahEltahir/voltammetry | refs/heads/main | /misc/run_calibration_10percent.py | import voltammetry
import sys
import matplotlib.pyplot as plt
import numpy as np
import h5py
# Calibration run on a 10% subset of sweeps (indices read from disk); saves raw
# data and dopamine predictions to HDF5 rather than plotting.
plt.style.use('ggplot')
plt.rcParams['axes.facecolor']=[1,1,1]
plt.rcParams['axes.edgecolor']='k'
abfpath = sys.argv[1]  # directory containing the ABF recording and run.csv
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
# NOTE(review): hard-coded absolute path to the sweep-index file -- breaks on
# any other machine; consider taking it as a command-line argument.
idx = np.loadtxt('/Users/amnah/Desktop/wetlab_data/DIssertation_Datasets/misc/10p_sweep_idx.txt',dtype=np.uint16)
vg.Voltammogram = vg.Voltammogram[idx]
vg.CMD = vg.CMD[idx]
data = voltammetry.PreprocessedData(vg.Voltammogram, labels,window_size=425,trainingSampleSize=125,corr_over=True)
# only dopamine
bestAlpha = voltammetry.best_alpha(data.training)
#bestAlpha = 1.0
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha,parallel=8)#,fnY=lambda x: np.abs(np.fft.rfft(x)))
predictions = voltammetry.test_analyte(data.testing, cvFit)#,fnY=lambda x: np.abs(np.fft.rfft(x)))
# NOTE(review): abfpath.split()[0] splits on whitespace, not path separators --
# presumably intended to strip a trailing token; verify against how this script is invoked.
with h5py.File(abfpath.split()[0]+'10percent_DA_results.h5','w') as f:
    f.create_group('raw')
    f.create_dataset('raw/labels',data=labels.labels)
    f.create_dataset('raw/vgrams',data=vg.Voltammogram)
    f.create_dataset('raw/CMD',data=vg.CMD)
    f.create_dataset('raw/idx',data=idx)
    # NOTE(review): np.string_ is deprecated (removed in NumPy 2.0); np.bytes_ is the replacement.
    f.attrs['targetAnalyte'] = np.string_(labels.targetAnalyte)
    f.create_group('results')
    f.create_dataset('results/predictions',data=predictions)
    f.create_dataset('results/actual',data=data.testing.labels)
# for chemIx in range(len(labels.targetAnalyte)):
#     stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
#     calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, chemIx, stats)
#     calFig.suptitle(vg.name)
#     plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,626 | amnahEltahir/voltammetry | refs/heads/main | /misc/run_calibration.py | import voltammetry
import sys
import matplotlib.pyplot as plt
import numpy as np
import h5py
# Full calibration run: preprocess, fit an elastic net at alpha=1 (LASSO), and
# save training/testing data plus predictions to HDF5.
plt.style.use('ggplot')
plt.rcParams['axes.facecolor']=[1,1,1]
plt.rcParams['axes.edgecolor']='k'
abfpath = sys.argv[1]  # directory containing the ABF recording and run.csv
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
# Only sweeps 16:1016 are used -- presumably trims settling sweeps; TODO confirm.
data = voltammetry.PreprocessedData(vg.Voltammogram[16:1016], labels,window_size=425,trainingSampleSize=125,corr_over=True)
# idx = np.where(data.training.labels[:,1] ==0)
# data.training.labels = data.training.labels[idx]
# data.training.vgrams = data.training.vgrams[idx]
# idx_test = np.where(data.testing.labels[:,1]==0)
# data.testing.labels = data.testing.labels[idx_test]
# data.testing.vgrams = data.testing.vgrams[idx_test]
#bestAlpha = voltammetry.best_alpha(data.training)
bestAlpha = 1.0
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha,parallel=8)
predictions = voltammetry.test_analyte(data.testing, cvFit)
# NOTE(review): abfpath.split()[0] splits on whitespace, not path separators --
# presumably intended to strip a trailing token; verify against how this script is invoked.
with h5py.File(abfpath.split()[0]+'_DA_pH_results.h5','w') as f:
    f.create_group('training')
    f.create_dataset('training/vgrams', data=data.training.vgrams)
    f.create_dataset('training/labels', data=data.training.labels)
    f.create_group('testing')
    f.create_dataset('testing/vgrams', data=data.testing.vgrams)
    f.create_dataset('testing/labels', data=data.testing.labels)
    # NOTE(review): np.string_ is deprecated (removed in NumPy 2.0); np.bytes_ is the replacement.
    f.attrs['targetAnalyte'] = np.string_(labels.targetAnalyte)
    f.create_group('results')
    f.create_dataset('results/predictions', data=predictions)
# for chemIx in range(len(labels.targetAnalyte)):
#     stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
#     calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, chemIx, stats)
#     calFig.suptitle(vg.name)
#     plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,627 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/LabelData.py | import pandas as pd
import numpy as np
class Mulabels:
    """
    Concentration labels according to their order in the sequence.
    """

    def __init__(self, data_dir, label_file_name):
        """
        Read the label CSV and identify which chemicals actually vary in the run.

        :param data_dir: string, directory containing label file
        :param label_file_name: string, name of label file
        """
        # Data frame of sequence labels read from CSV
        self.data = pd.read_csv(''.join((data_dir, '/', label_file_name)))
        # Known chemical columns present in this run, in CSV column order
        self.chems = list(self.data.columns.intersection(['DA', '5HT', '5HIAA', 'NE', 'pH']))
        self.labels = self.data[self.chems]
        # A chemical is a "target analyte" when its concentration changes over the run.
        varying = [col for col in self.chems if len(np.unique(self.labels[col])) > 1]
        self.targetAnalyte = varying
        self.chemIx = [self.labels.columns.get_loc(col) for col in varying]
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
import os
import sys
import glob
from shutil import copyfile

# Split a directory of interleaved ABF acquisitions (4-sequence rotation) into
# four waveform-specific directories, renumbering the files within each.
# Check that this is the right type of directory to split.
path = sys.argv[1]
# `or` is the idiomatic boolean operator (the old bitwise `|` evaluated both
# operands unconditionally); behavior on bools is the same.
if ("YYY" not in path) or (not os.path.isdir(path)):
    print("Not a valid directory!!!")
    sys.exit(1)
# Get relevant information about the path for making directories
data_parent = os.path.dirname(path)
data_base = os.path.basename(path)
[prefix, suffix] = data_base.split('YYY', 1)
uncorrelated_name = prefix + 'uncorrelated_100k_25' + suffix
uncorrelated_name_50 = prefix + 'uncorrelated_100k_10' + suffix
fastRB_name = prefix + 'uncorrelated_1kHz_500k' + suffix
chem001_name = prefix + 'two_pulse_1kHz_500k' + suffix
uncorrelated_out = data_parent + '/' + uncorrelated_name
uncorrelated_50_out = data_parent + '/' + uncorrelated_name_50
fastRB_out = data_parent + '/' + fastRB_name
chem001_out = data_parent + '/' + chem001_name
if os.path.isdir(uncorrelated_out) or os.path.isdir(uncorrelated_50_out):
    print('This directory has already been split. \n')
    print('Nothing to do.\n')
    sys.exit(2)
else:
    os.makedirs(uncorrelated_out)
    os.makedirs(uncorrelated_50_out)
    os.makedirs(fastRB_out)
    os.makedirs(chem001_out)
# Rename files and copy to appropriate directories.
# Sequence rotation: n%4==0 -> uncorrelated, even -> fastRB,
# (n-1)%4==0 -> uncorrelated_50, else -> chem001.
# (dead `out_path` assignments removed; the variable was never read)
file_list = sorted(glob.glob(path + '/*.abf'))
for file in file_list:
    name = os.path.splitext(os.path.basename(file))[0]
    seq_num = int(name[-4::])  # trailing 4-digit sequence number
    if seq_num % 4 == 0:
        new_num = int(seq_num / 4)
        new_str = uncorrelated_name + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, uncorrelated_out + '/' + new_str)
        continue
    if seq_num % 2 == 0:
        new_num = int(seq_num/4)
        new_str = fastRB_name + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, fastRB_out + '/' + new_str)
        continue
    if (seq_num - 1) % 4 == 0:
        new_num = int((seq_num-1) / 4)
        new_str = uncorrelated_name_50 + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, uncorrelated_50_out + '/' + new_str)
        continue
    else:
        new_num = int(((seq_num+1)/4) - 1)
        new_str = chem001_name + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, chem001_out + '/' + new_str)
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,629 | amnahEltahir/voltammetry | refs/heads/main | /misc/split_directories.py | import os
import sys
import glob
from shutil import copyfile
# Split one interleaved acquisition directory into two per-channel directories.
# Usage: split_directories.py <data_dir> <replace_str> <channel1_name> <channel2_name>
# Check that this is the right type of directory to split
path = sys.argv[1]
replace_str = sys.argv[2]  # path of substring to be replaced in file name
f1 = sys.argv[3]  # first channel name
f2 = sys.argv[4]  # second channel name
if (replace_str not in path) | (not os.path.isdir(path)):
    print("Not a valid directory!!!")
    exit(1)
# Get relevant information about the path for making directories
data_parent = os.path.dirname(path)
data_base = os.path.basename(path)
[prefix, suffix] = data_base.split(replace_str, 1)
triangle_name = prefix + f1 + suffix
uncorrelated_name = prefix + f2 + suffix
triangle_out = data_parent + '/' + triangle_name
uncorrelated_out = data_parent + '/' + uncorrelated_name
# Refuse to split twice: output directories already existing means done.
if os.path.isdir(triangle_out) | os.path.isdir(uncorrelated_out):
    print('This directory has already been split. \n')
    print('Nothing to do.\n')
    exit(2)
else:
    os.makedirs(triangle_out)
    os.makedirs(uncorrelated_out)
# Rename files and copy to appropriate directories
# Even sequence numbers -> channel 1 (triangle); odd -> channel 2 (uncorrelated)
file_list = sorted(glob.glob(path + '/*.abf'))
for file in file_list:
    name = os.path.splitext(os.path.basename(file))[0]
    seq_num = int(name[-4::])  # trailing 4 digits of the file stem
    if seq_num % 2 == 0:
        out_path = triangle_out  # NOTE(review): out_path is assigned but never read
        new_num = int(seq_num/2)
        new_str = triangle_name + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, triangle_out + '/' + new_str)
    else:
        out_path = uncorrelated_out
        new_num = int((seq_num-1)/2)
        new_str = uncorrelated_name + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, uncorrelated_out + '/' + new_str)
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,630 | amnahEltahir/voltammetry | refs/heads/main | /misc/calc_impedance.py | import os
import voltammetry
import sys
import numpy as np
# Estimate per-experiment electrode impedance as V_rms / I_rms over a fixed
# window in one sweep, and print it alongside the session name.
abfpath = sys.argv[1]
abfname = os.path.basename(os.path.split(abfpath)[0])  # parent directory name used as label
if os.path.isdir(abfpath):
    pass
else:
    print('Not a valid path!!!')
    exit(1)
vg = voltammetry.Data(abfpath)
len_measure = 1500  # number of samples in the measurement window
# window starts at the midpoint of a sweep
sweep_start = int(np.ceil(vg.sweep_point_count / 2))
sweep_stop = sweep_start + len_measure
# sweep index 2 of the first 5 experiments; CMD = command voltage,
# Voltammogram = measured current — TODO confirm channel semantics
V = vg.CMD[sweep_start:sweep_stop, 2, 0:5]
I = vg.Voltammogram[sweep_start:sweep_stop, 2, 0:5]
impedance = np.zeros(5)
for j in range(5):
    V_rms = np.sqrt(np.mean(V[:, j] ** 2))
    I_rms = np.sqrt(np.mean(I[:, j] ** 2))
    impedance[j] = V_rms/I_rms  # RMS ratio; units depend on channel scaling — TODO confirm
print(abfname, ' '.join(map("{:.3f}".format, impedance)))
exit(0)
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,631 | amnahEltahir/voltammetry | refs/heads/main | /misc/select_idx.py | import voltammetry
import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py
# Fit a calibration model using only the sweeps where 5HT or DA is absent,
# then plot calibration results for the first analyte.
abfpath = sys.argv[1]
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
# sample indices where at least one of the two analytes is zero
idx =np.where((labels.labels['5HT']==0) | (labels.labels['DA'] == 0))
labels.labels = labels.labels.iloc[idx]
data = voltammetry.PreprocessedData(np.squeeze(vg.Voltammogram[:,:,idx]), labels,trainingSampleSize=125)
bestAlpha = 1  # elastic-net mixing parameter (1 = lasso) — TODO confirm semantics
# testing-sample index -> seconds
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha)
predictions = voltammetry.test_analyte(data.testing, cvFit)
"""
with h5py.File(vg.name+"_edge.h5",'w') as f:
data1 = f.create_dataset('predictions', data=predictions)
data2 = f.create_dataset('labels', data=data.testing.labels)
for chemIx in range(len(labels.targetAnalyte)):
stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, chemIx, stats)
calFig.suptitle(vg.name)
plt.show()
"""
stats = voltammetry.calcStepStats(0, predictions, data.testing.labels)
calFig = voltammetry.plot_Calibration(t, predictions, data.testing.labels, labels.targetAnalyte, 0, stats)
calFig.suptitle(vg.name)
plt.show()
65,632 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/abfConvert.py | import os
import glob
import pyabf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import h5py
import matplotlib as mpl
import shutil
from numpy.core.multiarray import ndarray
import matplotlib.colors as colors
class Data:
    """
    Experimental data taken from directory of abf files.

    Attributes are populated by loadData(): stacked voltammograms and
    command waveforms for every converted file in the directory, plus the
    shared sweep geometry and sampling rate.
    """
    def __init__(self, data_path):
        """
        Load all converted data files found under *data_path*.

        :param data_path: path containing the converted data files (see loadData)
        """
        [self.Voltammogram, self.CMD, self.stTime, self.name, self.samplingRate, self.sweep_count,
         self.sweep_point_count] = loadData(data_path)
    def _plotVoltammograms(self, CMAP=cmx.jet, fnY=lambda x: x):
        """
        Plot raw voltammogram data.

        Draws one trace per experiment (the middle sweep of that experiment),
        colored by experiment index, with a discrete colorbar.

        :param CMAP: color map defined in matplotlib.cm (default = jet)
        :param fnY: function applied to voltammograms (defualt = lambda x:x)
        :return: fig: figure object
        """
        fig = plt.figure()
        N = np.shape(self.Voltammogram)[2]  # Number of experiments run
        vgram_colors = CMAP(np.linspace(0, 1, N))
        for i in range(1, N+1):
            vgrams = np.asarray(self.Voltammogram[:, :, i-1])
            Rn = vgrams.shape[1]
            mid = int(np.ceil(Rn / 2))  # middle sweep of this experiment
            y = np.transpose(fnY(vgrams[:, mid]))
            # sample index -> milliseconds
            x = 1000 * np.array(range(self.sweep_point_count))/self.samplingRate
            plt.plot(x[0:len(y)], y, color=vgram_colors[i-1])
        plt.title(self.name)
        plt.xlabel('Time (ms)')
        plt.ylabel('current (nA)')
        plt.axis('tight')
        # discrete experiment-number colorbar
        norm = colors.Normalize(vmin=1, vmax=N)
        sm = cmx.ScalarMappable(cmap=CMAP, norm=norm)
        sm.set_array([])
        bounds = np.arange(0.5,N+1.5)
        cbar = plt.colorbar(sm, ticks=np.linspace(0, N+1, N+2), label='experiment #', boundaries=bounds)
        # NOTE(review): N labels are set for N+2 ticks — verify tick/label alignment
        cbar.set_ticklabels(range(1,N+1))
        return fig
# noinspection PyProtectedMember
def abf2H5(abf_path):
    """
    Convert a directory of ABF files to per-file HDF5 files.

    Each <name>.abf becomes <name>.h5 written next to the source directory,
    holding the two recorded channels reshaped to
    (sweep_point_count, sweep_count) plus acquisition metadata attributes.

    :param abf_path: string, abf directory path
    """
    abf_name = os.path.basename(abf_path)
    if abf_name == '':
        # path ended with a separator; take the last real path component
        abf_name = os.path.basename(os.path.split(abf_path)[0])
    out_dir = os.path.split(abf_path)[0]
    # Combine data from abf files in given path
    abf_glob = sorted(glob.glob(abf_path + "/*.abf"))  # collection of files in directory
    num_files = len(abf_glob)  # number of abf files in directory
    # Sweep geometry is read from the first file and assumed identical for
    # every file in the directory — TODO confirm
    abf_0 = pyabf.ABF(abf_glob[0])
    sweep_count = abf_0.sweepCount  # Number of sweeps (max 10000)
    sweep_point_count = abf_0.sweepPointCount  # Number of points in sweep (97 Hz = 1032)
    # Buffers are reused and fully overwritten for every file.
    Voltammogram = np.empty((sweep_point_count, sweep_count))
    CMD = np.empty((sweep_point_count, sweep_count))
    for i in range(num_files):
        abf = pyabf.ABF(abf_glob[i])
        abf_file_name = os.path.splitext(os.path.basename(abf.abfFilePath))[0]
        # NOTE(review): reads a pyabf *private* header field for the start time;
        # may break across pyabf versions
        stTime = abf._headerV2.uFileStartTimeMS
        # channel 0 -> Voltammogram, channel 1 -> CMD — TODO confirm channel mapping
        Voltammogram[:, :] = np.asarray(np.reshape(abf.data[0, :], (sweep_point_count, -1), order='F'))
        CMD[:, :] = np.asarray(np.reshape(abf.data[1, :], (sweep_point_count, -1), order='F'))
        with h5py.File(os.path.join(out_dir, abf_file_name + '.h5'), 'w') as f:
            dset_vgram = f.create_dataset("Voltammogram", data=Voltammogram)
            dset_cmd = f.create_dataset("CMD", data=CMD)
            f.attrs["stTimeMS"] = stTime
            f.attrs['samplingRate'] = abf.dataRate
            f.attrs['sweepCount'] = abf.sweepCount
            f.attrs['sweepPointCount'] = abf.sweepPointCount
            f.attrs['expName'] = abf_name
def loadData(h5_path):
    """
    Load and stack the converted HDF5 files found in a directory.

    Sweep geometry is read from the first file and assumed identical for
    every file in the directory — TODO confirm.

    :param h5_path: string, h5 data directory path
    :return: Voltammogram: ndarray (points, sweeps, files), voltammetry data
    :return: CMD: ndarray (points, sweeps, files), forcing function
    :return: stTime: ndarray (files,), start time of each file (ms)
    :return: h5_name: string, base name of the directory
    :return: sampling_rate: integer, samples per second
    :return: sweep_count, sweep_point_count: sweep geometry
    """
    h5_name = os.path.basename(h5_path)
    # combine data from hdf5 files in given path
    h5_glob = sorted(glob.glob(h5_path + "/*.h5"))  # collection of files in directory
    if not h5_glob:
        print('No h5 files found.')
        exit(1)
    num_files = len(h5_glob)  # number of files in directory
    # Read shared geometry from the first file; context manager closes it
    # (the original leaked the open handle).
    with h5py.File(h5_glob[0], 'r') as h5_0:
        sweep_count = h5_0.attrs['sweepCount']
        sweep_point_count = h5_0.attrs['sweepPointCount']
        sampling_rate = h5_0.attrs['samplingRate']
    Voltammogram = np.empty((sweep_point_count, sweep_count, num_files))
    CMD = np.empty((sweep_point_count, sweep_count, num_files))
    stTime = np.empty(num_files)
    for i in range(num_files):
        with h5py.File(h5_glob[i], 'r') as h5:
            # BUG FIX: the original rebound `stTime` to a scalar each pass,
            # discarding the preallocated per-file array; store per element.
            stTime[i] = h5.attrs['stTimeMS']
            Voltammogram[:, :, i] = h5['Voltammogram']
            CMD[:, :, i] = h5['CMD']
    return[Voltammogram, CMD, stTime, h5_name, sampling_rate, sweep_count, sweep_point_count]
# def save_ABF(abf_path, overwrite=False):
# """
# Save abf files to csv directory
# :param abf_path: string, Path of csv output files Files
# :param overwrite: boolean, replace files saved in output directory
# :return:
# """
# outDir = ''.join((abf_path, '/OUT'))
#
# # Options for saving files
# if not os.path.isdir(outDir):
#
# print('Saving data to ', outDir)
# print('...')
# os.makedirs(outDir)
# _writeCSV(abf_path, outDir)
# print('Done.')
# else:
# if overwrite:
#
# print('Removing old files...')
# shutil.rmtree(outDir)
# os.makedirs(outDir)
# print('Saving data to ', outDir)
# print('...')
# _writeCSV(abf_path, outDir)
# print('Done.')
# else:
# print('Files already exist -- Not overwriting')
#
# # TODO: Parse substring for saving model
# def parse_file_name(abfpath):
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,633 | amnahEltahir/voltammetry | refs/heads/main | /misc/plot_total_results.py | import h5py
import matplotlib.pyplot as plt
import numpy as np
import sys
from matplotlib.gridspec import GridSpec
import glob
# Scatter-plot the full-session SNR and RMSE of every results file, grouped
# by analyte (DA, 5HT, pH).
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 24})
concentrations = np.arange(0,2750,50)  # NOTE(review): unused below — verify intent
# one results file per session, named 2019*.h5
results_glob = sorted(glob.glob("2019*.h5"))
num_datafiles=len(results_glob)
SNR=[]
RMSE=[]
# outer loop = analyte index, inner loop = data file, so SNR/RMSE end up
# grouped in blocks of len(results_glob) per analyte
for i in range(3):
    for j in range(len(results_glob)):
        # NOTE(review): files are re-opened on every pass and never closed
        data = h5py.File(results_glob[j])
        SNR.append(data.attrs['full_SNR'][i])
        RMSE.append(data.attrs['full_RMSE'][i])
plt.figure()
# slices of 20 assume exactly 20 session files — TODO confirm
plt.scatter(np.arange(0,20),SNR[0:20],s=50,color='#1f77b4')
plt.scatter(np.arange(0,20),SNR[20:40],s=50,color='#b49e1f')
plt.scatter(np.arange(0,20),SNR[40:60],s=50,color='#3ebd30')
plt.title("Prediction SNR")
plt.xticks(np.arange(0,20),('A_1','A_2','B_1','B_2','C_1','C_2','D_1','D_2','E_1','E_2','F_1','F_2','G_1',
'G_2','H_1','H_2','I_1','I_2','J_1','J_2'),rotation=45)
plt.legend(('DA','5HT','pH'))
plt.ylabel('SNR (dB)')
plt.figure()
plt.scatter(np.arange(0,20),RMSE[0:20],s=50,color='#1f77b4')
plt.scatter(np.arange(0,20),RMSE[20:40],s=50,color='#b49e1f')
plt.scatter(np.arange(0,20),RMSE[40:60],s=50,color='#3ebd30')
plt.title("Prediction RMSE")
plt.xticks(np.arange(0,20),('A_1','A_2','B_1','B_2','C_1','C_2','D_1','D_2','E_1','E_2','F_1','F_2','G_1',
'G_2','H_1','H_2','I_1','I_2','J_1','J_2'),rotation=45)
plt.legend(('DA','5HT','pH'))
plt.ylabel('RMSE (nM)')
plt.axis('tight')
plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,634 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/save_output.py | import pickle
import os
from collections import Iterable
import numpy as np
from voltammetry import calcStepStats
import scipy.io as sio
def save_model(prefix, cvFit, predictions, data, labels, out_dir="OUT"):
    """Persist a trained calibration model and its summary statistics.

    Writes three artifacts under *out_dir*:
      <prefix>.pkl - pickled cvglmnet fit
      <prefix>.mat - MATLAB export of the fit
      <prefix>.hdr - plain-text header with per-analyte step statistics

    :param prefix: str - prefix of hdr, mat and pkl files generated for model
    :param cvFit: fit - cross validation fit from cvglmnet
    :param predictions: array of predictions generated from cvFit
    :param data: Data - object containing with training/testing split
    :param labels: MuLabels - object containing concentration label info
    :param out_dir: str - location to output model info (default "OUT/")
    :return:
    """
    # collections.Iterable was removed in Python 3.10; use the abc location
    # locally (the module-level `from collections import Iterable` should be
    # migrated the same way).
    from collections.abc import Iterable
    # exist_ok replaces the original's race-prone check-then-create / dead else.
    os.makedirs(out_dir, exist_ok=True)
    pkl_filename = os.path.join(out_dir, prefix + ".pkl")
    with open(pkl_filename, 'wb') as file:
        pickle.dump(cvFit, file)
    mat_filename = os.path.join(out_dir, prefix + ".mat")
    sio.savemat(mat_filename, mdict={'cvFit': cvFit})
    hdr_filename = os.path.join(out_dir, prefix + '.hdr')
    with open(hdr_filename, "w") as header_file:
        header_file.write(prefix)
        header_file.write('\n' + "training sample size: " + str(data.training.sampleSize))
        header_file.write('\n' + "testing sample size: " + str(data.testing.sampleSize))
        attributes = ["labels", "prediction_RMSE", "prediction_SNR", "prediction_SNRE", "mean", "sd", "n", "sem",
                      "fullRMSE", "fullSNR", "fullSNRE"]
        for chemIx in range(len(labels.targetAnalyte)):
            header_file.write('\n---\n' + labels.targetAnalyte[chemIx] + ':')
            stats = calcStepStats(chemIx, predictions, data.testing.labels)
            for attr in attributes:
                value = stats.__dict__.get(attr)  # fetch once, not per branch
                if isinstance(value, Iterable):
                    # tab-separated members; trailing tab kept for format
                    # compatibility with the original writer
                    attr_str = ''.join(str(mem) + '\t' for mem in value)
                else:
                    attr_str = np.array2string(value)
                header_file.write('\n' + attr + '\t')
                header_file.write(attr_str)
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,635 | amnahEltahir/voltammetry | refs/heads/main | /misc/save_results_for_figs.py | import voltammetry
import sys
import h5py
import os
import glmnet_python
from glmnet_python import cvglmnetCoef
import numpy as np
# Train an elastic-net calibration on one session and dump everything the
# figure scripts need into a single results h5.
abfpath = sys.argv[1]
abf_name = os.path.basename(abfpath)
out_dir = os.path.split(abfpath)[0]
vg = voltammetry.Data(abfpath)
labels = voltammetry.Mulabels(abfpath, 'run.csv')
#idx =np.where((labels.labels['NE']==0) | (labels.labels['DA'] == 0))
#labels.labels = labels.labels.iloc[idx]
#data = voltammetry.PreprocessedData(np.squeeze(vg.Voltammogram[:,:,idx]), labels,trainingSampleSize=125)
data = voltammetry.PreprocessedData(vg.Voltammogram, labels,trainingSampleSize=125)
bestAlpha = 1.0  # elastic-net mixing parameter (1 = lasso) — TODO confirm semantics
# testing-sample index -> seconds
t = vg.sweep_point_count * data.testing.index / vg.samplingRate
cvFit = voltammetry.train_analyte(data.training, alpha=bestAlpha)
predictions = voltammetry.test_analyte(data.testing, cvFit)
SNR = np.empty(0)
RMSE = np.empty(0)
fullSNR = np.empty(0)
fullRMSE = np.empty(0)
for chemIx in range(len(labels.targetAnalyte)):
    stats = voltammetry.calcStepStats(chemIx, predictions, data.testing.labels)
    # NOTE(review): coefs is recomputed each pass and only the final value is
    # written out below; NameError if there are no analytes — verify
    coefs = cvglmnetCoef(cvFit, 'lambda_min')
    SNR = np.concatenate((SNR,stats.prediction_SNR))
    RMSE = np.concatenate((RMSE, stats.prediction_RMSE))
    fullSNR = np.hstack((fullSNR,stats.fullSNR))
    fullRMSE = np.hstack((fullRMSE, stats.fullRMSE))
#with h5py.File(abf_name + "_a_" + str(int(100*bestAlpha)) + '_' + '_edge.h5', 'w') as f:
with h5py.File(abf_name + "_a_" + str(int(100 * bestAlpha)) + '.h5', 'w') as f:
    f.create_dataset("trainingV", data=data.training.vgrams)
    f.create_dataset("testingV", data=data.testing.vgrams)
    f.attrs["coefs"] = coefs
    f.attrs['targetAnalyte'] = np.string_(labels.targetAnalyte)
    f.create_dataset("predictions", data=predictions)
    f.create_dataset("actual", data=data.testing.labels)
    f.create_dataset("trainingLabels", data=data.training.labels)
    f.attrs['SNR'] = np.array(SNR)
    f.attrs['RMSE'] = np.array(RMSE)
    f.attrs['full_SNR'] = np.array(fullSNR)
    f.attrs['full_RMSE'] = np.array(fullRMSE)
    f.attrs['CMD'] = vg.CMD[:, 0, 0]
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,636 | amnahEltahir/voltammetry | refs/heads/main | /misc/mk1Dresults.py | import h5py
import matplotlib.pyplot as plt
import numpy as np
import sys
from matplotlib.gridspec import GridSpec
# Load one results h5 (written by save_results_for_figs.py) and unpack the
# per-analyte statistics for the plots below.
plt.style.use('ggplot')
data_file = sys.argv[1]
data = h5py.File(data_file)
num_samples = data['predictions'].shape[0]
X = np.arange(num_samples)
Y = data['predictions']
L = np.array(data['actual'])
targetAnalyte = data.attrs['targetAnalyte']
# flat per-step stat vectors; consumed analyte-by-analyte in the loop
f_SNR = data.attrs['SNR']
f_RMSE = data.attrs['RMSE']
f_fullSNR = data.attrs['full_SNR']
f_fullRMSE = data.attrs['full_RMSE']
chemLabel = []
chem_indeces = []
SNR = []
RMSE = []
fullSNR = []
fullRMSE = []
ph_index = []  # [] sentinel; replaced by the analyte index when pH is present
for i in range(len(targetAnalyte)):
    if targetAnalyte[i].decode('UTF-8') != 'pH':
        chemLabel.append(targetAnalyte[i].decode('UTF-8'))
        chem_indeces.append(i)
    else:
        ph_index = i
    # pop this analyte's per-step stats off the front of the flat vectors
    nSteps = len(np.unique(L[:,i]))
    SNR.append(f_SNR[np.arange(nSteps)])
    RMSE.append(f_RMSE[np.arange(nSteps)])
    fullSNR.append(f_fullSNR[i])
    fullRMSE.append(f_fullRMSE[i])
    f_SNR = np.delete(f_SNR,np.arange(nSteps))
    f_RMSE = np.delete(f_RMSE,np.arange(nSteps))
num_analytes = len(chemLabel)
Figure = plt.figure()
# Plot Predictions
def analyte_specs(chemL):
    """Map an analyte label to its (canonical name, unit suffix, plot color).

    Unknown labels pass through unchanged with no units and the default
    yellow color.
    """
    known = {
        'NE': ('NE', '(nM)', '#b4531f'),
        'Dopamine': ('DA', '(nM)', '#1f77b4'),
        'DA': ('DA', '(nM)', '#1f77b4'),
        'Serotonin': ('5HT', '(nM)', '#b49e1f'),
        '5HT': ('5HT', '(nM)', '#b49e1f'),
        '5HIAA': ('5HIAA', '(nM)', '#871fb4'),
        'pH': ('pH', '', '#3ebd30'),
    }
    return known.get(chemL, (chemL, '', 'y'))
## Plot results
# BUG FIX: ph_index is initialised to [] and replaced by the analyte index
# when a 'pH' channel exists.  The original guard `if ph_index:` is falsy for
# index 0, silently dropping the pH panel; compare to the [] sentinel instead.
if ph_index != []:
    gs = GridSpec(9, 5)
    ax_ph = plt.subplot(gs[8:9, :])
    chemL = 'pH'
    [chemID,units,labColor] = analyte_specs(chemL)
    muLabel = ''.join([chemID, units])
    plt.scatter(X, Y[:, ph_index], marker='.', color=labColor,alpha=0.5)
    # blank out label transitions so the step trace draws without verticals
    L[np.where(np.diff(L[:, ph_index]))] = np.nan
    plt.plot(X, L[:, ph_index], color='k', linewidth=1.0)
    plt.xlabel('Sweep #')
    plt.ylabel(''.join(['pH']))
    plt.axis('tight')
    plt.title('pH Predictions')
else:
    gs = GridSpec(7,5)
# Plot Predictions
ax1 = plt.subplot(gs[1:4, :])
for chemIx in chem_indeces:
    chemL = chemLabel[chemIx]
    [chemID,units,labColor] = analyte_specs(chemL)
    muLabel = ''.join([chemID, units])
    plt.scatter(X, Y[:, chemIx], marker='.', color=labColor,alpha=0.4)
    L[np.where(np.diff(L[:, chemIx]))] = np.nan
    plt.plot(X, L[:, chemIx], color='k', linewidth=3.0)
    plt.fill_between(X,np.squeeze(Y[:,chemIx]),facecolor=labColor,alpha=0.2)
plt.xlabel('Sweep #')
plt.ylabel(''.join(['Concentration (nM)']))
plt.axis('tight')
plt.title('Neurotransmitter Predictions')
## RMSE subplot
ax2 = plt.subplot(gs[5:7, 0:2])
for chemIx in chem_indeces:
    chemL = chemLabel[chemIx]
    [chemID,units,labColor] = analyte_specs(chemL)
    y = RMSE[chemIx]
    # step concentrations for this analyte (transition NaNs filtered out)
    x = np.unique(L[:,chemIx])
    xx = x[~np.isnan(x)]
    ax2.scatter(xx, y, color=labColor)
    ax2.plot(plt.xlim(), [fullRMSE[chemIx], fullRMSE[chemIx]], linestyle='--', markersize=1,
             color=labColor)
plt.title('RMSE')
plt.ylabel(''.join(['RMSE (nM)']))
plt.xlabel(''.join(['concentration (nM)']))
plt.axis('tight')
# Plot SNR
ax3 = plt.subplot(gs[5:7, 3:5])
for chemIx in chem_indeces:
    chemL = chemLabel[chemIx]
    [chemID,units,labColor] = analyte_specs(chemL)
    y = SNR[chemIx]
    x = np.unique(L[:,chemIx])
    xx = x[~np.isnan(x)]
    ax3.scatter(xx, y, color=labColor)
    ax3.plot(plt.xlim(), [fullSNR[chemIx], fullSNR[chemIx]], linestyle='--', markersize=1,
             color=labColor)
plt.title('SNR')
# NOTE(review): muLabel is whatever the last loop pass left behind, and both
# axis labels are overwritten two lines below — presumably leftovers.
plt.xlabel(muLabel)
plt.ylabel('SNR (dB)')
plt.axis('tight')
plt.xlabel('concentration (nM)')
plt.ylabel('SNR (dB)')
plt.axis('tight')
plt.show()
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,637 | amnahEltahir/voltammetry | refs/heads/main | /voltammetry/setup.py | from setuptools import setup
# Minimal setuptools metadata for the voltammetry package.
setup(name="voltammetry",
      version="0.0.1",
      author="AmnahEltahir"
      )
65,638 | amnahEltahir/voltammetry | refs/heads/main | /misc/split_directories_3.py | import os
import sys
import glob
from shutil import copyfile
# Split a 3-way interleaved 'YYY' acquisition directory into three
# per-protocol directories, renumbering the copied files.
# Check that this is the right type of directory to split
path = sys.argv[1]
if ("YYY" not in path) | (not os.path.isdir(path)):
    print("Not a valid directory!!!")
    exit(1)
# Get relevant information about the path for making directories
data_parent = os.path.dirname(path)
data_base = os.path.basename(path)
[prefix, suffix] = data_base.split('YYY', 1)
r00 = prefix + 'FSCV_10Hz_100k' + suffix
r01 = prefix + 'uncorrelated_97Hz_100k' + suffix
r02 = prefix + 'uncorrelated_97Hz_100k_25' + suffix
r00_out = data_parent + '/' + r00
r01_out = data_parent + '/' + r01
r02_out = data_parent + '/' + r02
# Refuse to split twice (only the first two outputs are checked).
if os.path.isdir(r00_out) | os.path.isdir(r01_out):
    print('This directory has already been split. \n')
    print('Nothing to do.\n')
    exit(2)
else:
    os.makedirs(r00_out)
    os.makedirs(r01_out)
    os.makedirs(r02_out)
# Rename files and copy to appropriate directories
# Protocols cycle by sequence number mod 3: 0 -> r00, 1 -> r01, 2 -> r02
file_list = sorted(glob.glob(path + '/*.abf'))
for file in file_list:
    name = os.path.splitext(os.path.basename(file))[0]
    seq_num = int(name[-4::])  # trailing 4 digits of the file stem
    if seq_num % 3 == 0:
        out_path = r00_out  # NOTE(review): out_path is assigned but never read
        new_num = int(seq_num / 3)
        new_str = r00 + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, r00_out + '/' + new_str)
        print(name + '\t' + 'r00' + '\n')
        continue
    if (seq_num - 1) % 3 == 0:
        # seq_num ≡ 1 (mod 3)
        out_path = r01_out
        new_num = int((seq_num-1) / 3)
        new_str = r01 + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, r01_out + '/' + new_str)
        print(name + '\t' + 'r01' + '\n')
        continue
    else:
        # seq_num ≡ 2 (mod 3)
        out_path = r02_out
        new_num = int(((seq_num+1)/3) - 1)
        new_str = r02 + "_{0:0>4}".format(new_num) + '.abf'
        copyfile(file, r02_out + '/' + new_str)
        print(name + '\t' + 'r02' + '\n')
| {"/misc/run_calibration_imped.py": ["/voltammetry/__init__.py"], "/misc/plot_vgram_ave.py": ["/voltammetry/__init__.py"], "/voltammetry/__init__.py": ["/voltammetry/abfConvert.py", "/voltammetry/LabelData.py", "/voltammetry/preprocessing.py", "/voltammetry/calibrate.py", "/voltammetry/save_output.py"], "/misc/loocv.py": ["/voltammetry/__init__.py"], "/misc/run_calibration_10percent.py": ["/voltammetry/__init__.py"], "/misc/run_calibration.py": ["/voltammetry/__init__.py"], "/misc/calc_impedance.py": ["/voltammetry/__init__.py"], "/misc/select_idx.py": ["/voltammetry/__init__.py"], "/voltammetry/save_output.py": ["/voltammetry/__init__.py"], "/misc/save_results_for_figs.py": ["/voltammetry/__init__.py"]} |
65,639 | carlsunderman/graph-builder | refs/heads/master | /main.py | import json
import random
import os
import networkx as nx
from io import BytesIO
import base64
import matplotlib.pyplot as plt
from flask import Flask, request, render_template, redirect, url_for, session
import graph as g
# Flask app; the session key is regenerated on every process start, so
# stored session graphs do not survive a server restart.
app = Flask(__name__)
app.secret_key = os.urandom(24)
def getSessionGraph():
    """Rebuild the networkx graph serialized in the Flask session."""
    stored = session['graph']
    return nx.adjacency_graph(stored)
def setSessionGraph(graph):
    """Serialize *graph* into the Flask session as adjacency data."""
    serialized = nx.adjacency_data(graph)
    session['graph'] = serialized
def updateSessionGraph(update = lambda _: None, **kw):
    """Load the session graph, apply *update* to it in place, store it back.

    Extra keyword arguments are forwarded to *update*.
    """
    current = getSessionGraph()
    update(current, **kw)
    setSessionGraph(current)
@app.before_request
def before_request():
    """Seed the session with an empty graph on the first request."""
    if 'graph' in session:
        return
    session['graph'] = nx.adjacency_data(nx.Graph())
@app.route("/")
def graphInterfaceView():
graph = getSessionGraph()
# draw graph
nx.draw_networkx(graph,font_color='white', pos=nx.circular_layout(graph))
# base64 encode graph image
figfile = BytesIO()
plt.savefig(figfile, format='png')
plt.close()
figfile.seek(0)
figdata_png = base64.b64encode(figfile.getvalue()).decode('ascii')
return render_template('index.html',
image_base64 = figdata_png,
matrix = str(nx.to_numpy_array(graph)).replace('.',',').replace('\n',','),
graph = graph,
data = json.dumps(nx.node_link_data(graph)) )
# add and remove node
@app.route("/addnode")
def addnode():
updateSessionGraph(g.add)
return redirect(url_for('graphInterfaceView'))
@app.route("/removenode")
def removenode():
updateSessionGraph(g.remove, node=int(request.args.get('label')))
return redirect(url_for('graphInterfaceView'))
# add and remove edge
@app.route("/addedge")
def addedge():
updateSessionGraph(lambda graph: graph.add_edge(request.args.get('label1'),request.args.get('label2')))
return redirect(url_for('graphInterfaceView'))
@app.route("/toggleedge")
def toggleedge():
updateSessionGraph(g.toggle,edge=[int(n) for n in request.args.get('label').split('_')])
return redirect(url_for('graphInterfaceView'))
# complement graph
@app.route("/complement")
def complementgraph():
graph = getSessionGraph()
setSessionGraph(nx.complement(graph))
return redirect(url_for('graphInterfaceView'))
# complement edges for single node
@app.route("/complementnode")
def complementnode():
updateSessionGraph(g.complement,node=int(request.args.get('label')))
return redirect(url_for('graphInterfaceView'))
# clear edges
@app.route("/clearedges")
def clearedges():
updateSessionGraph(lambda graph: graph.remove_edges_from(graph.edges()))
return redirect(url_for('graphInterfaceView'))
# clear graph
@app.route("/clear")
def cleargraph():
updateSessionGraph(lambda graph: graph.clear())
return redirect(url_for('graphInterfaceView'))
# Start Flask's development server when executed directly (not on import).
if __name__ == '__main__':
    app.run()
| {"/main.py": ["/graph.py"], "/tests/test_complement.py": ["/graph.py"], "/tests/test_add.py": ["/graph.py"], "/tests/test_toggleEdge.py": ["/graph.py"], "/tests/test_remove.py": ["/graph.py"]} |
def add(graph):
    """Add the smallest non-negative integer not already used as a node.

    Among 0..len(nodes) at least one value is free, so next() always succeeds.
    """
    free_labels = (i for i in range(len(graph.nodes) + 1) if i not in graph.nodes)
    graph.add_node(next(free_labels))
def remove(graph,node):
    """Remove *node* from *graph*; unknown nodes are silently ignored."""
    if not graph.has_node(node):
        return
    graph.remove_node(node)
def toggle(graph,edge):
    """Flip the presence of an undirected edge given as a 2-item pair.

    Pairs that are not length 2, or that would form a self-loop, are ignored.
    """
    if len(edge) != 2 or edge[0] == edge[1]:
        return
    u, v = edge
    if graph.has_edge(u, v):
        graph.remove_edge(u, v)
    else:
        graph.add_edge(u, v)
def complement(graph,node):
    """Toggle the edge between *node* and every other node in the graph.

    The self-pair is passed to toggle(), which ignores it; unknown nodes
    leave the graph untouched.
    """
    if node not in graph.nodes:
        return
    for other in graph.nodes:
        toggle(graph, [node, other])
65,641 | carlsunderman/graph-builder | refs/heads/master | /tests/test_complement.py | import networkx as nx
import graph
# Adjacency-list fixture: node -> list of neighbours (undirected).
simpleGraph = {
    0: [1,2],
    1: [0,3,4],
    2: [0],
    3: [1,4],
    4: [1,3]
}
# Three isolated nodes with no edges.
edgelessGraph = {
    0: [],
    1: [],
    2: []
}
def makeGraph(edgeList):
    """Build an undirected networkx graph from an adjacency mapping (or None)."""
    built = nx.Graph(edgeList)
    return built
def sortedPairList(pairList):
    """Normalise edges: sort within each pair, then sort the list of pairs."""
    normalised = [tuple(sorted(pair)) for pair in pairList]
    normalised.sort()
    return normalised
def test_populated():
    # complementing node 0 in a populated graph flips all of its edges
    name = 'populated'
    g = makeGraph(simpleGraph)
    want = [(0, 3), (0, 4), (1, 3), (1, 4), (3, 4)]
    graph.complement(g,0)
    assert sortedPairList(g.edges) == want, name

def test_complementTwice():
    # complement is its own inverse: two applications restore the edge set
    name = 'complementTwice'
    g = makeGraph(simpleGraph)
    want = [(0, 1), (0, 2), (1, 3), (1, 4), (3, 4)]
    graph.complement(g,0)
    graph.complement(g,0)
    assert sortedPairList(g.edges) == want, name

def test_empty():
    # complementing in an empty graph is a no-op
    name = 'empty'
    g = makeGraph(None)
    want = []
    # no-op
    graph.complement(g,0)
    assert sortedPairList(g.edges) == want, name

def test_noEdges():
    # complementing an isolated node connects it to every other node
    name = 'noEdges'
    g = makeGraph(edgelessGraph)
    want = [(0, 1), (0, 2)]
    graph.complement(g,0)
    assert sortedPairList(g.edges) == want, name

def test_badNode():
    # an unknown node id leaves the graph untouched
    name = 'badNode'
    g = makeGraph(simpleGraph)
    want = [(0, 1), (0, 2), (1, 3), (1, 4), (3, 4)]
    # no-op
    graph.complement(g,6)
    assert sortedPairList(g.edges) == want, name
65,642 | carlsunderman/graph-builder | refs/heads/master | /tests/test_add.py | import networkx as nx
import graph
# Adjacency mapping (node -> list of neighbours) used to build an
# undirected nx.Graph for each test.
simpleGraph = {
    0: [1,2],
    1: [0,3,4],
    2: [0],
    3: [1,4],
    4: [1,3]
}
def makeGraph(edgeList):
    """Build a networkx Graph from an adjacency mapping (None gives an empty graph)."""
    return nx.Graph(edgeList)
def test_populated():
    # The test expects node ids 0..5 after graph.add() on a full 0..4 graph.
    name = 'populated'
    g = makeGraph(simpleGraph)
    want = [0,1,2,3,4,5]
    graph.add(g)
    assert list(g.nodes) == want, name
def test_empty():
    # Adding to an empty graph is expected to create node 0.
    name = 'empty'
    g = makeGraph(None)
    want = [0]
    graph.add(g)
    assert list(g.nodes) == want, name
def test_deletedNode():
    # After deleting node 2, the test expects add() to reuse the freed id 2.
    name = 'deletedNode'
    g = makeGraph(simpleGraph)
    want = [0,1,2,3,4]
    g.remove_node(2)
    graph.add(g)
    assert sorted(list(g.nodes)) == want, name
def test_multipleDeletedNodes():
    """After deleting ids 2 and 4, the test expects add() to yield ids 0..3."""
    g = makeGraph(simpleGraph)
    for doomed in (2, 4):
        g.remove_node(doomed)
    graph.add(g)
    assert sorted(list(g.nodes)) == [0,1,2,3], 'multipleDeletedNodes'
65,643 | carlsunderman/graph-builder | refs/heads/master | /tests/test_toggleEdge.py | import networkx as nx
import graph
# Adjacency mapping (node -> list of neighbours) used to build an
# undirected nx.Graph for each test.
simpleGraph = {
    0: [1,2],
    1: [0,3,4],
    2: [0],
    3: [1,4],
    4: [1,3]
}
def makeGraph(edgeList):
    """Build a networkx Graph from an adjacency mapping (None gives an empty graph)."""
    return nx.Graph(edgeList)
def sortedPairList(pairList):
    """Return the edges as internally-sorted tuples, in sorted order."""
    pairs = []
    for pair in pairList:
        pairs.append(tuple(sorted(pair)))
    pairs.sort()
    return pairs
def test_populated():
    # Toggling the existing edge 1-3 removes it.
    name = 'populated'
    g = makeGraph(simpleGraph)
    want = [(0, 1), (0, 2), (1, 4), (3, 4)]
    graph.toggle(g,[1,3])
    assert sortedPairList(g.edges) == want, name
def test_toggleTwice():
    # Toggling the same edge twice must restore the original edge set.
    name = 'toggleTwice'
    g = makeGraph(simpleGraph)
    want = [(0, 1), (0, 2), (1, 3), (1, 4), (3, 4)]
    graph.toggle(g,[1,3])
    graph.toggle(g,[1,3])
    assert sortedPairList(g.edges) == want, name
def test_empty():
    # Toggling on an empty graph is expected to create the edge (and its endpoints).
    name = 'empty'
    g = makeGraph(None)
    want = [(0,1)]
    graph.toggle(g,[0,1])
    assert sortedPairList(g.edges) == want, name
def test_newNodes():
    # Toggling an edge between two unknown nodes is expected to add nodes 5 and 6.
    # NOTE(review): compares list(g.nodes) directly, so this relies on
    # nx.Graph's node iteration order -- presumably insertion order.
    name = 'newNodes'
    g = makeGraph(simpleGraph)
    want = [0,1,2,3,4,5,6]
    graph.toggle(g,[5,6])
    assert list(g.nodes) == want, name
def test_badEdge():
    # A one-element edge list fails toggle()'s length check -> nothing changes.
    name = 'badEdge'
    g = makeGraph(simpleGraph)
    want = [(0, 1), (0, 2), (1, 3), (1, 4), (3, 4)]
    # effectively a no-op
    graph.toggle(g,[5])
    assert sortedPairList(g.edges) == want, name
def test_selfEdge():
    """A self-loop is rejected by toggle(), so the edge set stays the same."""
    g = makeGraph(simpleGraph)
    before = [(0, 1), (0, 2), (1, 3), (1, 4), (3, 4)]
    graph.toggle(g, [3, 3])  # edge[0] == edge[1] -> filtered out
    assert sortedPairList(g.edges) == before, 'selfEdge'
65,644 | carlsunderman/graph-builder | refs/heads/master | /tests/test_remove.py | import networkx as nx
import graph
# Adjacency mapping (node -> list of neighbours) used to build an
# undirected nx.Graph for each test.
simpleGraph = {
    0: [1,2],
    1: [0,3,4],
    2: [0],
    3: [1,4],
    4: [1,3]
}
def makeGraph(edgeList):
    """Build a networkx Graph from an adjacency mapping (None gives an empty graph)."""
    return nx.Graph(edgeList)
def test_populated():
    # remove(g, 3) deletes node 3; only the other four nodes remain.
    name = 'populated'
    g = makeGraph(simpleGraph)
    want = [0,1,2,4]
    graph.remove(g,3)
    assert list(g.nodes) == want, name
def test_empty():
    # Removing a node from an empty graph must neither raise nor add anything.
    name = 'empty'
    g = makeGraph(None)
    want = []
    # effectiveley a no-op
    graph.remove(g,3)
    assert list(g.nodes) == want, name
def test_singleton():
    """Removing the only node leaves an empty graph."""
    g = makeGraph({0: []})
    graph.remove(g, 0)
    assert list(g.nodes) == [], 'singleton'
65,723 | ngouzy/smartchangelog | refs/heads/master | /tests/integration/__init__.py | import os
import shutil
import tempfile
import pytest
from smartchangelog.gitcmd import git_command
@pytest.fixture(scope='function')
def temp_dir():
    """Per-test fixture: create a temp dir, cd into it, init a git repo there.

    Yields the directory path; on teardown restores the previous cwd and
    deletes the directory.
    """
    temporary_directory = tempfile.mkdtemp()
    old_cwd = os.getcwd()
    os.chdir(temporary_directory)
    git_command('init')
    # identity config so commits made by the test do not depend on the host setup
    git_command('config', 'user.name', 'Nicolas Gouzy')
    git_command('config', 'user.email', 'nicolas.gouzy@gmail.com')
    yield temporary_directory
    # teardown
    os.chdir(old_cwd)
    shutil.rmtree(temporary_directory)
def hook_path():
    """Path of the .git/hooks directory under the current working directory."""
    repo_root = os.getcwd()
    return os.path.join(repo_root, '.git', 'hooks')
65,724 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/commit.py | import re
from datetime import datetime
from typing import NamedTuple, cast
from smartchangelog import datetools
from smartchangelog.commitmsg import CommitType, CommitMsg, CommitSyntaxError
class _Commit(NamedTuple):
    """Immutable record of a single git commit.

    The trailing message fields default to None because a commit message
    may not follow the structured format (see Commit.parse_message).
    """
    # NOTE(review): the None defaults make these effectively Optional[...];
    # the annotations are kept as-is for compatibility.
    id: str
    author: str
    date: datetime
    type: CommitType = None
    scope: str = None
    subject: str = None
    body: str = None
class Commit(_Commit):
    """A parsed git commit: id, author, date plus structured message parts."""

    class Message(NamedTuple):
        """Structured commit message; every field is None when absent."""
        type: CommitType = None
        scope: str = None
        subject: str = None
        body: str = None

    @classmethod
    def parse(cls, commit: str) -> 'Commit':
        """Parse one `git log` entry (commit/Author/Date header + message).

        Raises AttributeError if the text does not match the expected
        header layout (re.match returns None).
        """
        m = re.match('commit (?P<id>[a-z0-9]{40})(?:\n|.)+Author: (?P<author>.*)(?:\n|.)+'
                     'Date: (?P<date>.*)(?P<message>(.|\n)*)',
                     commit)
        gd = m.groupdict()
        message = cls.parse_message(gd['message'])
        commit_id = gd['id']
        author = gd['author']
        # strip the whitespace left around the captured date before parsing
        date = datetools.str2date(gd['date'].strip())
        return cls(
            id=commit_id,
            author=author,
            date=date,
            type=message.type,
            scope=message.scope,
            subject=message.subject,
            body=message.body
        )

    @classmethod
    def strip_lines(cls, string) -> str:
        """Remove surrounding blank lines/spaces and per-line indentation."""
        return "\n".join(line.strip() for line in string.strip(' \n').split('\n'))

    @classmethod
    def parse_message(cls, message: str) -> Message:
        """Parse a commit message into a Message.

        Tries the strict CommitMsg grammar first; on CommitSyntaxError
        falls back to a loose split: first line -> subject, rest -> body.
        """
        message = cls.strip_lines(message)
        try:
            cm = CommitMsg.parse(message)
            return cls.Message(**cm.__dict__)
        except CommitSyntaxError:
            # collapse runs of newlines so the subject/body split is stable
            message = re.sub("\n+", "\n", message)
            lines = message.split('\n', maxsplit=1)
            subject = lines[0] or None
            body = None
            if len(lines) > 1:
                body = lines[1] or None
            return cls.Message(
                type=None,
                scope=None,
                subject=subject,
                body=body
            )

    @classmethod
    def property_name(cls, prop: property) -> str:
        """Return the field name behind a _Commit property (e.g. Commit.author).

        Appears to rely on the namedtuple property docstring ending with the
        field number -- fragile, hence the fixme below.
        """
        # fixme: change implementation, use _Commit.__dict__
        i = int(prop.__doc__.split(' ')[-1])
        return tuple(cls._fields)[i]

    @classmethod
    def property(cls, name: str):
        """Return the _Commit property descriptor for the given field name."""
        prop = cast(property, _Commit.__dict__[name])
        return prop
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,725 | ngouzy/smartchangelog | refs/heads/master | /setup.py | from setuptools import setup, find_packages
from codecs import open
from os import path
from smartchangelog import __version__
here = path.abspath(path.dirname(__file__))

# The README doubles as the long description on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# The first README line is expected to look like "<name>: <description>".
name, description = long_description.split('\n')[0].split(': ')

setup(
    name=name,
    version=__version__,
    description=description,
    long_description=long_description,
    url='https://github.com/ngouzy/smartchangelog',
    author='Nicolas Gouzy',
    author_email='nicolas.gouzy@gmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Documentation',
        'License :: OSI Approved :: MIT License',
        # Supported Python versions
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        # Environment
        'Environment :: Console'
    ],
    keywords='changelog, git, hook, message formatter',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[],
    extras_require={
        'test': ['mypy', 'pytest', 'pytest-cov']
    },
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'commit-msg=smartchangelog.scripts.commitmsg_script:main',
            'smartchangelog=smartchangelog.scripts.changelog_script:main',
        ],
    },
)
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,726 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/datetools.py | from datetime import datetime
# git's default date layout, e.g. "2017-03-23 17:30:56 +0100"
date_format = "%Y-%m-%d %H:%M:%S %z"


def str2date(string: str) -> datetime:
    """Parse a string in `date_format` into a timezone-aware datetime."""
    return datetime.strptime(string, date_format)


def date2str(dt: datetime) -> str:
    """Render a datetime back into `date_format` (inverse of str2date)."""
    return dt.strftime(date_format)
65,727 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/__init__.py | __version__ = '0.1.3-dev'
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,728 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_commit.py | from smartchangelog import datetools
from smartchangelog.commit import Commit
from smartchangelog.commitmsg import CommitType
from tests.unit import data_file_path
class TestCommit:
    """Unit tests for smartchangelog.commit.Commit."""

    def test_parse(self):
        # A known single-commit git log must parse into the exact Commit.
        with open(data_file_path('one.gitlog'), encoding='utf-8') as log_file:
            raw_log = log_file.read()
        want = Commit(
            id='a6f79b56acbb9e58327ecf91feed611bb614927f',
            author='Nicolas Gouzy <nicolas.gouzy@orange.com>',
            date=datetools.str2date('2017-03-23 17:30:56 +0100'),
            type=CommitType.refactor,
            scope='changelog',
            subject='better model',
            body='NamedTuple rocks !'
        )
        got = Commit.parse(raw_log)
        assert got == want

    def test_strip_lines(self):
        # Indentation and surrounding blank lines are removed; inner blank
        # lines survive as empty strings.
        raw = """
            Lorem ipsum dolor sit amet, consectetur adipiscing elit.
            Phasellus non erat imperdiet, pellentesque nibh et, porta velit.

            Fusce sit amet elit ac magna congue accumsan sed ut tellus.
            Nullam at velit tincidunt, sodales mi quis, gravida metus.


            Quisque pellentesque ipsum nec nunc vehicula tincidunt.
            """
        want = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n" \
               "Phasellus non erat imperdiet, pellentesque nibh et, porta velit.\n" \
               "\n" \
               "Fusce sit amet elit ac magna congue accumsan sed ut tellus.\n" \
               "Nullam at velit tincidunt, sodales mi quis, gravida metus.\n" \
               "\n" \
               "\n" \
               "Quisque pellentesque ipsum nec nunc vehicula tincidunt."
        got = Commit.strip_lines(raw)
        assert got == want

    def test_property_name(self):
        # Commit.author is the namedtuple property for the field 'author'.
        got = Commit.property_name(Commit.author)
        assert got == 'author'
65,729 | ngouzy/smartchangelog | refs/heads/master | /tests/integration/test_githook.py | import os
import shutil
import pytest
from smartchangelog import githook
# noinspection PyUnresolvedReferences
from tests.integration import hook_path, temp_dir
@pytest.mark.usefixtures("temp_dir")
def test_githook_install():
    """install() returns the commit-msg hook path inside the repo's .git/hooks."""
    # GIVEN
    # WHEN
    commitmsg_hook_path = githook.install()
    # THEN
    assert os.path.abspath(commitmsg_hook_path) == os.path.join(hook_path(), 'commit-msg')
@pytest.mark.usefixtures("temp_dir")
def test_githook_install_without_githook_folder():
    """install() still returns the hook path when .git/hooks is missing."""
    # GIVEN: remove .git/hooks so install() has to cope without it
    shutil.rmtree(hook_path())
    # WHEN
    commitmsg_hook_path = githook.install()
    # THEN
    assert os.path.abspath(commitmsg_hook_path) == os.path.join(hook_path(), 'commit-msg')
@pytest.mark.usefixtures("temp_dir")
def test_githook_uninstall():
    """uninstall() returns the commit-msg hook path after a prior install()."""
    # GIVEN
    githook.install()
    expected_commitmsg_hook_path = os.path.join(hook_path(), 'commit-msg')
    # WHEN
    commitmsg_hook_path = githook.uninstall()
    # THEN
    assert os.path.abspath(commitmsg_hook_path) == expected_commitmsg_hook_path
@pytest.mark.usefixtures("temp_dir")
def test_githook_uninstall_without_githook_folder():
    """uninstall() returns None when there is no .git/hooks directory."""
    # GIVEN
    shutil.rmtree(hook_path())
    # WHEN
    commitmsg_hook_path = githook.uninstall()
    # THEN
    assert commitmsg_hook_path is None
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,730 | ngouzy/smartchangelog | refs/heads/master | /tests/integration/test_changelog_script.py | import inspect
import pytest
import smartchangelog
from smartchangelog.scripts import changelog_script
from smartchangelog.tools import set_args
from smartchangelog.gitcmd import tag
"""Path of the file containing changelog_script.py file"""
changelog_script_path = inspect.getfile(changelog_script)
def test_range_arg():
    """--range <rev-range> must exit 0 and print a non-empty report."""
    # GIVEN
    with set_args(changelog_script_path, '--range', revision_range()) as result, \
            pytest.raises(
                SystemExit) as e:
        # WHEN
        changelog_script.main()
    # captured streams are rewound by set_args on exit, so read them here
    stdout, stderr = result
    printed_report = stdout.read()
    # THEN
    assert e.value.code == 0
    assert printed_report
def test_range_arg_with_groupby():
    """--range plus --groupby type scope must exit 0 and print a report."""
    # GIVEN
    with set_args(changelog_script_path, '--range', revision_range(), '--groupby', 'type', 'scope') as result, \
            pytest.raises(
                SystemExit) as e:
        # WHEN
        changelog_script.main()
    # captured streams are rewound by set_args on exit, so read them here
    stdout, stderr = result
    printed_report = stdout.read()
    # THEN
    assert e.value.code == 0
    assert printed_report
def test_version_arg():
    """--version must exit 0 and print exactly the package version."""
    # GIVEN
    expected_version = smartchangelog.__version__
    with set_args(changelog_script_path, "--version") as result, pytest.raises(SystemExit) as e:
        # WHEN
        changelog_script.main()
    stdout, stderr = result
    version = stdout.read().strip("\n")
    # THEN
    assert e.value.code == 0
    assert version == expected_version
# Tools
def revision_range() -> str:
    """Build a git revision range 'firstTag..lastTag' from the repo's tags.

    Assumes tag() returns a non-empty, ordered sequence of tag names --
    TODO confirm against gitcmd.tag.
    """
    tags = tag()
    rev_range = "{start}..{end}".format(start=tags[0], end=tags[-1])
    return rev_range
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,731 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_changelog.py | from smartchangelog import datetools
from smartchangelog.changelog import Changelog, Node
from smartchangelog.commit import Commit
from smartchangelog.commitmsg import CommitType
from tests.unit import data_file_path
class TestChangelog:
    """Unit tests for Changelog parsing and grouping."""

    def test_parse(self):
        """big.gitlog parses into 35 commits; spot-check the first two."""
        # GIVEN
        with open(data_file_path('big.gitlog'), encoding='utf-8') as log_file:
            log = log_file.read()
        # a commit whose message follows the strict type(scope): subject format
        expected_commit_with_scope = Commit(
            id='a6f79b56acbb9e58327ecf91feed611bb614927f',
            author='Nicolas Gouzy <nicolas.gouzy@orange.com>',
            date=datetools.str2date('2017-03-23 17:30:56 +0100'),
            type=CommitType.refactor,
            scope='changelog',
            subject="better model",
            body='NamedTuple rocks !'
        )
        # a free-form (merge) message: type/scope/body stay None
        expected_commit_without_scope = Commit(
            id='597ec5676235e18f5a607726603df944da5be7fe',
            author='Nicolas Gouzy <nicolas.gouzy@orange.com>',
            date=datetools.str2date('2017-03-22 15:28:45 +0100'),
            type=None,
            scope=None,
            subject='Merge branch develop into master',
            body=None
        )
        # WHEN
        changelog = Changelog.parse(log)
        # THEN
        assert (len(changelog) == 35)
        assert changelog[0] == expected_commit_with_scope
        assert changelog[1] == expected_commit_without_scope

    def test_groupby(self):
        """Grouping by (type, scope) must keep every commit exactly once."""
        # GIVEN
        with open(data_file_path('big.gitlog'), encoding='utf-8') as log_file:
            log = log_file.read()
        changelog = Changelog.parse(log)
        # WHEN
        node = changelog.groupby(Commit.type, Commit.scope)
        # THEN
        assert len(node) == len(changelog)
class TestNode:
    """Unit tests for the Node tree used by changelog reports."""

    def test_len_with_empty_tree(self):
        """A childless node counts as a single leaf."""
        # GIVEN
        tree = Node()
        # WHEN
        # THEN
        assert len(tree) == 1

    def test_len_with_small_tree(self):
        """len() of a flat tree equals the number of leaves."""
        # GIVEN
        children = tuple([Node(name=str(i)) for i in range(10)])
        tree = Node(children=children)
        # WHEN
        actual = len(tree)
        # THEN
        assert actual == 10

    def test_len_with_tree(self):
        """Leaves are counted through intermediate levels (10 parents x 2 leaves)."""
        # GIVEN
        children = tuple([Node(name=str(i), children=tuple([Node(), Node()])) for i in range(10)])
        tree = Node(children=children)
        # WHEN
        actual = len(tree)
        # THEN
        assert actual == 20

    def test_report_with_big_git_log(self):
        """The report for big.gitlog must match the golden file big.md."""
        # GIVEN
        with open(data_file_path('big.gitlog'), encoding='utf-8') as log_file:
            log = log_file.read()
        changelog = Changelog.parse(log)
        node = changelog.groupby(Commit.type, Commit.scope)
        with open(data_file_path('big.md'), encoding='utf-8') as md_file:
            expected = md_file.read()
        # WHEN
        report = node.report()
        # THEN
        assert report == expected
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,732 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/__init__.py | import inspect
import os
def data_dir_path() -> str:
    """Absolute path of the directory containing this module."""
    this_file = inspect.getfile(inspect.currentframe())
    return os.path.dirname(os.path.abspath(this_file))
def data_file_path(filename: str) -> str:
    """Path of *filename* inside this package's `data` sub-directory."""
    data_dir = os.path.join(data_dir_path(), 'data')
    return os.path.join(data_dir, filename)
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,733 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/tools.py | import inspect
import os
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Iterator, TextIO, cast
from smartchangelog.scripts import commitmsg_script
"""Path of the file containing commitmsg_script.py file"""
commitmsg_script_path = inspect.getfile(commitmsg_script)
@contextmanager
def set_commit_editmsg(msg: str) -> Iterator[TextIO]:
    """Create a COMMIT_EDITMSG file containing *msg*; remove it on exit.

    Yields the (already closed) file object, mainly so callers can use its
    ``name`` attribute to locate the file.
    """
    path = 'COMMIT_EDITMSG'
    with open(path, mode='w') as editmsg:
        editmsg.write(msg)
    try:
        yield cast(TextIO, editmsg)
    finally:
        if os.path.isfile(path):
            os.remove(path)
@contextmanager
def set_args(*args):
    """Temporarily replace sys.argv and capture stdout/stderr.

    Yields the two StringIO buffers; on exit they are rewound to the start
    (so the caller can read what was written) and argv/stdout/stderr are
    restored.
    """
    saved_argv = list(sys.argv)
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.argv[:] = args
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        sys.argv[:] = saved_argv
        sys.stdout, sys.stderr = saved_out, saved_err
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,734 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/changelog.py | import re
from collections import Iterable
from io import StringIO
from itertools import groupby
from typing import List, Tuple, Callable, Any, IO, cast
from smartchangelog import datetools
from smartchangelog.commit import Commit
class Node:
    """A node in the grouped-changelog tree.

    A leaf wraps a single ``Commit`` (``value`` set, ``children`` is None);
    an inner node groups children under one criterion value (``name``).
    """

    def __init__(self, name: str = None, criterion: property = None, children: Tuple['Node'] = None,
                 value: Commit = None) -> None:
        self._parent: 'Node' = None  # set by the parent's children setter
        self.name = name
        self.criterion = criterion
        self._children: Tuple['Node'] = None
        self.children = children  # goes through the setter to re-parent the children
        self.value = value

    @property
    def parent(self) -> 'Node':
        # Parent node, or None for the root.
        return self._parent

    @property
    def children(self) -> Tuple['Node']:
        return self._children

    @children.setter
    def children(self, children: Tuple['Node']) -> None:
        # Re-parent every child so depth_level() can walk up to the root.
        if children is not None:
            for node in children:
                node._parent = self
        self._children = children

    def depth_level(self) -> int:
        """Distance from the root node (the root itself is level 0)."""
        if self.parent is None:
            return 0
        else:
            return self.parent.depth_level() + 1

    def __len__(self):
        # A leaf counts as 1; an inner node counts its leaves recursively.
        if not self.children:
            return 1
        nb_children = 0
        for child in self.children:
            nb_children += len(child)
        return nb_children

    @classmethod
    def print_multilines(cls, name: str, value: str, file: IO):
        # Render "name: value" on one line, or as a bullet list when the
        # value spans several lines. No output at all when value is falsy.
        if value:
            lines = value.split('\n')
            if len(lines) == 1:
                print(" * {name}: {value}".format(name=name, value=value), file=file)
            else:
                print(" * {name}:".format(name=name), file=file)
                for line in lines:
                    print(" - {line}".format(line=line), file=file)

    @classmethod
    def print_leaf(cls, commit: Commit, file: IO) -> None:
        """Write one commit's details (subject, body, date, author, id)."""
        print("* subject: {subject}".format(subject=commit.subject or ''), file=file)
        cls.print_multilines(name='body', value=commit.body, file=file)
        print(" * date: {date}".format(date=datetools.date2str(commit.date)), file=file)
        print(" * author: {author}".format(author=commit.author), file=file)
        print(" * commit: {id}".format(id=commit.id), file=file)

    def print_header(self, node: 'Node', file: IO):
        # Markdown heading whose level follows this node's depth in the tree.
        print(
            "{header} {criterion_name}: {name}".format(
                header="#" * (self.depth_level() + 1),
                criterion_name=Commit.property_name(node.criterion),
                name=node.name
            ),
            file=file
        )
        print(file=file)

    def report(self) -> str:
        """Render this subtree as a Markdown-style changelog string."""
        sio = StringIO()
        with sio:
            if self.children is None:
                self.print_leaf(commit=self.value, file=sio)
            else:
                for node in self.children:
                    if node.name:
                        self.print_header(node=node, file=sio)
                    print(node.report().strip('\n'), file=sio)
                    print(file=sio)
            # getvalue() must run before the with-block closes the buffer.
            string = sio.getvalue()
        return string
class Changelog(List[Commit]):
    """A list of commits with parsing, grouping and tree-building helpers."""

    @classmethod
    def parse(cls, log: str) -> 'Changelog':
        """Split raw ``git log`` output into commit chunks and parse each one."""
        raw_commits = re.findall('(commit [a-z0-9]{40}\n(?:.|\n)*?)(?=commit [a-z0-9]{40}|$)', log)
        return Changelog([Commit.parse(rc) for rc in raw_commits])

    def groupby(self, *criteria: property) -> Node:
        """Group the commits into a Node tree, one tree level per criterion.

        With no criteria left, the commits are sorted by date and returned
        as a flat node of leaves (recursion base case).
        """
        if len(criteria) == 0:
            # Sort
            date_prop = cast(property, Commit.date)
            date_getter = cast(Callable[[Commit], Any], date_prop.fget)
            self.sort(key=date_getter)
            return self.node()
        criteria_list = list(criteria)
        criterion = criteria_list.pop(0)
        criterion_getter = cast(Callable[[Commit], Any], criterion.fget)
        # Filter: commits without a value for this criterion go to "unknown"
        # noinspection PyTypeChecker
        categorized_changelog = Changelog([commit for commit in self if criterion_getter(commit) is not None])
        # noinspection PyTypeChecker
        uncategorized_commits = Changelog([commit for commit in self if criterion_getter(commit) is None])
        # Sort: itertools.groupby requires its input sorted by the same key
        categorized_changelog.sort(key=criterion_getter)
        # Arrange
        raw_result = self.groupby_to_list(groupby(iterable=categorized_changelog, key=criterion_getter))
        children_list: List[Node] = []
        for key, group in raw_result:
            cl = Changelog(group)
            # Recurse on the remaining criteria for each group.
            children_list.append(Node(name=str(key), criterion=criterion, children=cl.groupby(*criteria_list).children))
        if len(uncategorized_commits) > 0:
            children_list.append(uncategorized_commits.node(name="unknown", criterion=criterion))
        children = cast(Tuple[Node], tuple(children_list))
        return Node(children=children)

    def node(self, name: str=None, criterion: property=None) -> Node:
        """Wrap every commit in a leaf Node under a single parent node."""
        # noinspection PyTypeChecker
        children = cast(Tuple[Node], tuple(Node(value=commit) for commit in self))
        return Node(name=name, criterion=criterion, children=children)

    @classmethod
    def groupby_to_list(cls, iterable: Iterable):
        # Materialize itertools.groupby's lazy (key, group) pairs into lists.
        return [[key, [i for i in group]] for key, group in iterable]
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,735 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_static_types_check.py | import inspect
from smartchangelog import commitmsg
from mypy import api
from smartchangelog import changelog
def test_static_type_check_with_mypy():
    """Run mypy on the core modules and fail on any relevant type error."""
    commitmsg_file = inspect.getfile(commitmsg)
    changelog_file = inspect.getfile(changelog)
    params = ['--ignore-missing-imports', commitmsg_file, changelog_file]
    result = api.run(params)
    if result[0]:
        # FIXME: begin: There are bugs in mypy
        # * support iteration on enums see https://github.com/python/mypy/issues/2305
        # * support NamedTuple
        # So, we have to remove irrelevant errors
        irrelevant_errors = (
            '"CommitType" expects no type arguments, but 1 given',
            'Invalid type "commit_type_str"',
            'Iterable expected',
            '"CommitType" has no attribute "__iter__"',
            'Right hand side values are not supported in NamedTuple',
            'Invalid statement in NamedTuple definition; expected "field_name: field_type"'
        )
        relevant_lines = []
        for line in result[0].strip().split("\n"):
            parts = line.split("error: ", maxsplit=1)
            if len(parts) < 2:
                # Not an error line (e.g. mypy's trailing "Found N errors..."
                # summary); the previous code raised IndexError here.
                continue
            if parts[1] not in irrelevant_errors:
                relevant_lines.append(line)
        check_type_errors = "\n".join(relevant_lines)
        # FIXME: end
        if len(check_type_errors) > 0:
            raise (Exception(check_type_errors))
    if result[1]:
        raise (Exception(result[1]))
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,736 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/githook.py | import os
import shutil
# Hook locations, relative to the current working directory: these functions
# must be run from the repository root ('.git' must be a directory there).
git_path = os.path.join('.git')
hooks_path = os.path.join(git_path, 'hooks')
commitmsg_hook_path = os.path.join(hooks_path, 'commit-msg')
def install():
    """Symlink the installed ``commit-msg`` script into ``.git/hooks``.

    Any previously installed hook is removed first. Returns the hook path.
    Raises AssertionError when not run from a repository root or when the
    ``commit-msg`` executable is not found on PATH.
    """
    check_git_path()
    if not os.path.isdir(hooks_path):
        os.makedirs(hooks_path, mode=0o755, exist_ok=True)
    uninstall()
    commitmsg_script_path = shutil.which('commit-msg')
    assert commitmsg_script_path
    os.symlink(commitmsg_script_path, commitmsg_hook_path)
    assert os.path.exists(commitmsg_hook_path)
    return commitmsg_hook_path
def uninstall():
    """Remove the commit-msg hook if present; return the hook path.

    Raises AssertionError when not run from a repository root.
    """
    check_git_path()
    if os.path.exists(commitmsg_hook_path):
        os.remove(commitmsg_hook_path)
    return commitmsg_hook_path
def check_git_path():
    """Assert the current working directory is a git repository root."""
    assert os.path.isdir(git_path)
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,737 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/scripts/changelog_script.py | import argparse
from smartchangelog.gitcmd import log
from smartchangelog import __version__
from smartchangelog.changelog import Changelog
from smartchangelog.commit import Commit
def main() -> None:
    """CLI entry point: print a changelog report grouped by criteria.

    Reads ``git log`` for the given revision range, parses it into a
    Changelog, groups by the requested criteria and prints the report.
    Always exits with status 0 on success.
    """
    parser = argparse.ArgumentParser(description="Smart changelog report",
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-v", "--version", help="print smartchangelog version number", action="version",
                        version=__version__)
    parser.add_argument("-r", "--range", help="revision range (in the same meaning than git log command)")
    parser.add_argument("-g", "--groupby", help="list of criteria", nargs="*")
    args = parser.parse_args()
    gitlog = log(revision_range=args.range)
    changelog = Changelog.parse(gitlog)
    if args.groupby:
        # Map criterion names given on the command line to Commit properties.
        criteria = tuple((Commit.property(criterion) for criterion in args.groupby))
    else:
        criteria = ()
    node = changelog.groupby(*criteria)
    print(node.report())
    exit(0)


if __name__ == "__main__":
    main()
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,738 | ngouzy/smartchangelog | refs/heads/master | /tests/integration/test_commitmsg_script.py | import pytest
import smartchangelog.scripts.commitmsg_script
from smartchangelog.tools import set_args, set_commit_editmsg, commitmsg_script_path
from smartchangelog import githook
# noinspection PyUnresolvedReferences
from tests.integration import temp_dir
def test_help_arg():
    """`commit-msg -h` prints help and exits with status 0."""
    # GIVEN
    with set_args(commitmsg_script_path, "-h"), pytest.raises(SystemExit) as e:
        # WHEN
        main()  # argparse prints help and raises SystemExit(0)
        smartchangelog.scripts.commitmsg_script.main()
    # THEN
    assert e.value.code == 0
def test_right_msg_arg():
    """A well-formed message passed as an argument makes the hook exit 0."""
    # GIVEN
    with set_args(commitmsg_script_path, 'feat(ui): add button'), pytest.raises(SystemExit) as e:
        # WHEN
        smartchangelog.scripts.commitmsg_script.main()
    # THEN
    assert e.value.code == 0
def test_wrong_msg_arg():
    """An ill-formed message makes the hook exit with a non-zero status."""
    # GIVEN
    with set_args(commitmsg_script_path, 'wrong commit message'), pytest.raises(SystemExit) as e:
        # WHEN
        smartchangelog.scripts.commitmsg_script.main()
    # THEN
    assert e.value.code != 0
def test_right_msg_file():
    """A well-formed message read from a COMMIT_EDITMSG file exits 0."""
    # GIVEN
    with set_commit_editmsg('feat(ui): add button') as f, \
            set_args(commitmsg_script_path, f.name), \
            pytest.raises(SystemExit) as e:
        # WHEN
        smartchangelog.scripts.commitmsg_script.main()
    # THEN
    assert e.value.code == 0
def test_wrong_msg_file():
    """An ill-formed message read from a COMMIT_EDITMSG file exits non-zero."""
    # GIVEN
    with set_commit_editmsg('bad format') as f, \
            set_args(commitmsg_script_path, f.name), \
            pytest.raises(SystemExit) as e:
        # WHEN
        smartchangelog.scripts.commitmsg_script.main()
    # THEN
    assert e.value.code != 0
def test_version_arg():
    """`commit-msg --version` prints the package version and exits 0."""
    # GIVEN
    expected_version = smartchangelog.__version__
    with set_args(commitmsg_script_path, "--version") as result, pytest.raises(SystemExit) as e:
        # WHEN
        smartchangelog.scripts.commitmsg_script.main()
    # main() exits via SystemExit, so the captured streams can only be
    # read after the with-block (set_args rewinds them on exit).
    stdout, stderr = result
    version = stdout.read().strip("\n")
    # THEN
    assert e.value.code == 0
    assert version == expected_version
@pytest.mark.usefixtures("temp_dir")
def test_install_arg():
# GIVEN
with set_args(commitmsg_script_path, "-i") as result, pytest.raises(SystemExit) as e:
# WHEN
smartchangelog.scripts.commitmsg_script.main()
stdout, stderr = result
install_msg = stdout.read().strip("\n")
# THEN
assert e.value.code == 0
assert install_msg == 'commit-msg hook installed in .git/hooks/commit-msg'
@pytest.mark.usefixtures("temp_dir")
def test_uninstall_arg():
# GIVEN
githook.install()
with set_args(commitmsg_script_path, "-u") as result, pytest.raises(SystemExit) as e:
# WHEN
smartchangelog.scripts.commitmsg_script.main()
stdout, stderr = result
uninstall_msg = stdout.read().strip("\n")
# THEN
assert e.value.code == 0
assert uninstall_msg == 'commit-msg hook removed from .git/hooks/commit-msg'
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,739 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/scripts/commitmsg_script.py | #!/usr/bin/env python3
"""
Git commit hook:
.git/hooks/commit-msg
"""
import argparse
from smartchangelog.commitmsg import CommitMsg, CommitSyntaxError
from smartchangelog import __version__
from smartchangelog.githook import install, uninstall
def main() -> None:
    """Entry point for the commit-msg hook: check a message, or (un)install.

    Exits 0 on success; argparse's error() exits with status 2 on an
    invalid message or bad usage.
    """
    parser = argparse.ArgumentParser(description="Git commit message checker",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     epilog=CommitMsg.help())
    parser.add_argument("-v", "--version", help="print commit-msg version number", action="version",
                        version=__version__)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("msg", help="the commit message to check", nargs="?")
    group.add_argument("-i", "--install_hook", action="store_true")
    group.add_argument("-u", "--uninstall_hook", action="store_true")
    args = parser.parse_args()
    if args.install_hook:
        hook_path = install()
        if hook_path:
            print("commit-msg hook installed in {path}".format(path=hook_path))
    elif args.uninstall_hook:
        hook_path = uninstall()
        if hook_path:
            print("commit-msg hook removed from {path}".format(path=hook_path))
    else:
        msg = args.msg
        if msg is None:
            # Neither a message nor a flag was given: report a usage error
            # instead of crashing with TypeError on the containment test below.
            parser.error("a commit message (or a COMMIT_EDITMSG file path) is required")
        # Git passes the path of the message file (.git/COMMIT_EDITMSG);
        # a literal message can also be given directly on the command line.
        if "COMMIT_EDITMSG" in msg:
            with open(args.msg) as msg_file:
                msg = msg_file.read()
        try:
            CommitMsg.parse(msg)
        except CommitSyntaxError as e:
            parser.error("{error}\n\n{help}".format(error=e, help=CommitMsg.help()))
    exit(0)


if __name__ == "__main__":
    main()
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,740 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/commitmsg.py | import inspect
import re
from enum import Enum
from typing import NamedTuple, Optional, Any
class CommitSyntaxError(Exception):
    """
    Invalid commit syntax error
    """
    # Raised by CommitMsg parsing when a message violates the expected format.
class CommitType(Enum):
    """Allowed commit types; ordering follows declaration order."""
    feat = 'new feature for the user, not a new feature for build script'
    fix = 'bug fix for the user, not a fix to a build script'
    docs = 'changes to the documentation'
    style = 'formatting, missing semi colons, etc; no production code change'
    refactor = 'refactoring production code, eg.renaming a variable'
    test = 'adding missing tests, refactoring tests; no production code change'
    chore = 'updating gradle scripts, continuous integration scripts, etc; no production code change'

    def __lt__(self, other: Any) -> bool:
        # Only comparable with other CommitType members; everything else
        # is delegated back to the interpreter via NotImplemented.
        if not isinstance(other, CommitType):
            return NotImplemented
        return self.index() < other.index()

    def index(self) -> int:
        """Zero-based position of this member in declaration order."""
        return list(CommitType).index(self)

    def __str__(self) -> str:
        return self.name
class FirstLine(NamedTuple):
    # Parsed "<type>(<scope>): <subject>" header of a commit message.
    type: CommitType  # commit category (feat, fix, ...)
    scope: str  # affected component; None when the scope was omitted
    subject: str  # short description of the change
class CommitMsg:
    """
    Your commit message have to follow this format:
    <type>(<scope>): <subject>
    <body>
    Where :
    Message first line (type, scope and subject)
    The first line cannot be longer than {firstline_max_length} characters.
    The type and scope should always be lowercase as shown
    below.
    Allowed <type> values: {allowed_types}
    Example <scope> values:
    * ui
    * business
    * model
    * widget
    * config
    etc.
    The <scope> can be empty (e.g. if the change is a global or difficult
    to assign to a single component), in which case the parentheses are
    omitted.
    Message body (optional)
    The body cannot be longer than {bodyline_max_length} characters.
    uses the imperative, present tense: "change" not "changed" nor
    "changes"
    includes motivation for the change and contrasts with previous behavior
    """
    # NOTE: the class docstring doubles as the user-facing help text —
    # help() below formats it with the limits and allowed types, so its
    # wording and {placeholders} must not be changed casually.
    FIRSTLINE_PATTERN = re.compile('^([a-z]+)(?:\(([^\n\t]+)\))?: (.+)$')
    FIRSTLINE_MAX_LENGTH = 70
    BODY_MAX_LENGTH = 80

    def __init__(self, msg_type: CommitType, scope: str, subject: str, body: str = None) -> None:
        self.type = msg_type
        self.scope = scope
        self.subject = subject
        self.body = body

    def __eq__(self, other: Any) -> bool:
        # Value equality: all attributes must match. Other types fall back
        # to the interpreter's default handling via NotImplemented.
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    @classmethod
    def parse(cls, msg: str) -> 'CommitMsg':
        """Parse a full commit message; raise CommitSyntaxError when invalid."""
        msg_parts = msg.split("\n", maxsplit=1)
        firstline = cls.parse_firstline(msg_parts[0])
        if len(msg_parts) > 1:
            body = msg_parts[1]
            cls.parse_body(body)  # validation only; the raw body is kept
        else:
            body = None
        return cls(firstline.type, firstline.scope, firstline.subject, body)

    @classmethod
    def parse_firstline(cls, firstline: str) -> FirstLine:
        """Validate and split the "<type>(<scope>): <subject>" header.

        Raises CommitSyntaxError on over-long lines, pattern mismatch or an
        unknown commit type.
        """
        if len(firstline) > cls.FIRSTLINE_MAX_LENGTH:
            raise CommitSyntaxError("First line can not be greater than {length} characters".format(
                length=cls.FIRSTLINE_MAX_LENGTH))
        result = cls.FIRSTLINE_PATTERN.search(firstline)
        if result is None:
            raise CommitSyntaxError("{firstline} doesn't follow the first line commit message pattern: {pattern}"
                                    .format(firstline=firstline, pattern=cls.FIRSTLINE_PATTERN.pattern))
        commit_type_str, scope, subject = result.groups()
        try:
            commit_type = CommitType[commit_type_str]
        except KeyError:
            raise CommitSyntaxError("{commit_type} is not an available commit type".format(commit_type=commit_type_str))
        return FirstLine(type=commit_type, scope=scope, subject=subject)

    @classmethod
    def parse_body(cls, body: str) -> str:
        """Check every body line against BODY_MAX_LENGTH; return the body."""
        for line in body.split('\n'):
            if len(line) > cls.BODY_MAX_LENGTH:
                raise CommitSyntaxError("Body line can not be greater than {length} characters".format(
                    length=cls.BODY_MAX_LENGTH))
        return body

    @classmethod
    def format_allowed_types(cls) -> str:
        # One "* name: description" bullet per CommitType member.
        return "\n" + "\n".join("\t* {name}: {doc}".format(name=ct.name, doc=ct.value) for ct in CommitType)

    @classmethod
    def help(cls) -> str:
        """Return the user-facing usage text built from the class docstring."""
        return inspect.getdoc(cls).format(allowed_types=cls.format_allowed_types(),
                                          firstline_max_length=cls.FIRSTLINE_MAX_LENGTH,
                                          bodyline_max_length=cls.BODY_MAX_LENGTH)
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,741 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_commitmsg.py | import pytest
from smartchangelog.commitmsg import CommitMsg, CommitSyntaxError, CommitType
class TestCommitMsg:
    class TestParseFirstLine:
        """Tests for CommitMsg.parse_firstline."""

        def test_with_type_and_scope_and_subject(self):
            # GIVEN
            firstline = "feat(ui main): add button"
            # WHEN
            parsed_firstline = CommitMsg.parse_firstline(firstline)
            # THEN
            assert parsed_firstline.type == CommitType.feat
            assert parsed_firstline.scope == "ui main"
            assert parsed_firstline.subject == "add button"

        def test_with_firstline_with_type_and_subject_but_without_scope(self):
            # GIVEN
            msg = "fix: commit-msg hook exit"
            # WHEN
            firstline = CommitMsg.parse_firstline(msg)
            # THEN
            assert firstline.type == CommitType.fix
            assert firstline.scope is None
            assert firstline.subject == "commit-msg hook exit"

        def test_with_wrong_firstline_format(self):
            # GIVEN
            firstline = "bad message"
            with pytest.raises(CommitSyntaxError):
                # WHEN
                CommitMsg.parse_firstline(firstline)
                # THEN CommitSyntaxError is raised

        def test_with_unknown_type(self):
            # GIVEN
            firstline = "unknown(ui): add button"
            with pytest.raises(CommitSyntaxError):
                # WHEN
                CommitMsg.parse_firstline(firstline)
                # THEN CommitSyntaxError is raised

        def test_with_too_long_firstline_length(self):
            # GIVEN
            firstline = "feat(ui): " + "a" * (CommitMsg.FIRSTLINE_MAX_LENGTH + 1)
            with pytest.raises(CommitSyntaxError):
                # WHEN
                CommitMsg.parse_firstline(firstline)
                # THEN CommitSyntaxError is raised
    class TestParseBody:
        """Tests for CommitMsg.parse_body."""

        def test_with_too_long_body_line_length(self):
            # GIVEN
            body = "body\n" + \
                   "b" * (CommitMsg.BODY_MAX_LENGTH + 1)
            with pytest.raises(CommitSyntaxError):
                # WHEN
                CommitMsg.parse_body(body)
                # THEN CommitSyntaxError is raised

        def test_with_one_line_body(self):
            # GIVEN
            body = "body"
            # WHEN
            actual = CommitMsg.parse_body(body)
            # THEN
            assert actual == body

        def test_with_multi_line_body(self):
            # GIVEN
            body = "first line body\n" + \
                   "second line body"
            # WHEN
            actual = CommitMsg.parse_body(body)
            # THEN
            assert actual == body
class TestParse:
def test_with_firstline_but_without_body(self):
# GIVEN
msg = "feat: add button"
# WHEN
commit_msg = CommitMsg.parse(msg)
# THEN
assert commit_msg.type == CommitType.feat
assert commit_msg.scope is None
assert commit_msg.subject == "add button"
assert commit_msg.body is None
def test_with_firstline_and_body(self):
# GIVEN
msg = "" + \
"feat(ui): add button\n" + \
"body first line\n" + \
"body second line"
# WHEN
commit_msg = CommitMsg.parse(msg)
# THEN
assert commit_msg.type == CommitType.feat
assert commit_msg.scope == "ui"
assert commit_msg.subject == "add button"
assert commit_msg.body == "body first line\nbody second line"
class TestEquality:
def test_equality_with_same_commitmsg(self):
# GIVEN
cm1 = CommitMsg(
msg_type=CommitType.feat,
scope='conso',
subject='OEM-372',
body='add field for nbAlerts'
)
cm2 = CommitMsg(
msg_type=CommitType.feat,
scope='conso',
subject='OEM-372',
body='add field for nbAlerts'
)
# WHEN
# THEN
assert cm1 == cm2
def test_equality_with_other_commitmsg(self):
# GIVEN
cm1 = CommitMsg(
msg_type=CommitType.feat,
scope='conso',
subject='OEM-372',
body='add field for nbAlerts'
)
cm2 = CommitMsg(
msg_type=CommitType.fix,
scope='conso',
subject='OEM-372',
body='add field for nbAlerts'
)
# WHEN
# THEN
assert cm1 != cm2
def test_equality_with_other_class(self):
# GIVEN
cm = CommitMsg(
msg_type=CommitType.feat,
scope='conso',
subject='OEM-372',
body='add field for nbAlerts'
)
s = "a string"
# WHEN
# THEN
assert cm != s
class TestCommitType:
    """Tests for CommitType's string form and ordering behaviour."""

    def test_str(self):
        # str() yields the member name, not the repr
        assert str(CommitType.feat) == 'feat'

    def test_lt_with_commit_type(self):
        # Members are ordered (feat sorts before refactor)
        assert CommitType.feat < CommitType.refactor

    def test_lt_with_other_class(self):
        # Ordering against a non-CommitType must raise TypeError
        with pytest.raises(TypeError):
            assert CommitType.feat < "a string"
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,742 | ngouzy/smartchangelog | refs/heads/master | /smartchangelog/gitcmd.py | import subprocess
import os
from typing import cast, List
class GitCmdError(Exception):
    """Raised when an underlying ``git`` invocation fails
    (non-zero exit status or output on stderr)."""
def git_command(*git_args: str) -> str:
    """Run ``git`` with *git_args* and return its stdout, stripped of
    trailing newlines.

    Raises:
        GitCmdError: when git exits non-zero or writes anything to stderr.
    """
    completed = subprocess.run(
        ['git', *git_args],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Treat any stderr output as failure, even with a zero exit code.
    if completed.returncode != 0 or len(completed.stderr) != 0:
        raise GitCmdError(completed.stderr.decode('utf-8').strip('\n'))
    return completed.stdout.decode('utf-8').strip('\n')
def is_inside_work_tree() -> bool:
    """Return True when the current working directory lies inside a
    git work tree, False otherwise (including when git itself fails)."""
    try:
        return git_command('rev-parse', '--is-inside-work-tree') == 'true'
    except GitCmdError:
        return False
def get_gitdir() -> str:
    """Return the absolute path of the repository's ``.git`` directory.

    Raises:
        GitCmdError: when the current directory is not inside a work tree.
    """
    if not is_inside_work_tree():
        raise GitCmdError("You have to be inside a git work tree")
    toplevel = git_command('rev-parse', '--show-toplevel')
    return os.path.abspath(os.path.join(toplevel, '.git'))
def log(revision_range: str) -> str:
    """Return ``git log`` output for *revision_range*, with ISO-formatted dates."""
    output = git_command("log", revision_range, "--date", "iso")
    return output
def tag() -> List[str]:
    """Return every tag name in the repository, one list entry per tag."""
    raw = git_command("tag")
    return raw.split("\n")
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,743 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_datetools.py | from datetime import datetime, timezone, timedelta
from smartchangelog import datetools
def test_str2date():
    """str2date parses 'YYYY-MM-DD HH:MM:SS +HHMM' into an aware datetime."""
    # GIVEN a timestamp string with a +0100 offset
    raw = '2017-03-21 16:09:13 +0100'
    cet = timezone(timedelta(hours=1))
    # WHEN
    parsed = datetools.str2date(raw)
    # THEN the parsed value matches the aware datetime
    assert parsed == datetime(2017, 3, 21, 16, 9, 13, tzinfo=cet)
def test_date2str():
    """date2str renders an aware datetime as 'YYYY-MM-DD HH:MM:SS +HHMM'."""
    # GIVEN an aware datetime at UTC+1
    cet = timezone(timedelta(hours=1))
    moment = datetime(2017, 3, 21, 16, 9, 13, tzinfo=cet)
    # WHEN
    rendered = datetools.date2str(moment)
    # THEN the formatted string matches
    assert rendered == '2017-03-21 16:09:13 +0100'
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,744 | ngouzy/smartchangelog | refs/heads/master | /tests/integration/test_gitusage.py | import os
import shutil
import tempfile
import pytest
from smartchangelog import githook
from smartchangelog.gitcmd import git_command, GitCmdError
# noinspection PyUnresolvedReferences
from tests.integration import temp_dir
@pytest.fixture(scope='function')
def add_sample_file(temp_dir):
    """Install the commit-msg hook and stage one sample file in *temp_dir*."""
    githook.install()
    sample_path = os.path.join(temp_dir, "sample_file.txt")
    with open(sample_path, mode="w") as handle:
        handle.write("sample content")
    git_command('add', '.')
    yield None
@pytest.mark.usefixtures("add_sample_file")
def test_git_commit_with_right_msg():
    # WHEN committing with a message that follows the convention
    # THEN the hook lets the commit through and git reports it
    assert git_command('commit', '-m', 'feat(ui): sample')
@pytest.mark.usefixtures("add_sample_file")
def test_git_commit_with_wrong_msg():
    # WHEN committing with a non-conventional message
    # THEN the hook rejects the commit, surfacing as a GitCmdError
    with pytest.raises(GitCmdError):
        git_command('commit', '-m', 'wrong commit message')
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,745 | ngouzy/smartchangelog | refs/heads/master | /tests/unit/test_gitcmd.py | import os
import pytest
from smartchangelog.gitcmd import GitCmdError, is_inside_work_tree, get_gitdir, tag
from tests.unit import data_dir_path
@pytest.fixture(scope='function')
def cmd():
    """Remember the working directory and restore it after the test,
    since the tests below chdir around."""
    saved_cwd = os.getcwd()
    yield None
    os.chdir(saved_cwd)
@pytest.mark.usefixtures('cmd')
def test_is_inside_work_tree_ok():
    # GIVEN a directory known to live inside a git work tree
    os.chdir(data_dir_path())
    # WHEN / THEN the probe reports True
    assert is_inside_work_tree()
@pytest.mark.usefixtures('cmd')
def test_is_inside_work_tree_ko():
    # GIVEN the user's home directory, assumed not to be a work tree
    os.chdir(os.path.expanduser('~'))
    # WHEN / THEN the probe reports False
    assert not is_inside_work_tree()
@pytest.mark.usefixtures('cmd')
def test_get_gitdir_ok():
    # GIVEN a directory inside a work tree
    os.chdir(data_dir_path())
    # WHEN / THEN the returned path's last component is '.git'
    assert os.path.split(get_gitdir())[-1] == '.git'
@pytest.mark.usefixtures('cmd')
def test_get_gitdir_ko():
    # GIVEN a directory outside any work tree
    os.chdir(os.path.expanduser('~'))
    # WHEN / THEN the lookup fails loudly
    with pytest.raises(GitCmdError):
        get_gitdir()
@pytest.mark.usefixtures('cmd')
def test_get_tag():
    # GIVEN a repository that carries at least one tag
    os.chdir(data_dir_path())
    # WHEN / THEN tag() yields a non-empty list
    assert len(tag()) > 0
| {"/tests/integration/__init__.py": ["/smartchangelog/gitcmd.py"], "/smartchangelog/commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commitmsg.py"], "/setup.py": ["/smartchangelog/__init__.py"], "/tests/unit/test_commit.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/tests/integration/test_githook.py": ["/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/tests/integration/test_changelog_script.py": ["/smartchangelog/__init__.py", "/smartchangelog/tools.py", "/smartchangelog/gitcmd.py"], "/tests/unit/test_changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py", "/smartchangelog/commitmsg.py", "/tests/unit/__init__.py"], "/smartchangelog/changelog.py": ["/smartchangelog/__init__.py", "/smartchangelog/commit.py"], "/tests/unit/test_static_types_check.py": ["/smartchangelog/__init__.py"], "/smartchangelog/scripts/changelog_script.py": ["/smartchangelog/gitcmd.py", "/smartchangelog/__init__.py", "/smartchangelog/changelog.py", "/smartchangelog/commit.py"], "/tests/integration/test_commitmsg_script.py": ["/smartchangelog/scripts/commitmsg_script.py", "/smartchangelog/tools.py", "/smartchangelog/__init__.py", "/tests/integration/__init__.py"], "/smartchangelog/scripts/commitmsg_script.py": ["/smartchangelog/commitmsg.py", "/smartchangelog/__init__.py", "/smartchangelog/githook.py"], "/tests/unit/test_commitmsg.py": ["/smartchangelog/commitmsg.py"], "/tests/unit/test_datetools.py": ["/smartchangelog/__init__.py"], "/tests/integration/test_gitusage.py": ["/smartchangelog/__init__.py", "/smartchangelog/gitcmd.py", "/tests/integration/__init__.py"], "/tests/unit/test_gitcmd.py": ["/smartchangelog/gitcmd.py", "/tests/unit/__init__.py"]} |
65,746 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0005_auto_20201219_1312.py | # Generated by Django 3.1.4 on 2020-12-19 04:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.4). Tightens User field constraints
    # (unique account/email/phone_number, PROTECT foreign keys), creates
    # the OftenBuying through-model, and adds User.often_buying (M2M to
    # product.Product via OftenBuying).

    dependencies = [
        ('product', '0001_initial'),
        ('user', '0004_auto_20201217_1957'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='account',
            field=models.CharField(max_length=45, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='birth_date',
            field=models.DateField(null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='event_name',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='gender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.gender'),
        ),
        migrations.AlterField(
            model_name='user',
            name='grade',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.grade'),
        ),
        migrations.AlterField(
            model_name='user',
            name='phone_number',
            field=models.CharField(max_length=45, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='recommender',
            field=models.CharField(max_length=45, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='terms_and_condition',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.termsandcondition'),
        ),
        migrations.CreateModel(
            name='OftenBuying',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
            options={
                'db_table': 'often_buyings',
            },
        ),
        migrations.AddField(
            model_name='user',
            name='often_buying',
            field=models.ManyToManyField(related_name='often_buying_set', through='user.OftenBuying', to='product.Product'),
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,747 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /board/views.py | import json
from django.http import JsonResponse
from django.views import View
from django.core.exceptions import ValidationError
from .models import Review, Question
from user.utils import signin_decorator
class ReviewView(View):
    """Create, read, update and delete a single product review."""

    @signin_decorator
    def post(self, request):
        """Create a review authored by the signed-in user; 201 on success."""
        try:
            data = json.loads(request.body)
            Review.objects.create(
                author_id  = request.user.id,
                product_id = data['product_id'],
                title      = data['title'],
                contents   = data['contents'],
                help_count = 0,
                hit_count  = 0,
                image_url  = data['image_url'],
            )
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=201)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)

    def get(self, request, review_id):
        """Return one review and bump its view counter."""
        try:
            review = Review.objects.get(id=review_id)
            # NOTE(review): read-modify-write is racy under concurrent GETs;
            # an F('hit_count') + 1 update would be atomic — confirm before changing.
            review.hit_count += 1
            review.save()
            review_post = {
                'id'        : review.id,
                'title'     : review.title,
                'contents'  : review.contents,
                'help_count': review.help_count,
                'hit_count' : int(review.hit_count),
                'image_url' : review.image_url,
                'created_at': review.created_at,
            }
            # FIX: a successful read returns 200 (201 means "created")
            return JsonResponse({"MESSAGE": "SUCCESS", "review_post": review_post}, status=200)
        except Review.DoesNotExist:
            # FIX: a missing row is not a key error; mirror patch()/delete()
            return JsonResponse({"MESSAGE": "REVIEW_DOES_NOT_EXIST"}, status=400)

    @signin_decorator
    def patch(self, request, review_id):
        """Either bump the 'helpful' counter (any signed-in user) or let the
        author edit title/contents/image of their own review."""
        try:
            data = json.loads(request.body)
            if data.get('help') == "True":
                # Helpfulness votes are open to any signed-in user.
                review = Review.objects.get(id=review_id)
                review.help_count += 1
                review.save()
                return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
            # Editing is restricted to the review's author.
            review = Review.objects.get(id=review_id, author_id=request.user.id,)
            if data.get('title'):
                review.title = data.get('title')
            if data.get('contents'):
                review.contents = data.get('contents')
            if data.get('image_url'):
                review.image_url = data.get('image_url')
            review.save()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)
        except Review.DoesNotExist:
            return JsonResponse({"MESSAGE": "REVIEW_DOES_NOT_EXIST"}, status=400)

    @signin_decorator
    def delete(self, request, review_id):
        """Delete the caller's own review; 204 on success."""
        try:
            review = Review.objects.get(id=review_id, author_id=request.user.id)
            review.delete()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=204)
        except Review.DoesNotExist:
            return JsonResponse({"MESSAGE": "REVIEW_DOES_NOT_EXIST"}, status=400)
class ReviewListView(View):
    """Paginated, newest-first review listing for one product."""

    def get(self, request, product_id):
        """Return reviews for *product_id*.

        Query params: ``offset`` (default 0) and ``limit`` (page size,
        default 10).
        """
        try:
            # FIX: the original called int(request.GET.get('offset'), 0),
            # i.e. int(value, base=0) — a TypeError whenever the parameter
            # was absent (value is None), and the intended default of 0/10
            # was silently misused as a number base. The default belongs
            # in .get(), not in int().
            offset = int(request.GET.get('offset', 0))
            limit = int(request.GET.get('limit', 10))
            limit += offset
            reviews = Review.objects.order_by('-created_at').filter(product_id=product_id)
            review_list = [{
                'id'         : review.id,
                'title'      : review.title,
                'contents'   : review.contents,
                'help_count' : review.help_count,
                'hit_count'  : review.hit_count,
                'image_url'  : review.image_url,
                'created_at' : review.created_at,
                'writer'     : review.author.name,
                'show'       : "False",
            } for review in reviews[offset:limit]]
            return JsonResponse({'MESSAGE': 'SUCCESS', "review_list": review_list}, status=200)
        except Exception as e:
            # str(e) is safe even for exceptions raised without args
            return JsonResponse({'message' : 'ERROR => ' + str(e)}, status = 400)
class QuestionView(View):
    """Create, read, update and delete a single product question."""

    @signin_decorator
    def post(self, request):
        """Create a question authored by the signed-in user; 201 on success."""
        try:
            data = json.loads(request.body)
            Question.objects.create(
                author_id  = request.user.id,
                product_id = data['product_id'],
                title      = data['title'],
                contents   = data['contents'],
                is_private = data['private'] if data.get('private') else False,
            )
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=201)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + str(e.args[0])}, status=400)
        except ValidationError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + str(e.args[0])}, status=400)

    def get(self, request, question_id):
        """Return one question post."""
        try:
            question = Question.objects.get(id=question_id)
            question_post = {
                'id'         : question.id,
                'product_id' : question.product_id,
                'title'      : question.title,
                'contents'   : question.contents,
                'private'    : question.is_private,
                'created_at' : question.created_at,
            }
            # FIX: a successful read returns 200 (201 means "created")
            return JsonResponse({"MESSAGE": "SUCCESS", "question_post": question_post}, status=200)
        except Question.DoesNotExist:
            # FIX: a missing row is not a key error; mirror patch()/delete()
            return JsonResponse({"MESSAGE": "QUESTION_DOES_NOT_EXIST"}, status=400)

    @signin_decorator
    def patch(self, request, question_id):
        """Let the author edit title/contents/privacy of their own question."""
        try:
            data = json.loads(request.body)
            question = Question.objects.get(id=question_id, author_id=request.user.id)
            if data.get('title'):
                question.title = data.get('title')
            if data.get('contents'):
                question.contents = data.get('contents')
            if data.get('private'):
                question.is_private = data.get('private')
            question.save()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)
        except Question.DoesNotExist:
            return JsonResponse({"MESSAGE": "QUESTION_DOES_NOT_EXIST"}, status=400)

    @signin_decorator
    def delete(self, request, question_id):
        """Delete the caller's own question; 204 on success."""
        try:
            question = Question.objects.get(id=question_id, author_id=request.user.id)
            question.delete()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=204)
        except Question.DoesNotExist:
            return JsonResponse({"MESSAGE": "QUESTION_DOES_NOT_EXIST"}, status=400)
class QuestionListView(View):
    """Paginated, newest-first question listing for one product."""

    def get(self, request, product_id):
        """Return questions for *product_id*.

        Query params: ``offset`` (default 0) and ``limit`` (page size,
        default 10).
        """
        try:
            # FIX: same defect as ReviewListView — int(value, base) was
            # being called with base=0 and a possibly-None value, crashing
            # with TypeError when the parameter was absent. The default
            # belongs in .get().
            offset = int(request.GET.get('offset', 0))
            limit = int(request.GET.get('limit', 10))
            limit += offset
            questions = Question.objects.order_by('-created_at').filter(product_id=product_id)
            question_list = [{
                'id'         : question.id,
                'product_id' : question.product_id,
                'title'      : question.title,
                'contents'   : question.contents,
                'private'    : question.is_private,
                'created_at' : question.created_at,
                'writer'     : question.author.name,
                'show'       : "False",
            } for question in questions[offset:limit]]
            return JsonResponse({'MESSAGE': 'SUCCESS', "question_list": question_list}, status=200)
        except Exception as e:
            # str(e) is safe even for exceptions raised without args
            return JsonResponse({'MESSAGE' : 'ERROR => ' + str(e)}, status = 400)
65,748 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /board/migrations/0002_auto_20201217_1745.py | # Generated by Django 3.1.4 on 2020-12-17 08:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.4). Renames Review's image field, flips
    # the Question<->QuestionComment link direction (comment FK now lives
    # on Question), and adds a title to QuestionComment.

    dependencies = [
        ('board', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='review',
            old_name='review_image_url',
            new_name='image_url',
        ),
        migrations.RemoveField(
            model_name='question',
            name='has_comment',
        ),
        migrations.RemoveField(
            model_name='questioncomment',
            name='question',
        ),
        migrations.AddField(
            model_name='question',
            name='comment_id',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='board.questioncomment'),
        ),
        migrations.AddField(
            model_name='questioncomment',
            name='title',
            field=models.CharField(default='hi', max_length=200),
            preserve_default=False,
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,749 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/migrations/0002_auto_20201217_2010.py | # Generated by Django 3.1.4 on 2020-12-17 11:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.4). Relaxes Product/SubCategory foreign
    # keys to nullable SET_NULL and adjusts a couple of char-field shapes.

    dependencies = [
        ('product', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='delivery_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.deliverytype'),
        ),
        migrations.AlterField(
            model_name='product',
            name='discount',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.discount'),
        ),
        migrations.AlterField(
            model_name='product',
            name='notice',
            field=models.CharField(max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='origin',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.origin'),
        ),
        migrations.AlterField(
            model_name='product',
            name='packing_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.packingtype'),
        ),
        migrations.AlterField(
            model_name='product',
            name='subcategory',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.subcategory'),
        ),
        migrations.AlterField(
            model_name='product',
            name='subtitle',
            field=models.CharField(max_length=500),
        ),
        migrations.AlterField(
            model_name='subcategory',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.category'),
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,750 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0004_remove_oftenbuying_quantity.py | # Generated by Django 3.1.4 on 2020-12-21 01:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20201217_1756'),
]
operations = [
migrations.RemoveField(
model_name='oftenbuying',
name='quantity',
),
]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,751 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0001_initial.py | # Generated by Django 3.1.4 on 2020-12-16 11:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.4). Initial schema for the user app:
    # Gender, Grade, TermsAndCondition lookup tables, the User model,
    # and per-user Address rows.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gender', models.CharField(max_length=10)),
            ],
            options={
                'db_table': 'genders',
            },
        ),
        migrations.CreateModel(
            name='Grade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('grade', models.CharField(max_length=10)),
            ],
            options={
                'db_table': 'grades',
            },
        ),
        migrations.CreateModel(
            name='TermsAndCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('privacy_policy_agreement', models.BooleanField(default=False)),
                ('sns_marketing_agreement', models.BooleanField(default=False)),
                ('email_marketing_agreement', models.BooleanField(default=False)),
            ],
            options={
                'db_table': 'terms_and_conditions',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account', models.CharField(max_length=45)),
                ('password', models.CharField(max_length=2000)),
                ('name', models.CharField(max_length=45)),
                ('email', models.EmailField(max_length=254)),
                ('phone_number', models.CharField(max_length=45)),
                ('birth_date', models.DateField(null=True)),
                ('recommender', models.CharField(max_length=45, null=True)),
                ('event_name', models.CharField(max_length=100, null=True)),
                ('mileage', models.DecimalField(decimal_places=2, max_digits=10)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('gender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.gender')),
                ('grade', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.grade')),
                ('terms_and_condition', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.termsandcondition')),
            ],
            options={
                'db_table': 'users',
            },
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('is_active', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
            options={
                'db_table': 'addresses',
            },
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,752 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /board/urls.py | from django.urls import path
from .views import QuestionView, QuestionListView
from .views import ReviewView, ReviewListView
# URL routes for the board app: question and review endpoints.
# Each view is reused for both the collection route (no id) and the
# single-object route (<int:...> converter only matches digits).
urlpatterns = [
    path('/question', QuestionView.as_view()),
    path('/question/<int:question_id>', QuestionView.as_view()),
    path('/question/product/<int:product_id>', QuestionListView.as_view()),
    path('/review', ReviewView.as_view()),
    path('/review/<int:review_id>', ReviewView.as_view()),
    path('/review/product/<int:product_id>', ReviewListView.as_view()),
] | {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,753 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json, jwt
from django.http import JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from my_settings import SECRET_KEY, ALGORITHM
from .models import User
def signin_decorator(func):
    """Decorator for class-based view methods that require a signed-in user.

    Reads the JWT from the ``Authorization`` header, resolves the matching
    ``User`` row and attaches it as ``request.user`` before calling the
    wrapped method. Returns a 401 JsonResponse for any auth failure.
    """
    def wrapper(self, request, *args, **kwargs):
        access_token = request.headers.get("Authorization", None)

        # Bug fix: the original tested `"Authorization" == None` (a string
        # literal vs None — always False), so requests without a token fell
        # straight through to jwt.decode. Test the fetched value instead.
        if access_token is None:
            return JsonResponse({"message":"INVALID_LOGIN"}, status=401)
        try:
            # Security fix: removed the debug print() calls that wrote the
            # raw token, its payload and the resolved user to stdout.
            token_payload = jwt.decode(access_token, SECRET_KEY, ALGORITHM)
            request.user = User.objects.get(account=token_payload['id'])
            return func(self, request, *args, **kwargs)
        # Specific PyJWT errors first; InvalidTokenError is their base class
        # and must stay last or it would shadow the others.
        except jwt.ExpiredSignatureError:
            return JsonResponse({"message":"EXPIRED_TOKEN"}, status=401)
        except jwt.DecodeError:
            return JsonResponse({"message":"INVALID_TOKEN"}, status=401)
        except User.DoesNotExist:
            return JsonResponse({"message":"INVALID_USER"}, status=401)
        except jwt.InvalidTokenError:
            return JsonResponse({"message":"NEED_LOGIN"}, status=401)
    return wrapper
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,754 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /order/migrations/0001_initial.py | # Generated by Django 3.1.4 on 2020-12-16 11:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the order app.

    Creates the order_status, payment_method, orders, often_buyings,
    coupons and carts tables; FK targets live in the product and user
    apps, hence the dependencies below.
    """

    initial = True

    dependencies = [
        ('product', '0001_initial'),
        ('user', '0001_initial'),
    ]

    operations = [
        # Lookup table of order status names.
        migrations.CreateModel(
            name='OrderStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'order_status',
            },
        ),
        # Lookup table of payment method names.
        migrations.CreateModel(
            name='PaymentMethod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'payment_method',
            },
        ),
        # Order header; PROTECT keeps referenced lookup/user/address rows alive.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_number', models.CharField(max_length=45)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.address')),
                ('payment_method', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='order.paymentmethod')),
                ('status', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='order.orderstatus')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user.user')),
            ],
            options={
                'db_table': 'orders',
            },
        ),
        # User <-> product join rows with a quantity.
        migrations.CreateModel(
            name='OftenBuying',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('product', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='product.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
            options={
                'db_table': 'often_buyings',
            },
        ),
        # Per-user coupons with a percentage discount.
        migrations.CreateModel(
            name='Coupons',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('discount_percentage', models.DecimalField(decimal_places=2, max_digits=5)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
            options={
                'db_table': 'coupons',
            },
        ),
        # Order line items (product + quantity per order).
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('order', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='order.order')),
                ('product', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='product.product')),
            ],
            options={
                'db_table': 'carts',
            },
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,755 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/urls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.urls import path
from .views import SignupView, SigninView
# URL routes for the user app: registration and login.
urlpatterns = [
    path('/signup', SignupView.as_view()),
    path('/signin', SigninView.as_view()),
]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,756 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/urls.py | from django.urls import path
from product.views import (
ProductDetailView,
ProductListView,
MdChoiceView,
CategoryView,
)
# URL routes for the product app. The <int:product_id> converter only
# matches digits, so it does not shadow the /md-choice and /category routes.
urlpatterns = [
    path('', ProductListView.as_view()),
    path('/<int:product_id>', ProductDetailView.as_view()),
    path('/md-choice', MdChoiceView.as_view()),
    path('/category', CategoryView.as_view()),
]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,757 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/views.py | import json
from django.http import JsonResponse
from django.views import View
from .models import Category, Subcategory, Product
class ProductDetailView(View):
    """Read-only endpoint returning the full detail payload for one product."""

    def get(self, request, product_id):
        """Return product detail as JSON.

        Responses:
            200 {"message": "SUCCESS", "product_detail": {...}}
            404 {"message": "PRODUCT_NOT_FOUND"} when no product has product_id
        """
        try:
            # select_related pulls the FK lookup rows, prefetch_related the
            # reverse description/image sets, in the same query batch.
            product = Product.objects.select_related('discount', 'delivery_type', 'origin', 'packing_type', 'subcategory').prefetch_related('productdescription_set', 'detailedimage_set').get(id=product_id)
            product_detail = {
                'id' : product.id,
                'name' : product.name,
                'subtitle' : product.subtitle,
                'price' : product.price,
                'discount_name' : product.discount.name,
                'discount_percentage' : product.discount.percentage,
                'is_soldout' : product.is_soldout,
                'image_url' : product.image_url,
                'sales_unit' : product.sales_unit,
                'weight' : product.weight,
                'delivery_type' : product.delivery_type.name,
                'origin' : product.origin.name,
                'packing_type' : product.packing_type.packing_type,
                'allergy' : product.allergy,
                'expiration_date' : product.expiration_date,
                'notice' : product.notice,
                'subcategory_id' : product.subcategory.id,
                # NOTE(review): .get() on the reverse sets assumes exactly one
                # ProductDescription and one DetailedImage per product; 0 or >1
                # rows raises an uncaught exception (500) — confirm invariant.
                'content' : product.productdescription_set.get().content,
                'product_image_url' : product.detailedimage_set.get().product_image_url,
                'description_image_url' : product.detailedimage_set.get().description_image_url
            }
        except Product.DoesNotExist:
            return JsonResponse({'message': 'PRODUCT_NOT_FOUND'}, status = 404)
        return JsonResponse({'message': 'SUCCESS', 'product_detail': product_detail}, status = 200)
class ProductListView(View):
    """Paginated product listing, optionally filtered by subcategory."""

    def get(self, request):
        """Return up to ``limit`` products starting at ``offset``.

        Query params:
            offset (int, default 0): number of leading items to skip.
            limit  (int, default 100): maximum number of items returned.
            subcategory (optional): subcategory id to filter by.

        Responses:
            200 with product_list; 400 VALUE_ERROR for non-integer offset/limit.
        """
        try:
            offset         = int(request.GET.get('offset', 0))
            limit          = int(request.GET.get('limit', 100))
            subcategory_id = request.GET.get('subcategory', None)

            products = Product.objects.select_related('discount', 'subcategory').filter(subcategory=subcategory_id) if subcategory_id else Product.objects.all()

            # Bug fix: the original sliced products[offset:limit], treating
            # "limit" as an end index — e.g. offset=100&limit=100 returned an
            # empty page. [offset:offset + limit] returns up to `limit` items.
            product_list = [{
                'id'                  : product.id,
                'name'                : product.name,
                'subtitle'            : product.subtitle,
                'price'               : product.price,
                # discount is only select_related on the filtered branch; the
                # unfiltered branch keeps the original "0" placeholder string.
                'discount_percentage' : product.discount.percentage if subcategory_id else "0",
                'is_soldout'          : product.is_soldout,
                'image_url'           : product.image_url,
            } for product in products[offset:offset + limit]]
        except ValueError:
            return JsonResponse({'message': 'VALUE_ERROR'}, status = 400)
        return JsonResponse({'message': 'SUCCESS', 'product_list': product_list}, status = 200)
class CategoryView(View):
    """Return every category together with its nested subcategories."""

    def get(self, request):
        # prefetch_related loads all subcategory rows in one extra query.
        category_qs = Category.objects.prefetch_related('subcategory_set')

        payload = []
        for category in category_qs:
            subcategory_items = [
                {'id': sub.id, 'name': sub.name}
                for sub in category.subcategory_set.all()
            ]
            payload.append({
                'id'            : category.id,
                'name'          : category.name,
                'subcategories' : subcategory_items,
            })

        return JsonResponse({'message': 'SUCCESS', 'categories': payload}, status = 200)
class MdChoiceView(View):
    """Return every product with its subcategory info ("MD's choice" list)."""

    def get(self, request):
        try:
            queryset = Product.objects.select_related('discount', 'subcategory')

            product_list = []
            for item in queryset:
                product_list.append({
                    'id'                  : item.id,
                    'name'                : item.name,
                    'subtitle'            : item.subtitle,
                    'price'               : item.price,
                    'discount_percentage' : item.discount.percentage,
                    'is_soldout'          : item.is_soldout,
                    'image_url'           : item.image_url,
                    'subcategory_id'      : item.subcategory.id,
                    'subcategory_name'    : item.subcategory.name,
                })
        except ValueError:
            return JsonResponse({'message': 'VALUE_ERROR'}, status = 400)
        return JsonResponse({'message': 'SUCCESS', 'product_list': product_list}, status = 200)
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,758 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/migrations/0001_initial.py | # Generated by Django 3.1.4 on 2020-12-16 11:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the product app.

    Creates the lookup tables (categories, delivery_types, discounts,
    origins, packing_types, subcategories) and the products table that
    references them.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'categories',
            },
        ),
        migrations.CreateModel(
            name='DeliveryType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'delivery_types',
            },
        ),
        migrations.CreateModel(
            name='Discount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('percentage', models.DecimalField(decimal_places=2, max_digits=5)),
            ],
            options={
                'db_table': 'discounts',
            },
        ),
        migrations.CreateModel(
            name='Origin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'origins',
            },
        ),
        migrations.CreateModel(
            name='PackingType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('packing_type', models.CharField(max_length=45)),
                ('cart_packing_type', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'packing_types',
            },
        ),
        migrations.CreateModel(
            name='Subcategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='product.category')),
            ],
            options={
                'db_table': 'subcategories',
            },
        ),
        # Main product table; lookup FKs are nullable with PROTECT here
        # (later migrations may alter them — see the models module).
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('subtitle', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('sales_unit', models.DecimalField(decimal_places=2, max_digits=10)),
                ('weight', models.DecimalField(decimal_places=2, max_digits=10)),
                ('allergy', models.CharField(max_length=500)),
                ('expiration_date', models.DateField()),
                ('notice', models.CharField(max_length=200, null=True)),
                ('is_soldout', models.BooleanField(default=False)),
                ('image_url', models.URLField(max_length=2000)),
                ('delivery_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='product.deliverytype')),
                ('discount', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='product.discount')),
                ('origin', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='product.origin')),
                ('packing_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='product.packingtype')),
                ('subcategory', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='product.subcategory')),
            ],
            options={
                'db_table': 'products',
            },
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,759 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/models.py | from django.db import models
class Category(models.Model):
    """Top-level product category (table ``categories``)."""
    name = models.CharField(max_length=45)

    class Meta:
        db_table = 'categories'
class Subcategory(models.Model):
    """Second-level category; survives deletion of its parent via SET_NULL."""
    name = models.CharField(max_length=45)
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True)

    class Meta:
        db_table = 'subcategories'
class Discount(models.Model):
    """Named percentage discount applied to products (table ``discounts``)."""
    name = models.CharField(max_length=45)
    percentage = models.DecimalField(max_digits=5, decimal_places=2)

    class Meta:
        db_table = 'discounts'
class DeliveryType(models.Model):
    """Lookup table of delivery type names."""
    name = models.CharField(max_length=45)

    class Meta:
        db_table = 'delivery_types'
class Origin(models.Model):
    """Lookup table of product origin names."""
    name = models.CharField(max_length=45)

    class Meta:
        db_table = 'origins'
class PackingType(models.Model):
    """Lookup table of packing types; separate label for the cart display."""
    packing_type = models.CharField(max_length=45)
    cart_packing_type = models.CharField(max_length=45)

    class Meta:
        db_table = 'packing_types'
class Product(models.Model):
    """Sellable product row (table ``products``); categorical attributes are
    FKs to lookup tables, all nullable with SET_NULL on delete."""
    name = models.CharField(max_length=100)
    # NOTE(review): several max_length values here (subtitle 500, notice 500)
    # differ from the 0001_initial migration (100 / 200), and the FKs use
    # SET_NULL vs the migration's PROTECT — confirm a follow-up migration
    # (e.g. 0002) reconciles schema and model.
    subtitle = models.CharField(max_length=500)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    discount = models.ForeignKey(Discount, on_delete=models.SET_NULL, null=True)
    sales_unit = models.DecimalField(max_digits=10, decimal_places=2)
    weight = models.DecimalField(max_digits=10, decimal_places=2)
    delivery_type = models.ForeignKey(DeliveryType, on_delete=models.SET_NULL, null=True)
    origin = models.ForeignKey(Origin, on_delete=models.SET_NULL, null=True)
    packing_type = models.ForeignKey(PackingType, on_delete=models.SET_NULL, null=True)
    allergy = models.CharField(max_length=500)
    expiration_date = models.DateField()
    notice = models.CharField(max_length=500, null=True)
    is_soldout = models.BooleanField(default = False)
    image_url = models.URLField(max_length=2000)
    subcategory = models.ForeignKey(Subcategory, on_delete=models.SET_NULL, null=True)

    class Meta:
        db_table = 'products'
class DetailedImage(models.Model):
    """Detail-page image pair for a product; rows die with their product."""
    description_image_url = models.URLField(max_length=2000)
    product_image_url = models.URLField(max_length=2000)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)

    class Meta:
        db_table = 'detailed_images'
class ProductDescription(models.Model):
    """Long-form description text tied to a product and its detailed image."""
    content = models.TextField()
    detailed_image = models.ForeignKey(DetailedImage, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)

    class Meta:
        db_table = 'product_descriptions' | {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,760 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0002_auto_20201217_1745.py | # Generated by Django 3.1.4 on 2020-12-17 08:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated user-app migration: adds unique constraints to
    account/email/phone_number, creates the OftenBuying through table and
    wires the User.often_buying M2M through it."""

    dependencies = [
        ('product', '0001_initial'),
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='account',
            field=models.CharField(max_length=45, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='phone_number',
            field=models.CharField(max_length=45, unique=True),
        ),
        migrations.CreateModel(
            name='OftenBuying',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('product', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='product.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
            options={
                'db_table': 'often_buyings',
            },
        ),
        migrations.AddField(
            model_name='user',
            name='often_buying',
            field=models.ManyToManyField(related_name='often_buying_set', through='user.OftenBuying', to='product.Product'),
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,761 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/models.py | from django.db import models
from product.models import Product
class User(models.Model):
    """Customer account row (table ``users``)."""
    account = models.CharField(max_length=45, unique=True)       # login id
    # NOTE(review): length 2000 suggests a hash is stored here (SignupView
    # writes a bcrypt hash) — never plaintext; confirm all write paths.
    password = models.CharField(max_length=2000)
    name = models.CharField(max_length=45)
    email = models.EmailField(max_length=254, unique=True)
    phone_number = models.CharField(max_length=45, unique=True)
    gender = models.ForeignKey('Gender', on_delete=models.PROTECT)
    birth_date = models.DateField(null=True)
    recommender = models.CharField(max_length=45, null=True)     # optional signup field
    event_name = models.CharField(max_length=100, null=True)     # optional signup field
    grade = models.ForeignKey('Grade', on_delete=models.PROTECT)
    terms_and_condition = models.ForeignKey('TermsAndCondition', on_delete=models.PROTECT)
    mileage = models.DecimalField(max_digits=10, decimal_places=2)
    # M2M to products via the OftenBuying through table.
    often_buying = models.ManyToManyField(Product, through="OftenBuying", related_name='often_buying_set')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'users'
class Gender(models.Model):
    """Lookup table of gender labels."""
    gender = models.CharField(max_length=10)

    class Meta:
        db_table = 'genders'
class Grade(models.Model):
    """Lookup table of membership grade labels."""
    grade = models.CharField(max_length=10)

    class Meta:
        db_table = 'grades'
class TermsAndCondition(models.Model):
    """Per-agreement flag set referenced by User.terms_and_condition."""
    privacy_policy_agreement = models.BooleanField(default=False)
    sns_marketing_agreement = models.BooleanField(default=False)
    email_marketing_agreement = models.BooleanField(default=False)

    class Meta:
        db_table = 'terms_and_conditions'
class Address(models.Model):
    """Delivery address; is_active marks the user's currently selected one."""
    name = models.CharField(max_length=200)
    user = models.ForeignKey('User', on_delete=models.CASCADE)
    is_active = models.BooleanField(default=False)

    class Meta:
        db_table = 'addresses'
class OftenBuying(models.Model):
    """Through table for the User.often_buying M2M relation."""
    user = models.ForeignKey("User", on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)

    class Meta:
        db_table = 'often_buyings'
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,762 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/views.py | import json, bcrypt, jwt
import re
from django.views import View
from django.http import JsonResponse
from django.db import transaction
from .models import User, Gender, Grade, TermsAndCondition, Address
from my_settings import SECRET_KEY, ALGORITHM
from datetime import datetime, timedelta
from .utils import signin_decorator
# Create your views here.
class SignupView(View):
    """Sign-up endpoint: validates the payload format, then creates the User
    and their first Address in one transaction.

    All regex patterns are raw strings (the originals used plain strings
    with ``\\d``/``\\.`` escapes, which raise SyntaxWarning on Python 3.12+).
    """

    def validate_account(self, account):
        # Account rule: 6+ chars of lowercase letters, or lowercase letters
        # combined with digits.
        REGEX_ACCOUNT_1 = r'^[a-z]{6,}$'                    # letters only, 6+
        REGEX_ACCOUNT_2 = r'^(?=.*[0-9])(?=.*[a-z]).{6,}$'  # letters + digits, 6+
        return re.match(REGEX_ACCOUNT_1, account) or re.match(REGEX_ACCOUNT_2, account)

    def validate_password(self, password):
        # Password rule: 10+ chars combining at least two of
        # letters / digits / special characters.
        REGEX_PASSWORD_1 = r'^(?=.*[0-9])(?=.*[a-zA-Z]).{10,}$'                     # letters + digits
        REGEX_PASSWORD_2 = r'^(?=.*[!@#$%^&*()_+])(?=.*[a-zA-Z]).{10,}$'            # letters + specials
        REGEX_PASSWORD_3 = r'^(?=.*[0-9])(?=.*[!@#$%^&*()_+]).{10,}$'               # digits + specials
        REGEX_PASSWORD_4 = r'^(?=.*[0-9])(?=.*[!@#$%^&*()_+])(?=.*[a-zA-Z]).{10,}$' # all three
        return re.match(REGEX_PASSWORD_1, password) or re.match(REGEX_PASSWORD_2, password) \
            or re.match(REGEX_PASSWORD_3, password) or re.match(REGEX_PASSWORD_4, password)

    def validate_email(self, email):
        # Must contain "@" and a dotted domain part.
        REGEX_EMAIL = r'^[a-zA-Z0-9+-_.]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
        return re.match(REGEX_EMAIL, email)

    def validate_phone_number(self, phone_number):
        # Expected shape: ddd-ddd(d)-dddd.
        REGEX_PHONE_NUMBER = r'^\d{3}?[-]\d{3,4}?[-]\d{4}$'
        return re.match(REGEX_PHONE_NUMBER, phone_number)

    def post(self, request):
        """Create a user: 201 SUCCESS, 409 USER_EXIST, 400 for bad/missing fields."""
        data = json.loads(request.body)
        try:
            if User.objects.filter(account=data['account']).exists():
                return JsonResponse({"message":"USER_EXIST"}, status=409)
            if not self.validate_account(data['account']):
                return JsonResponse({"message":"INVALID_ACCOUNT"}, status=400)
            if not self.validate_password(data['password']):
                return JsonResponse({"message": "INVALID_PW"}, status=400)
            if not self.validate_email(data['email']):
                return JsonResponse({"message":"INVALID_EMAIL"}, status=400)
            if not self.validate_phone_number(data['phone_number']):
                return JsonResponse({"message":"INVALID_PHONE_NUMBER"}, status=400)

            # Store only the bcrypt hash of the password.
            hashed_pw = bcrypt.hashpw(data['password'].encode('utf-8'), bcrypt.gensalt()).decode('utf-8')

            # Optional signup fields default to None when absent.
            birth_date  = data.get('birth_date')
            recommender = data.get('recommender')
            event_name  = data.get('event_name')

            # Create the user and their first address atomically.
            with transaction.atomic():
                user_model = User(
                    account                = data['account'],
                    password               = hashed_pw,
                    name                   = data['name'],
                    email                  = data['email'],
                    phone_number           = data['phone_number'],
                    gender_id              = data['gender_id'],
                    birth_date             = birth_date,
                    recommender            = recommender,
                    event_name             = event_name,
                    grade_id               = 1,  # default starting grade row
                    # Consistency fix: use the *_id shortcut like gender_id /
                    # grade_id instead of an unsaved TermsAndCondition(id=1).
                    terms_and_condition_id = 1,
                    mileage                = 0
                )
                user_model.save()
                Address(
                    name      = data['address'],
                    user_id   = user_model.id,
                    is_active = True  # the first address becomes the active one
                ).save()
            return JsonResponse({"message":"SUCCESS"}, status=201)
        except KeyError as e:
            # Surface which required key was missing.
            return JsonResponse({"message":"KEY_ERROR =>" + e.args[0]}, status=400)
class CheckAccountView(View):
    """Availability/format check for a requested account ID."""

    def post(self, request):
        """400 if empty, taken, or malformed; 200 SUCCESS otherwise.

        Bug fixes vs. the original: it referenced the undefined names
        ``user_model`` and ``account`` (NameError on every request) and
        returned a bare ``re.match`` result instead of a JsonResponse,
        making the SUCCESS branch unreachable.
        """
        data = json.loads(request.body)
        REGEX_ACCOUNT_1 = r'^[a-z]{6,}$'                    # letters only, 6+
        REGEX_ACCOUNT_2 = r'^(?=.*[0-9])(?=.*[a-z]).{6,}$'  # letters + digits, 6+
        try:
            account = data['account']
            if account == '':
                return JsonResponse({"message":"ACCOUNT_NOT_ENTERED"}, status=400)
            if User.objects.filter(account=account).exists():
                return JsonResponse({"message":"ACCOUNT_EXIST"}, status=400)
            if not (re.match(REGEX_ACCOUNT_1, account) or re.match(REGEX_ACCOUNT_2, account)):
                return JsonResponse({"message":"INVALID_ACCOUNT"}, status=400)
            return JsonResponse({"message":"SUCCESS"}, status=200)
        except KeyError:
            return JsonResponse({"message":"KEY_ERROR"}, status=400)
class CheckEmailView(View):
    """Availability/format check for an email address."""

    def post(self, request):
        """400 if empty, taken, or malformed; 200 SUCCESS otherwise.

        Bug fixes vs. the original: it referenced the undefined names
        ``user_model`` and ``email`` (NameError on every request) and
        returned a bare ``re.match`` result instead of a JsonResponse,
        making the SUCCESS branch unreachable.
        """
        data = json.loads(request.body)
        REGEX_EMAIL = r'^[a-zA-Z0-9+-_.]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
        try:
            email = data['email']
            if email == '':
                return JsonResponse({"message":"EMAIL_NOT_ENTERED"}, status=400)
            if User.objects.filter(email=email).exists():
                return JsonResponse({"message":"EMAIL_EXIST"}, status=400)
            if not re.match(REGEX_EMAIL, email):
                return JsonResponse({"message":"INVALID_EMAIL"}, status=400)
            return JsonResponse({"message":"SUCCESS"}, status=200)
        except KeyError:
            return JsonResponse({"message":"KEY_ERROR"}, status=400)
class SigninView(View):
    """Sign-in endpoint: verifies credentials and issues a 24h JWT."""

    def post(self, request):
        """201 + ACCESS_TOKEN on success; 404 unknown account;
        401 wrong password; 400 missing keys."""
        data = json.loads(request.body)
        try:
            if not User.objects.filter(account=data['account']).exists():
                return JsonResponse({"message":"USER_NOT_EXIST"}, status=404)
            # The guard above makes the original's second .exists() branch
            # redundant, so it is dropped here.
            user = User.objects.get(account=data['account'])

            # Bug fix: the original also computed an unused
            # bcrypt.hashpw(...) of the submitted password — a wasted
            # full KDF run per login. checkpw alone is the correct check.
            if bcrypt.checkpw(data['password'].encode('utf-8'), user.password.encode('utf-8')):
                # NOTE(review): .decode('utf-8') assumes PyJWT < 2.0 where
                # jwt.encode returns bytes — confirm the pinned version.
                access_token = jwt.encode({'id':user.account, 'exp': datetime.utcnow() + timedelta(hours=24)}, SECRET_KEY, ALGORITHM).decode('utf-8')
                return JsonResponse({"ACCESS_TOKEN":access_token}, status=201)
            return JsonResponse({"message": "INVALID_USER"}, status = 401)
        except KeyError:
            return JsonResponse({"message":"KEY_ERROR"}, status=400)
class FindAccountView(View):
    """Check whether an account exists (account-recovery helper)."""

    def post(self, request):
        payload = json.loads(request.body)
        account_exists = User.objects.filter(account=payload['account']).exists()
        if account_exists:
            print (payload['account'])
            return JsonResponse({"message":"Success"}, status=200)
        return JsonResponse({"message":"fail"}, status=400)
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,763 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0006_merge_20201222_1547.py | # Generated by Django 3.1.4 on 2020-12-22 06:47
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated empty merge migration reconciling the two divergent
    user-app migration branches (0005 and 0004); performs no operations."""

    dependencies = [
        ('user', '0005_auto_20201219_1312'),
        ('user', '0004_remove_oftenbuying_quantity'),
    ]

    operations = [
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,764 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /board/migrations/0001_initial.py | # Generated by Django 3.1.4 on 2020-12-16 11:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the board app.

    Creates the questions, reviews and question_comments tables; author and
    product FKs point into the user and product apps.
    """

    initial = True

    dependencies = [
        ('product', '0001_initial'),
        ('user', '0001_initial'),
    ]

    operations = [
        # SET_DEFAULT keeps posts alive when their author/product is deleted.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('contents', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('is_private', models.BooleanField(default=False)),
                ('has_comment', models.BooleanField(default=False)),
                ('author', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='user.user')),
                ('product', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='product.product')),
            ],
            options={
                'db_table': 'questions',
            },
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('contents', models.TextField()),
                ('help_count', models.IntegerField()),
                ('hit_count', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('review_image_url', models.URLField(max_length=2000)),
                ('author', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='user.user')),
                ('product', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='product.product')),
            ],
            options={
                'db_table': 'reviews',
            },
        ),
        # Comments cascade-delete with their question.
        migrations.CreateModel(
            name='QuestionComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contents', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='board.question')),
            ],
            options={
                'db_table': 'question_comments',
            },
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,765 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /product/migrations/0003_detailedimage_productdescription.py | # Generated by Django 3.1.4 on 2020-12-19 05:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.4): creates the
    DetailedImage and ProductDescription tables for the product app.

    Both models hard-delete with their parent product (CASCADE).
    """
    dependencies = [
        ('product', '0002_auto_20201217_2010'),
    ]
    operations = [
        migrations.CreateModel(
            name='DetailedImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description_image_url', models.URLField(max_length=2000)),
                ('product_image_url', models.URLField(max_length=2000)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.product')),
            ],
            options={
                'db_table': 'detailed_images',
            },
        ),
        migrations.CreateModel(
            name='ProductDescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('detailed_image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.detailedimage')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.product')),
            ],
            options={
                'db_table': 'product_descriptions',
            },
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,766 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /board/models.py | from django.db import models
from user.models import User
from product.models import Product
class Review(models.Model):
    """A product review written by a user.

    ``author``/``product`` are nulled (SET_NULL) rather than cascading,
    so reviews survive user/product deletion.
    NOTE(review): the field is named ``image_url`` here while the
    migration at 0002 created ``review_image_url`` — confirm the two are
    in sync.
    """
    author       = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    product      = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
    title        = models.CharField(max_length=200)
    contents     = models.TextField()
    help_count   = models.IntegerField()
    hit_count    = models.IntegerField()
    created_at   = models.DateTimeField(auto_now_add=True)
    updated_at   = models.DateField(auto_now=True)
    image_url    = models.URLField(max_length=2000)
    class Meta:
        db_table = 'reviews'
class Question(models.Model):
    """A product question; ``is_private`` hides it from other users.

    FKs use SET_NULL so questions outlive their author/product.
    """
    author      = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    product     = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
    title       = models.CharField(max_length=200)
    contents    = models.TextField()
    created_at  = models.DateTimeField(auto_now_add=True)
    updated_at  = models.DateField(auto_now=True)
    is_private  = models.BooleanField(default=False)
    class Meta:
        db_table = 'questions'
class QuestionComment(models.Model):
    """A (staff) reply attached to a Question.

    NOTE(review): the model declares a ``title`` field but the visible
    migration creating ``question_comments`` does not — a follow-up
    migration may be missing; verify against the schema.
    """
    title       = models.CharField(max_length=200)
    contents    = models.TextField()
    created_at  = models.DateTimeField(auto_now_add=True)
    updated_at  = models.DateField(auto_now=True)
    question    = models.ForeignKey(Question, on_delete=models.SET_NULL, null=True)
    class Meta:
        db_table = 'question_comments'
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,767 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /order/migrations/0002_auto_20201217_1745.py | # Generated by Django 3.1.4 on 2020-12-17 08:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: wires Order<->Product through the
    Cart table, re-points Cart.order, and drops the old OftenBuying model
    (it was moved to the user app)."""
    dependencies = [
        ('product', '0001_initial'),
        ('order', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='cart',
            field=models.ManyToManyField(related_name='order_cart_set', through='order.Cart', to='product.Product'),
        ),
        migrations.AlterField(
            model_name='cart',
            name='order',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='cart_order_set', to='order.order'),
        ),
        migrations.DeleteModel(
            name='OftenBuying',
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,768 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /user/migrations/0004_auto_20201217_1957.py | # Generated by Django 3.1.4 on 2020-12-17 10:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: tightens several User fields
    (drops blank-string defaults via preserve_default=False), removes the
    user.often_buying field, and deletes the user app's OftenBuying
    model."""
    dependencies = [
        ('user', '0003_auto_20201217_1756'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='user',
            name='often_buying',
        ),
        migrations.AlterField(
            model_name='user',
            name='account',
            field=models.CharField(max_length=45),
        ),
        migrations.AlterField(
            model_name='user',
            name='birth_date',
            field=models.DateField(blank=True, default='2020-12-31'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='user',
            name='event_name',
            field=models.CharField(blank=True, default='', max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='gender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.gender'),
        ),
        migrations.AlterField(
            model_name='user',
            name='grade',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.grade'),
        ),
        migrations.AlterField(
            model_name='user',
            name='phone_number',
            field=models.CharField(max_length=45),
        ),
        migrations.AlterField(
            model_name='user',
            name='recommender',
            field=models.CharField(blank=True, default='', max_length=45),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='terms_and_condition',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.termsandcondition'),
        ),
        migrations.DeleteModel(
            name='OftenBuying',
        ),
    ]
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,769 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /order/views.py | import json
from datetime import datetime
from django.http import JsonResponse
from django.views import View
from django.db import transaction
from order.models import Cart, Order
from user.models import Address, OftenBuying, User
from product.models import Product
from user.utils import signin_decorator
class CartView(View):
    """Endpoints for the signed-in user's shopping cart.

    The "cart" is the set of Cart rows attached to the user's open Order
    (status_id == 1); each HTTP verb below manipulates those rows.
    """

    @signin_decorator
    @transaction.atomic
    def post(self, request):
        """Add a product to the cart, creating the open order if needed.

        Expects JSON body {"product_id": ..., "quantity": ...}.
        Returns 200 when an existing cart line is incremented, 201 when a
        new line is created, 400 on a missing key.
        """
        try:
            data = json.loads(request.body)
            # Reuse the user's open order (status_id=1) or start a new one.
            new_order, created = Order.objects.get_or_create(
                user_id           = request.user.id,
                status_id         = 1,
                address_id        = Address.objects.get(user_id=request.user.id, is_active=1).id,
                payment_method_id = 1,
            )
            if created:
                # Stamp a unique, human-readable order number on first creation.
                new_order.order_number = f"{request.user.id}-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')}"
                new_order.save()
            new_order_id = new_order.id
            if Cart.objects.filter(order_id=new_order_id, product_id=data['product_id']).exists():
                # Same product already in the cart: bump the quantity
                # instead of creating a duplicate line.
                product_in_cart = Cart.objects.get(order_id=new_order_id, product_id=data['product_id'])
                product_in_cart.quantity += int(data['quantity'])
                product_in_cart.save()
                return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
            Cart.objects.create(
                order_id    = new_order_id,
                product_id  = data['product_id'],
                quantity    = data['quantity'],
                is_selected = True,
            )
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=201)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)

    @signin_decorator
    def get(self, request):
        """List every item in the user's open cart."""
        try:
            cart = Cart.objects.filter(order__user_id=request.user.id, order__status=1).prefetch_related("product__discount", "product__packing_type")
            items_in_cart = [{
                "id"                : item.id,
                "product_id"        : item.product.id,
                "name"              : item.product.name,
                "quantity"          : item.quantity,
                "price"             : item.product.price,
                "discount_rate"     : float(item.product.discount.percentage),
                "is_soldout"        : item.product.is_soldout,
                "cart_packing_type" : item.product.packing_type.cart_packing_type,
                "image_url"         : item.product.image_url,
                "selected"          : item.is_selected,
            }for item in cart]
            return JsonResponse({"MESSAGE": "SUCCESS", "items_in_cart": items_in_cart}, status=200)
        except Cart.DoesNotExist:
            # filter() never raises DoesNotExist, so this branch is
            # defensive only; an empty queryset yields an empty list above.
            return JsonResponse({"MESSAGE": "SUCCESS", "items_in_cart":[]}, status=200)

    @signin_decorator
    def delete(self, request):
        """Remove one cart line by its id.

        NOTE(review): ownership of the cart item is not verified against
        request.user, and a 204 response should carry no body per the
        HTTP spec — both kept as-is to preserve the existing API.
        """
        try:
            data = json.loads(request.body)
            item = Cart.objects.get(id=data['cart_item_id'])
            item.delete()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=204)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)
        except Cart.DoesNotExist:
            return JsonResponse({"MESSAGE": "ITEM_DOES_NOT_EXIST"}, status=400)

    @signin_decorator
    def patch(self, request):
        """Adjust a cart line: quantity +/-1 via ``delta``, or toggle the
        checkbox via ``select`` ("True"/"False")."""
        try:
            data = json.loads(request.body)
            delta = data.get('delta')
            select = data.get('select')
            cart_item = Cart.objects.get(id=data['cart_item_id'])
            if delta == "minus":
                if cart_item.quantity == 1:
                    return JsonResponse({'MESSAGE': 'ITEM QUANTITY IS 1'}, status=400)
                cart_item.quantity -= 1
            elif delta == "plus":
                cart_item.quantity += 1
            elif select == "True":
                cart_item.is_selected = True
            elif select == "False":
                cart_item.is_selected = False
            else:
                return JsonResponse({"MESSAGE": "KEY_ERROR => IMPROPER delta OR select"}, status=400)
            cart_item.save()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=201)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)
        except Cart.DoesNotExist:
            # Fix: this handler previously mislabelled a missing item as a
            # KEY_ERROR and leaked the raw ORM message; report it the same
            # way delete() does.
            return JsonResponse({"MESSAGE": "ITEM_DOES_NOT_EXIST"}, status=400)
class OftenBuyingView(View):
    """Endpoints for the user's "often buying" (favourites) list."""

    @signin_decorator
    def post(self, request):
        """Add a product to the list; rejects unknown or duplicate products."""
        try:
            data = json.loads(request.body)
            if not Product.objects.filter(id=data['product_id']).exists():
                return JsonResponse({'MESSAGE': 'PRODUCT_DOES_NOT_EXIST'}, status=400)
            if OftenBuying.objects.filter(user_id=request.user.id, product_id=data['product_id']).exists():
                return JsonResponse({'MESSAGE': 'PRODUCT_ALREADY_EXIST_IN_OFTEN_BUYING'}, status=400)
            OftenBuying.objects.create(
                user_id    = request.user.id,
                product_id = data['product_id'],
            )
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=201)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + str(e.args[0])}, status=400)

    @signin_decorator
    def get(self, request):
        """List the signed-in user's often-buying products."""
        try:
            user = User.objects.get(id=request.user.id)
            often_buying = user.oftenbuying_set.all().select_related('product')
            items_in_often_buying = [{
                "id"         : item.id,
                "product_id" : item.product.id,
                "name"       : item.product.name,
                "price"      : item.product.price,
                "image_url"  : item.product.image_url,
            }for item in often_buying]
            return JsonResponse({"MESSAGE": "SUCCESS", "items_in_often_buying": items_in_often_buying}, status=200)
        except (User.DoesNotExist, OftenBuying.DoesNotExist):
            # Fixes: (1) .all() never raises DoesNotExist — the lookup that
            # can actually fail is User.objects.get, now caught; (2) the
            # empty fallback previously returned the copy-pasted key
            # "items_in_cart" instead of "items_in_often_buying".
            return JsonResponse({"MESSAGE": "SUCCESS", "items_in_often_buying": []}, status=200)

    @signin_decorator
    def delete(self, request):
        """Remove one favourite by its row id."""
        try:
            data = json.loads(request.body)
            item = OftenBuying.objects.get(id=data['often_buying_item_id'])
            item.delete()
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=204)
        except KeyError as e:
            return JsonResponse({"MESSAGE": "KEY_ERROR => " + e.args[0]}, status=400)
        except OftenBuying.DoesNotExist:
            return JsonResponse({"MESSAGE": "ITEM_DOES_NOT_EXIST"}, status=400)
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,770 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /order/models.py | from django.db import models
class Order(models.Model):
    """A user's order; the row with the "open" status doubles as the cart.

    FKs use PROTECT so an order can never lose its user/status/address.
    """
    user           = models.ForeignKey("user.User", on_delete=models.PROTECT)
    order_number   = models.CharField(max_length=45)
    status         = models.ForeignKey("OrderStatus", on_delete=models.PROTECT)
    address        = models.ForeignKey("user.Address", on_delete=models.PROTECT)
    payment_method = models.ForeignKey("PaymentMethod", on_delete=models.PROTECT)
    cart           = models.ManyToManyField("product.Product", through="Cart", related_name="order_cart_set")
    created_at     = models.DateTimeField(auto_now_add=True)
    updated_at     = models.DateField(auto_now=True)
    class Meta:
        db_table = 'orders'
class OrderStatus(models.Model):
    """Lookup table of order states (id=1 is treated as the open cart by the views)."""
    name = models.CharField(max_length=45)
    class Meta:
        db_table = 'order_status'
class PaymentMethod(models.Model):
    """Lookup table of payment methods."""
    name = models.CharField(max_length=45)
    class Meta:
        db_table = 'payment_method'
class Cart(models.Model):
    """Through model of Order<->Product: one line item in an order/cart."""
    order       = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True, related_name="cart_order_set")
    product     = models.ForeignKey("product.Product", on_delete=models.SET_NULL, null=True)
    quantity    = models.IntegerField(default=1)
    is_selected = models.BooleanField(null=True, default=False)
    class Meta:
        db_table = 'carts'
class Coupons(models.Model):
    """A percentage-discount coupon owned by one user.

    NOTE(review): class name is plural, unlike the other models here.
    """
    name                = models.CharField(max_length=45)
    discount_percentage = models.DecimalField(max_digits=5, decimal_places=2)
    user                = models.ForeignKey("user.User", on_delete=models.CASCADE)
    class Meta:
        db_table = 'coupons'
| {"/board/views.py": ["/board/models.py", "/user/utils.py"], "/board/urls.py": ["/board/views.py"], "/user/utils.py": ["/user/models.py"], "/user/urls.py": ["/user/views.py"], "/product/urls.py": ["/product/views.py"], "/product/views.py": ["/product/models.py"], "/user/models.py": ["/product/models.py"], "/user/views.py": ["/user/models.py", "/user/utils.py"], "/board/models.py": ["/user/models.py", "/product/models.py"], "/order/views.py": ["/order/models.py", "/user/models.py", "/product/models.py", "/user/utils.py"], "/order/urls.py": ["/order/views.py"]} |
65,771 | wecode-bootcamp-korea/15-1st-MarketBully-backend | refs/heads/master | /order/urls.py | from django.urls import path
from .views import CartView, OftenBuyingView
# Order-app routes.  NOTE(review): paths begin with a leading slash,
# which only resolves if the project urls.py include()s this app with a
# prefix that has no trailing slash — confirm against the root urlconf.
urlpatterns = [
    path('/cart', CartView.as_view()),
    path('/often_buying', OftenBuyingView.as_view()),
]
65,805 | ajayiezekiel/linuxjobber | refs/heads/master | /ajayiezekiel9000scrumy/urls.py | from django.urls import path
from ajayiezekiel9000scrumy import views
# Scrumy-app routes: landing view, goal navigation, goal creation, home.
urlpatterns= [
    path('', views.get_grading_parameters, name="get_grading_parameters"),
    path('movegoal/<int:goal_id>', views.move_goal, name="movegoal" ),
    path('addgoal/', views.add_goal, name="addgoal"),
    path('home/', views.home, name="home"),
]
65,806 | ajayiezekiel/linuxjobber | refs/heads/master | /ajayiezekiel9000scrumy/migrations/0002_auto_20200515_2252.py | # Generated by Django 3.0.6 on 2020-05-15 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: widens GoalStatus.status_name to 150 chars."""
    dependencies = [
        ('ajayiezekiel9000scrumy', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='goalstatus',
            name='status_name',
            field=models.CharField(max_length=150),
        ),
    ]
| {"/ajayiezekiel9000scrumy/views.py": ["/ajayiezekiel9000scrumy/models.py"]} |
65,807 | ajayiezekiel/linuxjobber | refs/heads/master | /django-ajayiezekiel9000scrumy/ajayiezekiel9000scrumy/urls.py | from django.urls import path
from ajayiezekiel9000scrumy import views
# Single landing route for the scrumy app (early project skeleton).
urlpatterns= [
    path('', views.get_grading_parameters, name="get_grading_parameters")
]
65,808 | ajayiezekiel/linuxjobber | refs/heads/master | /django-ajayiezekiel9000scrumy/ajayiezekiel9000scrumy/views.py | from django.http import HttpResponse
def get_grading_parameters(request):
    """Landing view: return a static plain-text greeting."""
    greeting = "Welcome to Django"
    return HttpResponse(greeting)
65,809 | ajayiezekiel/linuxjobber | refs/heads/master | /ajayiezekiel9000scrumy/views.py | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth.models import User
from .models import GoalStatus, ScrumyGoals, ScrumyHistory
import random
def get_grading_parameters(request):
    """Return a plain-text response showing the goal with primary key 1."""
    return HttpResponse(ScrumyGoals.objects.get(pk=1))
def move_goal(request, goal_id):
    """Show the name of the goal with primary key *goal_id*.

    Renders the friendly exception template when no such row exists.
    """
    error_message = "A record with that goal id does not exist"
    try:
        goal = ScrumyGoals.objects.get(pk=goal_id)
    except ScrumyGoals.DoesNotExist:
        # Only a missing row should get the friendly page; the original
        # bare ``except Exception`` also swallowed unrelated errors.
        return render(request, 'ajayiezekiel9000scrumy/exception.html', {'error': error_message})
    return HttpResponse(goal.goal_name)
def add_goal(request):
    """Create a demo 'Keep Learning Django' weekly goal for user *louis*
    with a fresh random 4-digit goal_id, and echo the goal back.

    Fixes two defects in the original:
    * ``sample_dict`` was a local recreated (empty) on every request, so
      the duplicate check never did anything — we now re-draw until the
      goal_id is unused in the database.
    * the goal was re-fetched with ``get(pk=number)``, but ``pk`` is the
      auto-increment id, not ``goal_id``, so the lookup usually raised
      DoesNotExist — we return the freshly created row instead.
    """
    weekly = GoalStatus.objects.get(status_name='Weekly Goal')
    the_user = User.objects.get(username='louis')
    number = random.randint(1000, 9999)
    while ScrumyGoals.objects.filter(goal_id=number).exists():
        number = random.randint(1000, 9999)
    goal = ScrumyGoals.objects.create(
        goal_name='Keep Learning Django',
        goal_id=number,
        created_by='Louis',
        moved_by='Louis',
        owner='Louis',
        goal_status=weekly,
        user=the_user,
    )
    return HttpResponse(goal)
def home(request):
    """Render the scrumy home page listing every goal."""
    context = {'goals': ScrumyGoals.objects.all()}
    return render(request, 'ajayiezekiel9000scrumy/home.html', context)
| {"/ajayiezekiel9000scrumy/views.py": ["/ajayiezekiel9000scrumy/models.py"]} |
65,810 | ajayiezekiel/linuxjobber | refs/heads/master | /ajayiezekiel9000scrumy/models.py | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class GoalStatus(models.Model):
    """Lookup table of goal states (e.g. 'Weekly Goal')."""
    status_name = models.CharField(max_length = 150)
    def __str__(self):
        return self.status_name
class ScrumyGoals(models.Model):
    """A scrumy board goal.

    ``goal_id`` is an application-level random identifier, distinct from
    the auto-increment primary key.  PROTECT keeps goals from deleting
    their status or owning user.
    """
    goal_name = models.CharField(max_length = 250)
    goal_id = models.IntegerField(default = 1)
    created_by = models.CharField(max_length = 250)
    moved_by = models.CharField(max_length = 250)
    owner = models.CharField(max_length = 250)
    goal_status = models.ForeignKey(GoalStatus, on_delete = models.PROTECT)
    user = models.ForeignKey(User, related_name='player', on_delete = models.PROTECT)
    def __str__(self):
        return self.goal_name
class ScrumyHistory(models.Model):
    """Audit trail of goal moves; rows cascade-delete with their goal."""
    moved_by = models.CharField(max_length = 250)
    created_by = models.CharField(max_length = 250)
    moved_from = models.CharField(max_length = 250)
    moved_to = models.CharField(max_length = 250)
    time_of_action = models.DateTimeField(default = timezone.now)
    goal = models.ForeignKey(ScrumyGoals, on_delete = models.CASCADE)
    def __str__(self):
        return self.created_by
| {"/ajayiezekiel9000scrumy/views.py": ["/ajayiezekiel9000scrumy/models.py"]} |
65,813 | luoheng/TCssrbm | refs/heads/master | /CrossCorrelation.py | import numpy
class CrossCorrelation(object):
    """Normalized cross-correlation (NCC) between one test image and a
    batch of sample images.

    ``test_image`` has shape (channels, rows, cols); ``samples`` has
    shape (n_samples, channels, rows, cols).  On construction,
    ``window_size`` x ``window_size`` patches are cut from every sample
    (randomly when the samples are larger than the window) and
    L2-normalized; ``NCC``/``TSS`` then slide them over the test image.

    Fixes vs. the original: ``xrange`` -> ``range`` (identical iteration,
    also runs under Python 3) and the bare ``except:`` in ``TSS`` is
    narrowed to ``AttributeError``.
    """

    def __init__(self, test_image, samples, window_size, seed=98987, n_patches_of_samples=0):
        """Cut and L2-normalize patches from *samples*.

        When the samples are exactly window-sized, one patch per sample
        is used and ``n_patches_of_samples`` is ignored; otherwise
        ``n_patches_of_samples`` random patches are drawn per sample and
        must be > 0.
        """
        # NOTE(review): an earlier revision min-max normalized both
        # inputs here; it was disabled upstream and is intentionally
        # left out.
        self.test_image = test_image
        self.samples = samples
        self.window_size = window_size
        self.rng = numpy.random.RandomState(seed)
        n_samples, channels_samples, rows_samples, cols_samples = self.samples.shape
        channels_test, rows_test, cols_test = self.test_image.shape
        assert channels_test == channels_samples
        assert rows_test >= window_size
        assert cols_test >= window_size
        assert rows_samples >= window_size
        assert cols_samples >= window_size
        if rows_samples > window_size or cols_samples > window_size:
            assert n_patches_of_samples > 0
            self.patches = numpy.zeros((n_samples, n_patches_of_samples, channels_samples, window_size, window_size), self.samples.dtype)
            for samples_index in range(n_samples):
                # Random top-left corners for every patch of this sample.
                offsets_row = self.rng.randint(rows_samples - window_size + 1, size=n_patches_of_samples)
                offsets_col = self.rng.randint(cols_samples - window_size + 1, size=n_patches_of_samples)
                for n, (r, c) in enumerate(zip(offsets_row, offsets_col)):
                    temp_patch = self.samples[samples_index, :, r:r + window_size, c:c + window_size]
                    temp_patch = temp_patch / numpy.sqrt((temp_patch ** 2).sum())
                    self.patches[samples_index, n, :, :, :] = temp_patch
        else:
            # Samples are exactly window-sized: one normalized patch each.
            # NOTE(review): this branch allocates with the default float64
            # dtype rather than self.samples.dtype — kept as-is to avoid
            # changing numerics; confirm whether it should match.
            self.patches = numpy.zeros((n_samples, 1, channels_samples, rows_samples, cols_samples))
            for samples_index in range(n_samples):
                temp_patch = self.samples[samples_index,]
                self.patches[samples_index, 0,] = temp_patch / numpy.sqrt((temp_patch ** 2).sum())

    def NCC(self):
        """Slide every stored patch over the test image.

        Returns (and caches in ``self.value_NCC``) the correlation map of
        shape (n_samples, n_patches, rows_test - prows + 1,
        cols_test - pcols + 1).
        """
        channels_test, rows_test, cols_test = self.test_image.shape
        n_samples, n_patches, channels_patches, rows_patches, cols_patches = self.patches.shape
        value_NCC = numpy.zeros((n_samples, n_patches, rows_test - rows_patches + 1, cols_test - cols_patches + 1))
        for samples_index in range(n_samples):
            for n_patches_index in range(n_patches):
                rc_patch = self.patches[samples_index, n_patches_index,]
                for row_index in range(rows_test - rows_patches + 1):
                    for col_index in range(cols_test - cols_patches + 1):
                        # L2-normalize the test-image window, then take the
                        # inner product with the (already normalized) patch.
                        temp_patch = self.test_image[:, row_index:row_index + rows_patches, col_index:col_index + cols_patches]
                        temp_patch = temp_patch / numpy.sqrt((temp_patch ** 2).sum())
                        value_NCC[samples_index, n_patches_index, row_index, col_index] = numpy.dot(
                            rc_patch.reshape(1, channels_patches * rows_patches * cols_patches),
                            temp_patch.reshape(1, channels_patches * rows_patches * cols_patches).T)
        self.value_NCC = value_NCC
        return value_NCC

    def TSS(self):
        """Return, per sample, the maximum NCC over all patches and all
        window positions, computing the map on first use."""
        try:
            value_NCC = self.value_NCC
        except AttributeError:
            # First call: the map has not been cached yet.  (The original
            # bare ``except:`` also hid unrelated errors.)
            value_NCC = self.NCC()
        return numpy.amax(numpy.amax(numpy.amax(value_NCC, 1), 1), 1)
def NCC(test_imgs, inpainted_imgs):
    """Per-sample normalized cross-correlation of two image batches.

    Both arguments have shape (n_samples, n_channels, rows, cols); the
    result is a length-n_samples vector where entry i is the cosine
    similarity (inner product of the L2-normalized images) of pair i.
    Fix vs. the original: ``range`` instead of the Python-2-only
    ``xrange`` (iteration is identical; the module also runs on Python 3).
    """
    # NOTE(review): an earlier revision min-max normalized the batches
    # here; it was disabled upstream and is intentionally left out.
    n_samples, n_channels, n_test_rows, n_test_cols = test_imgs.shape
    n_samples_, n_channels_, n_samples_rows_, n_samples_cols_ = inpainted_imgs.shape
    assert n_samples == n_samples_
    assert n_channels == n_channels_
    assert n_test_rows == n_samples_rows_
    assert n_test_cols == n_samples_cols_
    value_NCC = numpy.zeros((n_samples,))
    for ii in range(n_samples):
        tmp_test = test_imgs[ii, :, :, :]
        tmp_test = tmp_test / numpy.sqrt((tmp_test ** 2).sum())
        tmp_inpainted = inpainted_imgs[ii, :, :, :]
        tmp_inpainted = tmp_inpainted / numpy.sqrt((tmp_inpainted ** 2).sum())
        value_NCC[ii] = numpy.dot(
            tmp_test.reshape(1, n_channels * n_test_rows * n_test_cols),
            tmp_inpainted.reshape(1, n_channels_ * n_samples_rows_ * n_samples_cols_).T)
    return value_NCC
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,814 | luoheng/TCssrbm | refs/heads/master | /test_CrossCorrelation.py | import numpy
import theano
from CrossCorrelation import CrossCorrelation
floatX=theano.config.floatX
# Cast helper to theano's configured float dtype.
arrayX = lambda X : numpy.asarray(X, dtype=floatX)
def rand(shp, dtype=floatX):
    # Uniform-random array of the given shape, cast to dtype.
    return numpy.random.rand(*shp).astype(dtype)
# --- ad-hoc smoke test (Python 2 print statements) -----------------------
# NOTE(review): samples_shp is laid out as (n_sample, n_patches, 2, 2)
# but CrossCorrelation expects (n_samples, channels, rows, cols) — the
# shapes happen to line up here; confirm the intent.
test_image_shp = (1,4,4)
n_sample=2
n_patches_of_samples=1
samples_shp = (n_sample,n_patches_of_samples,2,2)
window_size=2
seed=0
test_image = arrayX(numpy.ones(test_image_shp))
samples = rand(samples_shp)
print 'test_image'
print test_image
print 'samples'
print samples
print samples.shape
CC = CrossCorrelation(test_image,samples,window_size,n_patches_of_samples=4)
print CC.patches
print CC.patches.shape
NCC_value = CC.NCC()
print NCC_value
# Hand-computed comparison against an all-ones test image: each normalized
# patch dotted with the 0.5-valued normalized ones-window.
# NOTE(review): the two indexing expressions below differ
# (samples[n,0:ws,0:ws] vs samples[n,0,0:ws,0:ws]) — verify which is meant.
for n in xrange(n_sample):
    print ((samples[n,0:window_size,0:window_size]/numpy.sqrt((samples[n,0,0:window_size,0:window_size]**2).sum()))*0.5).sum()
#assert (samples[0,0,:,:]/numpy.sqrt((samples[0,0,:,:]**2).sum())*0.5).sum() == NCC_value[0,0]
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,815 | luoheng/TCssrbm | refs/heads/master | /test_NCC.py | from CrossCorrelation import NCC
import numpy
# NOTE(review): unfinished stub — NCC expects 4-D (n, channels, rows,
# cols) batches, the imported NCC is never called, and the bare Python-2
# ``print`` below outputs only a newline.
test_img = numpy.random.randn(2,2)
inpaited_img = numpy.random.randn(2,2)
print
65,816 | luoheng/TCssrbm | refs/heads/master | /tests/test_unshared_conv_diagonally.py | import sys
import unittest
import pdb
import numpy
import theano
from theano.tests.unittest_tools import verify_grad
from unshared_conv_diagonally import FilterActs
from unshared_conv_diagonally import WeightActs
from unshared_conv_diagonally import ImgActs
def rand(shp, dtype):
    """Uniform random array of shape ``shp``, cast to ``dtype``.

    (Deterministic variants — all-ones, or an arange reshape — were used
    here in the past for debugging.)
    """
    values = numpy.random.rand(*shp)
    return values.astype(dtype)
def assert_linear(f, pt, mode=None):
    """Numerically check that graph-building function *f* is linear at *pt*.

    Builds f(pt)*t, f(pt*t), f(like)+f(pt) and f(pt+like) for a random
    shared tensor ``like`` shaped like *pt*, compiles them into one
    theano function, evaluates at t=3.6, and asserts homogeneity
    (f(pt*t) == f(pt)*t) and additivity (f(pt)+f(like) == f(pt+like)).
    """
    t = theano.tensor.scalar(dtype=pt.dtype)
    ptlike = theano.shared(rand(
        pt.get_value(borrow=True).shape,
        dtype=pt.dtype))
    out = f(pt)
    out2 = f(pt * t)
    out3 = f(ptlike) + out
    out4 = f(pt + ptlike)
    # NOTE: rebinds the name ``f`` from the tested callable to the
    # compiled theano function — intentional but easy to misread.
    f = theano.function([t], [out * t, out2, out3, out4],
            allow_input_downcast=True,
            mode=mode)
    outval, out2val, out3val, out4val = f(3.6)
    assert numpy.allclose(outval, out2val)
    assert numpy.allclose(out3val, out4val)
class TestFilterActs(unittest.TestCase):
    """Unit tests for the FilterActs op (locally-connected convolution).

    Python 2 file (print statements, ``except X, e`` syntax).  Shape
    triples below are kept index-aligned: test case i uses
    ishape_list[i] / fshape_list[i] / hshape_list[i].
    """
    # Global test variables (may be extended to include more tests)
    #Each item in ishape_list : (icount, icolors, irows, icols)
    ishape_list = [(1, 1, 4, 4), (2, 1, 4, 4),
            (1, 2, 4, 4), (1, 1, 4, 4),
            (2, 3, 24, 24), (2, 3, 20, 20),
            (2, 3, 48, 48), (20, 1, 98, 98)]
    #Each item in fshapes_list = (fmodules, filters_per_module,
    #                             fcolors, frows, fcols)
    fshape_list = [(1, 1, 1, 2, 2), (1, 1, 1, 2, 2),
            (1, 1, 2, 2, 2), (1, 4, 1, 2, 2),
            (1, 1, 3, 6, 6), (3, 2, 3, 6, 6),
            (5, 32, 3, 11, 11), (11, 32, 1, 11, 11)]
    # Each item in hshapes_list = (hcount, fmodules, filter_per_module,
    #                              hrows, hcols)
    hshape_list = [(1, 1, 1, 2, 2), (2, 1, 1, 2, 2),
            (1, 1, 1, 2, 2), (1, 1, 4, 2, 2),
            (2, 1, 1, 4, 4), (2, 3, 2, 3, 3),
            (2, 5, 32, 4, 4), (20, 11, 32, 8, 8)]
    module_stride = 1
    dtype = 'float64'
    mode = theano.compile.get_default_mode()
    nbTests = len(ishape_list)
    # Utility functions
    def ishape(self, i):
        return self.ishape_list[i]
    def fshape(self, i):
        return self.fshape_list[i]
    def hshape(self, i):
        return self.hshape_list[i]
    def function(self, inputs, outputs):
        # Compile under this test class's mode (overridden in GPU subclasses).
        return theano.function(inputs, outputs, mode=self.mode)
    def setUp(self):
        # NOTE(review): the loop rebuilds the same shared-variable lists
        # nbTests times; one pass would suffice — kept as-is.
        self.op = FilterActs(self.module_stride)
        for i in range(self.nbTests):
            self.s_images_list = [theano.shared(rand(ishape, self.dtype))
                    for ishape in self.ishape_list]
            self.s_filters_list = [theano.shared(rand(fshape, self.dtype))
                    for fshape in self.fshape_list]
    # Test cases
    def test_type(self):
        # Output must be a 5-D tensor of the expected dtype and hidden shape.
        for i in range(self.nbTests):
            print i, self.ishape_list[i], self.fshape_list[i]
            out = self.op(self.s_images_list[i], self.s_filters_list[i])
            assert out.dtype == self.dtype
            assert out.ndim == 5
            f = self.function([], out)
            outval = f()
            assert outval.shape == self.hshape(i)
            assert outval.dtype == self.s_images_list[i].get_value(
                    borrow=True).dtype
    def test_linearity_images(self):
        # FilterActs must be linear in its image argument.
        for i in range(self.nbTests):
            print i, self.ishape_list[i], self.fshape_list[i]
            assert_linear(
                    lambda imgs: self.op(imgs, self.s_filters_list[i]),
                    self.s_images_list[i],
                    mode=self.mode)
    def test_linearity_filters(self):
        # ...and linear in its filter argument.
        for i in range(self.nbTests):
            assert_linear(
                    lambda fts: self.op(self.s_images_list[i], fts),
                    self.s_filters_list[i],
                    mode=self.mode)
    def test_shape(self):
        for i in range(self.nbTests):
            out = self.op(self.s_images_list[i], self.s_filters_list[i])
            f = self.function([], out)
            outval = f()
            assert outval.shape == self.hshape(i)
    def test_grad_left(self):
        # Gradient wrt images only (filters stay shared, for GPU tests).
        # The two largest cases are skipped (nbTests - 2) for speed.
        for i in range(self.nbTests - 2):
            print i, self.ishape_list[i], self.fshape_list[i]
            # test only the left so that the right can be a shared variable,
            # (for tests on the GPU)
            def left_op(imgs):
                return self.op(imgs, self.s_filters_list[i])
            try:
                verify_grad(left_op, [self.s_images_list[i].get_value()],
                        mode=self.mode, eps=9e-4)
            except verify_grad.E_grad, e:
                # NOTE(review): the immediate ``raise`` makes the debug
                # prints below unreachable — kept as-is.
                raise
                print e.num_grad.gf
                print e.analytic_grad
                raise
    def test_grad_right(self):
        # Gradient wrt filters only (images stay shared).
        for i in range(self.nbTests - 2):
            # test only the right so that the left can be a shared variable,
            # (for tests on the GPU)
            def right_op(filters):
                return self.op(self.s_images_list[i], filters)
            try:
                verify_grad(right_op, [self.s_filters_list[i].get_value()],
                        mode=self.mode, eps=3e-4)
            except verify_grad.E_grad, e:
                # NOTE(review): same unreachable prints as test_grad_left.
                raise
                print e.num_grad.gf
                print e.analytic_grad
                raise
    def test_dtype_mismatch(self):
        # Mixed float32/float64 operands must be rejected either way round.
        for i in range(self.nbTests):
            self.assertRaises(TypeError,
                    self.op,
                    theano.tensor.cast(self.s_images_list[i], 'float32'),
                    theano.tensor.cast(self.s_filters_list[i], 'float64'))
            self.assertRaises(TypeError,
                    self.op,
                    theano.tensor.cast(self.s_images_list[i], 'float64'),
                    theano.tensor.cast(self.s_filters_list[i], 'float32'))
    def test_op_eq(self):
        # Op equality/hashing is keyed on module_stride.
        assert FilterActs(1) == FilterActs(1)
        assert not (FilterActs(1) != FilterActs(1))
        assert (FilterActs(2) != FilterActs(1))
        assert FilterActs(1) != None
class TestFilterActsF32(TestFilterActs):
    """Same FilterActs suite, run in single precision."""
    dtype = 'float32'
class TestWeightActs(unittest.TestCase):
    """Unit tests for the WeightActs op (gradient wrt filters).

    Python 2 file.  Shape lists are index-aligned with TestFilterActs:
    case i pairs ishape_list[i] images with hshape_list[i] hidden
    activations and expects fshape_list[i] filter gradients.
    """
    # Global test variables (may be extended to include more tests)
    #Each item in ishape_list : (icount, icolors, irows, icols)
    ishape_list = [(1, 1, 4, 4), (2, 1, 4, 4),
            (1, 2, 4, 4), (1, 1, 4, 4),
            (2, 3, 24, 24), (2, 3, 20, 20),
            (2, 3, 48, 48), (20, 1, 98, 98)]
    #Each item in fshapes_list = (fmodules, filters_per_module,
    #                             fcolors, frows, fcols)
    fshape_list = [(1, 1, 1, 2, 2), (1, 1, 1, 2, 2),
            (1, 1, 2, 2, 2), (1, 4, 1, 2, 2),
            (1, 1, 3, 6, 6), (3, 2, 3, 6, 6),
            (5, 32, 3, 11, 11), (11, 32, 1, 11, 11)]
    # Each item in hshapes_list = (hcount, fmodules, filter_per_module,
    #                              hrows, hcols)
    hshape_list = [(1, 1, 1, 2, 2), (2, 1, 1, 2, 2),
            (1, 1, 1, 2, 2), (1, 1, 4, 2, 2),
            (2, 1, 1, 4, 4), (2, 3, 2, 3, 3),
            (2, 5, 32, 4, 4), (20, 11, 32, 8, 8)]
    module_stride = 1
    dtype = 'float64'
    nbTests = len(ishape_list)
    # Utility functions
    def ishape(self, i):
        return self.ishape_list[i]
    def fshape(self, i):
        return self.fshape_list[i]
    def frows(self, i):
        # Filter kernel height for case i.
        return self.fshape_list[i][3]
    def fcols(self, i):
        # Filter kernel width for case i.
        return self.fshape_list[i][4]
    def hshape(self, i):
        return self.hshape_list[i]
    def setUp(self):
        # NOTE(review): as in TestFilterActs, this loop rebuilds the same
        # lists nbTests times — kept as-is.
        self.op = WeightActs(self.module_stride)
        for i in range(self.nbTests):
            self.s_images_list = [theano.shared(rand(ishape, self.dtype))
                    for ishape in self.ishape_list]
            self.s_hidacts_list = [theano.shared(rand(hshape, self.dtype))
                    for hshape in self.hshape_list]
    # Test cases
    def test_type(self):
        # Output must be a 5-D tensor matching the expected filter shape.
        for i in range(self.nbTests):
            print i, self.ishape_list[i], self.s_hidacts_list[i]
            out = self.op(self.s_images_list[i], self.s_hidacts_list[i],
                    self.frows(i), self.fcols(i))
            assert out.dtype == self.dtype
            assert out.ndim == 5
            f = theano.function([], out)
            outval = f()
            assert outval.shape == self.fshape(i)
            assert outval.dtype == self.dtype
    def test_linearity_images(self):
        # WeightActs must be linear in the image argument.
        for i in range(self.nbTests):
            def f(images):
                return self.op(images, self.s_hidacts_list[i],
                        self.frows(i), self.fcols(i))
            assert_linear(f, self.s_images_list[i])
    def test_linearity_hidacts(self):
        # ...and linear in the hidden-activation argument.
        for i in range(self.nbTests):
            def f(hidacts):
                return self.op(self.s_images_list[i], hidacts,
                        self.frows(i), self.fcols(i))
            assert_linear(f, self.s_hidacts_list[i])
    def test_grad(self):
        # Numeric-vs-analytic gradient; the two largest cases are skipped.
        for i in range(self.nbTests - 2):
            def op2(imgs, hids):
                return self.op(imgs, hids, self.frows(i), self.fcols(i))
            try:
                verify_grad(op2,
                        [self.s_images_list[i].get_value(),
                            self.s_hidacts_list[i].get_value()])
            except verify_grad.E_grad, e:
                print e.num_grad.gf
                print e.analytic_grad
                raise
    def test_dtype_mismatch(self):
        # Mixed float32/float64 operands must be rejected either way round.
        # NOTE(review): ``self.frows``/``self.fcols`` are passed unbound
        # (methods, not values) — works only because the op raises on the
        # dtype check first; confirm intent.
        for i in range(self.nbTests):
            self.assertRaises(TypeError,
                    self.op,
                    theano.tensor.cast(self.s_images_list[i], 'float32'),
                    theano.tensor.cast(self.s_hidacts_list[i], 'float64'),
                    self.frows, self.fcols)
            self.assertRaises(TypeError,
                    self.op,
                    theano.tensor.cast(self.s_images_list[i], 'float64'),
                    theano.tensor.cast(self.s_hidacts_list[i], 'float32'),
                    self.frows, self.fcols)
class TestWeightActsF32(TestWeightActs):
    # Re-run the entire WeightActs suite in single precision.
    dtype = 'float32'
class TestImgActs(unittest.TestCase):
# Global test variables (may be extended to include more tests)
#Each item in ishape_list : (icount, icolors, irows, icols)
ishape_list = [(1, 1, 4, 4), (2, 1, 4, 4),
(1, 2, 4, 4), (1, 1, 4, 4),
(2, 3, 24, 24), (2, 3, 20, 20),
(2, 3, 48, 48), (20, 1, 98, 98)]
#Each item in fshapes_list = (fmodules, filters_per_module,
# fcolors, frows, fcols)
fshape_list = [(1, 1, 1, 2, 2), (1, 1, 1, 2, 2),
(1, 1, 2, 2, 2), (1, 4, 1, 2, 2),
(1, 1, 3, 6, 6), (3, 2, 3, 6, 6),
(5, 32, 3, 11, 11), (11, 32, 1, 11, 11)]
# Each item in hshapes_list = (hcount, fmodules, filter_per_module,
# hrows, hcols)
hshape_list = [(1, 1, 1, 2, 2), (2, 1, 1, 2, 2),
(1, 1, 1, 2, 2), (1, 1, 4, 2, 2),
(2, 1, 1, 4, 4), (2, 3, 2, 3, 3),
(2, 5, 32, 4, 4), (20, 11, 32, 8, 8)]
module_stride = 1
dtype = 'float64'
nbTests = len(ishape_list)
# Utility functions
def ishape(self, i):
return self.ishape_list[i]
def irows(self, i):
return self.ishape_list[i][2]
def icols(self, i):
return self.ishape_list[i][3]
def fshape(self, i):
return self.fshape_list[i]
def hshape(self, i):
return self.hshape_list[i]
def setUp(self):
self.op = ImgActs(module_stride=self.module_stride)
self.s_filters_list = [theano.shared(rand(fshape, self.dtype))
for fshape in self.fshape_list]
self.s_hidacts_list = [theano.shared(rand(hshape, self.dtype))
for hshape in self.hshape_list]
# Test Cases
def test_type(self):
for i in range(self.nbTests):
out = self.op(self.s_filters_list[i], self.s_hidacts_list[i],
self.irows(i), self.icols(i))
assert out.dtype == self.dtype
assert out.ndim == 4
f = theano.function([], out)
outval = f()
assert outval.shape == self.ishape(i)
assert outval.dtype == self.dtype
def test_linearity_filters(self):
for i in range(self.nbTests):
def f(filts):
return self.op(filts, self.s_hidacts_list[i],
self.irows(i), self.icols(i))
assert_linear(f, self.s_filters_list[i])
def test_linearity_hidacts(self):
for i in range(self.nbTests):
def f(hidacts):
return self.op(self.s_filters_list[i], hidacts,
self.irows(i), self.icols(i))
assert_linear(f, self.s_hidacts_list[i])
def test_grad(self):
for i in range(self.nbTests - 2):
def op2(imgs, hids):
return self.op(imgs, hids, self.irows(i), self.icols(i))
try:
verify_grad(op2,
[self.s_filters_list[i].get_value(),
self.s_hidacts_list[i].get_value()])
except verify_grad.E_grad, e:
print e.num_grad.gf
print e.analytic_grad
raise
def test_dtype_mismatch(self):
for i in range(self.nbTests):
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_filters_list[i], 'float32'),
theano.tensor.cast(self.s_hidacts_list[i], 'float64'),
self.irows(i), self.icols(i))
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_filters_list[i], 'float64'),
theano.tensor.cast(self.s_hidacts_list[i], 'float32'),
self.irows(i), self.icols(i))
class TestImgActsF32(TestImgActs):
    # Re-run the entire ImgActs suite in single precision.
    dtype = 'float32'
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,817 | luoheng/TCssrbm | refs/heads/master | /TCrbm.py | """
This file extends the mu-ssRBM for tiled-convolutional training
"""
import cPickle, pickle
import numpy
numpy.seterr('warn') #SHOULD NOT BE IN LIBIMPORT
from PIL import Image
import theano
from theano import tensor
from theano.tensor import nnet,grad
from pylearn.io import image_tiling
from pylearn.algorithms.mcRBM import (
contrastive_cost, contrastive_grad)
import pylearn.gd.sgd
import sys
from unshared_conv_diagonally import FilterActs
from unshared_conv_diagonally import WeightActs
from unshared_conv_diagonally import ImgActs
from Brodatz import Brodatz_op
#import scipy.io
import os
# Where model snapshots (rbm_%06i.pkl) are written during training.
_temp_data_path_ = '.'#'/Tmp/luoheng'

# Select the random-stream implementation.  The `if 1:` branch keeps the
# slow-but-well-tested shared_randomstreams generator; flip to the else
# branch for the faster MRG streams.
if 1:
    print 'WARNING: using SLOW rng'
    RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
    import theano.sandbox.rng_mrg
    RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams

floatX=theano.config.floatX

# sharedX(X, name): theano shared variable holding X cast to floatX.
sharedX = lambda X, name : theano.shared(numpy.asarray(X, dtype=floatX),
        name=name)
def Toncv(image,filters,module_stride=1):
    """Tiled-convolution forward pass: apply `filters` to `image`."""
    return FilterActs(module_stride)(image, filters)
def Tdeconv(filters, hidacts, irows, icols, module_stride=1):
    """Transpose of Toncv: reconstruct an irows x icols image from `hidacts`."""
    return ImgActs(module_stride)(filters, hidacts, irows, icols)
def unnatural_sgd_updates(params, grads, stepsizes, tracking_coef=0.1, epsilon=1):
    """Build SGD updates where each step is divided by a running estimate of
    the gradient's standard deviation (a rough natural-gradient variant).

    For every parameter two shared accumulators are created: an exponential
    moving average of the gradient and of its square (tracking_coef is the
    mixing rate).  Returns the theano updates dict.
    """
    updates = dict()
    for param, grad, step in zip(params, grads, stepsizes):
        g_mean = theano.shared(numpy.zeros_like(param.get_value(borrow=True)))
        g_mean_sqr = theano.shared(numpy.ones_like(param.get_value(borrow=True)))
        updates[g_mean] = tracking_coef * grad + (1 - tracking_coef) * g_mean
        updates[g_mean_sqr] = tracking_coef * grad * grad \
                + (1 - tracking_coef) * g_mean_sqr
        g_var = g_mean_sqr - g_mean ** 2
        # natural grad doesn't want sqrt, but i found it worked worse
        updates[param] = param - step * g_mean / tensor.sqrt(g_var + epsilon)
    return updates
"""
def grad_updates(params, grads, stepsizes):
grad_means = [theano.shared(numpy.zeros_like(p.get_value(borrow=True)))
for p in params]
grad_means_sqr = [theano.shared(numpy.ones_like(p.get_value(borrow=True)))
for p in params]
updates = dict()
for g, p, s in zip(
grads, params, stepsizes):
updates[p] = p - s*g
return updates
"""
def safe_update(a, b):
    """Merge mapping `b` into dict `a`, refusing to overwrite existing keys.

    Unlike dict.update, raises KeyError on the first key of `b` already
    present in `a`.  Returns `a`, which is mutated in place.
    """
    # .items() behaves identically to the old py2-only .iteritems() here
    # and keeps the function working under Python 3.
    for k, v in dict(b).items():
        if k in a:
            raise KeyError(k)
        a[k] = v
    return a
def most_square_shape(N):
    """rectangle (height, width) with area N that is closest to square

    Searches downward from floor(sqrt(N)), so height <= width and a result
    always exists ((1, N) in the worst case of prime N).
    """
    # range/`//` keep the original Python 2 integer behaviour and also work
    # under Python 3 (the old `xrange` and true-division `/` would not).
    for i in range(int(numpy.sqrt(N)), 0, -1):
        if 0 == N % i:
            return (i, N // i)
def tile_conv_weights(w,flip=False, scale_each=False):
    """
    Return something that can be rendered as an image to visualize the filters.

    `w` is a 4-tensor of filters (n_filters, n_channels, rows, cols); filters
    must be square.  They are laid out on a near-square grid (most_square_shape)
    with a 1-pixel gutter, rescaled to uint8 either globally or per tile
    (scale_each).  One channel -> greyscale 2-D array; otherwise the channels
    are moved last and treated as RGB -- assumes n_channels is 3 in that
    branch (TODO confirm; the old rgb check below is commented out).
    """
    #if w.shape[1] != 3:
    #    raise NotImplementedError('not rgb', w.shape)
    if w.shape[2] != w.shape[3]:
        raise NotImplementedError('not square', w.shape)

    if w.shape[1] == 1:
        # --- greyscale branch ---
        wmin, wmax = w.min(), w.max()
        if not scale_each:
            # one global rescale over all filters
            w = numpy.asarray(255 * (w - wmin) / (wmax - wmin + 1e-6), dtype='uint8')
        trows, tcols= most_square_shape(w.shape[0])
        # +trows-1 / +tcols-1 leaves a 1-pixel gutter between tiles
        outrows = trows * w.shape[2] + trows-1
        outcols = tcols * w.shape[3] + tcols-1
        out = numpy.zeros((outrows, outcols), dtype='uint8')
        #tr_stride= 1+w.shape[1]
        for tr in range(trows):
            for tc in range(tcols):
                # this is supposed to flip the filters back into the image
                # coordinates as well as put the channels in the right place, but I
                # don't know if it really does that
                tmp = w[tr*tcols+tc,
                        0,
                        ::-1 if flip else 1,
                        ::-1 if flip else 1]
                if scale_each:
                    tmp = numpy.asarray(255*(tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6),
                            dtype='uint8')
                out[tr*(1+w.shape[2]):tr*(1+w.shape[2])+w.shape[2],
                        tc*(1+w.shape[3]):tc*(1+w.shape[3])+w.shape[3]] = tmp
        return out

    # --- colour branch: channels moved to the last axis for PIL 'RGB' ---
    wmin, wmax = w.min(), w.max()
    if not scale_each:
        w = numpy.asarray(255 * (w - wmin) / (wmax - wmin + 1e-6), dtype='uint8')
    trows, tcols= most_square_shape(w.shape[0])
    outrows = trows * w.shape[2] + trows-1
    outcols = tcols * w.shape[3] + tcols-1
    out = numpy.zeros((outrows, outcols,3), dtype='uint8')
    tr_stride= 1+w.shape[1]
    for tr in range(trows):
        for tc in range(tcols):
            # this is supposed to flip the filters back into the image
            # coordinates as well as put the channels in the right place, but I
            # don't know if it really does that
            tmp = w[tr*tcols+tc].transpose(1,2,0)[
                    ::-1 if flip else 1,
                    ::-1 if flip else 1]
            if scale_each:
                tmp = numpy.asarray(255*(tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6),
                        dtype='uint8')
            out[tr*(1+w.shape[2]):tr*(1+w.shape[2])+w.shape[2],
                    tc*(1+w.shape[3]):tc*(1+w.shape[3])+w.shape[3]] = tmp
    return out
class RBM(object):
    """
    Light-weight class that provides math related to inference in Gaussian RBM

    Attributes:
     - v_shape - the input image shape (ie. n_imgs, n_chnls, n_img_rows, n_img_cols)
     - n_conv_hs - the number of spike and slab hidden units
     - filters_hs_shape - the kernel filterbank shape for hs units
     - filters_h_shape - the kernel filterbank shape for h units
     - filters_hs - a tensor with shape (n_conv_hs,n_chnls,n_ker_rows, n_ker_cols)
     - conv_bias_hs - a vector with shape (n_conv_hs, n_out_rows, n_out_cols)
     - subsample_hs - how to space the receptive fields (dx,dy)
     - n_global_hs - how many globally-connected spike and slab units
     - weights_hs - global weights
     - global_bias_hs -
     - _params a list of the attributes that are shared vars

    The technique of combining convolutional and global filters to account for border effects is
    borrowed from (Alex Krizhevsky, TR?, October 2010).
    """
    def __init__(self, **kwargs):
        # All state is injected via alloc(); this just stores the kwargs.
        print 'init rbm'
        self.__dict__.update(kwargs)

    @classmethod
    def alloc(cls,
            conf,
            image_shape,  # input dimensionality
            filters_hs_shape,
            filters_irange,
            sigma,
            seed = 8923402,
            ):
        """Build an RBM: allocate filters, biases and the negative-sample mask."""
        rng = numpy.random.RandomState(seed)
        self = cls()
        n_images, n_channels, n_img_rows, n_img_cols = image_shape
        n_filters_hs_modules, n_filters_hs_per_modules, fcolors, n_filters_hs_rows, n_filters_hs_cols = filters_hs_shape
        assert fcolors == n_channels
        self.sigma = sigma
        self.v_shape = image_shape
        print 'v_shape'
        print self.v_shape
        self.filters_hs_shape = filters_hs_shape
        print 'self.filters_hs_shape'
        print self.filters_hs_shape
        # hidden-layer shape is derived from the image and filter shapes
        self.out_conv_hs_shape = FilterActs.infer_shape_without_instance(self.v_shape,self.filters_hs_shape)
        print 'self.out_conv_hs_shape'
        print self.out_conv_hs_shape
        conv_bias_hs_shape = (n_filters_hs_modules,n_filters_hs_per_modules)
        self.conv_bias_hs_shape = conv_bias_hs_shape
        print 'self.conv_bias_hs_shape'
        print self.conv_bias_hs_shape
        bias_v_shape = self.v_shape[1:]
        self.bias_v_shape = bias_v_shape
        print 'self.bias_v_shape'
        print self.bias_v_shape
        self.filters_hs = sharedX(rng.randn(*filters_hs_shape) * filters_irange , 'filters_hs')
        #conv_bias_ival = rng.rand(*conv_bias_hs_shape)*2-1
        #conv_bias_ival *= conf['conv_bias_irange']
        #conv_bias_ival += conf['conv_bias0']
        conv_bias_ival = numpy.zeros(conv_bias_hs_shape)
        self.conv_bias_hs = sharedX(conv_bias_ival, name='conv_bias_hs')
        self.bias_v = sharedX(numpy.zeros(self.bias_v_shape), name='bias_v')
        # Mask that zeroes the border of negative samples (the border is not
        # covered by any filter receptive field).
        negsample_mask = numpy.zeros((n_channels,n_img_rows,n_img_cols),dtype=floatX)
        negsample_mask[:,n_filters_hs_rows:n_img_rows-n_filters_hs_rows+1,n_filters_hs_cols:n_img_cols-n_filters_hs_cols+1] = 1
        self.negsample_mask = sharedX(negsample_mask,'negsample_mask')
        self.conf = conf
        self._params = [self.filters_hs,
                self.conv_bias_hs,
                self.bias_v
                ]
        return self

    def convdot(self, image, filters):
        # tiled-convolution forward pass (FilterActs via Toncv)
        return Toncv(image,filters)

    def convdot_T(self, filters, hidacts):
        # transpose pass back to image space (ImgActs via Tdeconv)
        n_images, n_channels, n_img_rows, n_img_cols = self.v_shape
        return Tdeconv(filters, hidacts, n_img_rows, n_img_cols)

    #####################
    # spike-and-slab convolutional hidden units
    def mean_convhs_h_given_v(self, v):
        """Return the mean of binary-valued hidden units h, given v
        """
        W = self.filters_hs
        vW = self.convdot(v, W)
        vW = vW/self.sigma
        # dimshuffle so the per-(module, filter) bias broadcasts over the
        # spatial output positions, then shuffle back
        vWb = vW.dimshuffle(0,3,4,1,2) + self.conv_bias_hs
        rval = nnet.sigmoid(vWb.dimshuffle(0,3,4,1,2))
        return rval

    #####################
    # visible units
    def mean_v_given_h(self, convhs_h):
        """Mean of the Gaussian visibles given hidden activations."""
        Wh = self.convdot_T(self.filters_hs, convhs_h)
        rval = Wh + self.bias_v
        return rval*self.sigma

    #####################
    def gibbs_step_for_v(self, v, s_rng, return_locals=False):
        """One Gibbs sweep: sample h|v, then sample v|h (border masked out)."""
        #positive phase
        mean_convhs_h = self.mean_convhs_h_given_v(v)
        def sample_h(hmean,shp):
            return tensor.cast(s_rng.uniform(size=shp) < hmean, floatX)
        sample_convhs_h = sample_h(mean_convhs_h, self.out_conv_hs_shape)
        vv_mean = self.mean_v_given_h(sample_convhs_h)
        vv_sample = s_rng.normal(size=self.v_shape)*self.sigma + vv_mean
        # zero out the border not covered by any filter
        vv_sample = theano.tensor.mul(vv_sample,self.negsample_mask)
        if return_locals:
            return vv_sample, locals()
        else:
            return vv_sample

    def free_energy_given_v(self, v):
        """Per-example free energy of `v` (vector of length batchsize)."""
        # This is accurate up to a multiplicative constant
        # because I dropped some terms involving 2pi
        def pre_sigmoid(x):
            # recover the sigmoid's input from the symbolic graph
            assert x.owner and x.owner.op == nnet.sigmoid
            return x.owner.inputs[0]
        pre_convhs_h = pre_sigmoid(self.mean_convhs_h_given_v(v))
        rval = tensor.add(
                -tensor.sum(nnet.softplus(pre_convhs_h),axis=[1,2,3,4]), #the shape of pre_convhs_h: 64 x 11 x 32 x 8 x 8
                (0.5/self.sigma) * tensor.sum((v-self.bias_v)**2, axis=[1,2,3]), #shape: 64 x 1 x 98 x 98
                )
        assert rval.ndim==1
        return rval

    def cd_updates(self, pos_v, neg_v, stepsizes, other_cost=None):
        """Contrastive-divergence parameter updates from pos/neg phases."""
        grads = contrastive_grad(self.free_energy_given_v,
                pos_v, neg_v,
                wrt=self.params(),
                other_cost=other_cost
                )
        assert len(stepsizes)==len(grads)
        if self.conf['unnatural_grad']:
            sgd_updates = unnatural_sgd_updates
        else:
            sgd_updates = pylearn.gd.sgd.sgd_updates
        rval = dict(
                sgd_updates(
                    self.params(),
                    grads,
                    stepsizes=stepsizes))
        if 0:
            #DEBUG STORE GRADS
            grad_shared_vars = [sharedX(0*p.value.copy(),'') for p in self.params()]
            self.grad_shared_vars = grad_shared_vars
            rval.update(dict(zip(grad_shared_vars, grads)))
        return rval

    def params(self):
        # return the list of *shared* learnable parameters
        # that are, in your judgement, typically learned in this model
        return list(self._params)

    def save_weights_to_files(self, identifier):
        # save 4 sets of weights: (RGB rendering -- not implemented)
        pass

    def save_weights_to_grey_files(self, identifier):
        """Render filters_hs as a greyscale PNG named by `identifier`."""
        # save 4 sets of weights:

        #filters_hs
        def arrange_for_show(filters_hs,filters_hs_shape):
            # collapse (modules, filters_per_module) into one filter axis
            n_filters_hs_modules, n_filters_hs_per_modules, fcolors, n_filters_hs_rows, n_filters_hs_cols = filters_hs_shape
            filters_fs_for_show = filters_hs.reshape(
                    (n_filters_hs_modules*n_filters_hs_per_modules,
                        fcolors,
                        n_filters_hs_rows,
                        n_filters_hs_cols))
            fn = theano.function([],filters_fs_for_show)
            rval = fn()
            return rval
        filters_fs_for_show = arrange_for_show(self.filters_hs, self.filters_hs_shape)
        Image.fromarray(
                tile_conv_weights(
                    filters_fs_for_show,flip=False), 'L').save(
                        'filters_hs_%s.png'%identifier)

    def dump_to_file(self, filename):
        """Pickle the whole model; fall back to pure-python pickle if needed."""
        try:
            cPickle.dump(self, open(filename, 'wb'))
        except cPickle.PicklingError:
            pickle.dump(self, open(filename, 'wb'))
class Gibbs(object): # if there's a Sampler interface - this should support it
    """Holds the persistent state (`particles`) of a Gibbs chain for an RBM."""

    @classmethod
    def alloc(cls, rbm, batchsize, rng):
        """Create a sampler whose particles match `rbm`'s visible geometry.

        `rng` may be a numpy RandomState or an integer seed.
        """
        if not hasattr(rng, 'randn'):
            rng = numpy.random.RandomState(rng)
        self = cls()
        seed=int(rng.randint(2**30))
        self.rbm = rbm
        if batchsize==rbm.v_shape[0]:
            particle_shape = rbm.v_shape
        else:
            # Generalized: this branch used to hard-code (batchsize,1,98,98);
            # deriving the channel/row/col dims from the RBM keeps the same
            # behaviour for 98x98 models and supports other image sizes.
            particle_shape = (batchsize,) + tuple(rbm.v_shape[1:])
        self.particles = sharedX(
            rng.randn(*particle_shape),
            name='particles')
        self.s_rng = RandomStreams(seed)
        return self
def HMC(rbm, batchsize, rng): # if there's a Sampler interface - this should support it
    """Build an HMC sampler over the RBM's free energy.

    `rng` may be a RandomState or an int seed.  Note `batchsize` is unused:
    the particle tensor always takes the RBM's full visible shape.
    """
    # BUGFIX: only pylearn.gd.sgd is imported at module level, so the
    # pylearn.sampling.hmc reference below used to raise AttributeError.
    import pylearn.sampling.hmc
    if not hasattr(rng, 'randn'):
        rng = numpy.random.RandomState(rng)
    seed=int(rng.randint(2**30))
    particles = sharedX(
        rng.randn(*rbm.v_shape),
        name='particles')
    return pylearn.sampling.hmc.HMC_sampler(
            particles,
            rbm.free_energy_given_v,
            seed=seed)
class Trainer(object): # updates of this object implement training
    """Bundles an RBM, its data source, the Gibbs sampler and the learning
    schedule; updates() returns the theano update dict for one SML/PCD step."""

    @classmethod
    def alloc(cls, rbm, visible_batch,
            lrdict,
            conf,
            rng=234,
            iteration_value=0,
            ):
        """Construct a Trainer; `lrdict` maps each rbm param to its stepsize."""
        batchsize = rbm.v_shape[0]
        sampler = Gibbs.alloc(rbm, batchsize, rng=rng)
        print 'alloc trainer'
        error = 0.0
        return cls(
                rbm=rbm,
                batchsize=batchsize,
                visible_batch=visible_batch,
                sampler=sampler,
                iteration=sharedX(iteration_value, 'iter'),
                learn_rates = [lrdict[p] for p in rbm.params()],
                conf=conf,
                annealing_coef=sharedX(1.0, 'annealing_coef'),
                # running average of hidden-unit activation probabilities
                conv_h_means = sharedX(numpy.zeros(rbm.out_conv_hs_shape[1:])+0.5,'conv_h_means'),
                # NOTE(review): 'cpnv_h' looks like a typo for 'conv_h'; the
                # attribute is only used inside this class, so it is kept.
                cpnv_h = sharedX(numpy.zeros(rbm.out_conv_hs_shape), 'conv_h'),
                #recons_error = sharedX(error,'reconstruction_error'),
                )

    def __init__(self, **kwargs):
        # All state is injected via alloc().
        print 'init trainer'
        self.__dict__.update(kwargs)

    def updates(self):
        """Build the full update dict: bookkeeping, CD/SML parameter updates,
        and the advance of the persistent negative-phase chain."""
        print 'start trainer.updates'
        conf = self.conf
        ups = {}
        add_updates = lambda b: safe_update(ups,b)
        # learning rate decays linearly to 0 over train_iters
        annealing_coef = 1.0 - self.iteration / float(conf['train_iters'])
        ups[self.iteration] = self.iteration + 1 #
        ups[self.annealing_coef] = annealing_coef
        conv_h = self.rbm.mean_convhs_h_given_v(
                self.visible_batch)
        # exponential moving average of hidden activations (for monitoring)
        new_conv_h_means = 0.1 * conv_h.mean(axis=0) + .9*self.conv_h_means
        #new_conv_h_means = conv_h.mean(axis=0)
        ups[self.conv_h_means] = new_conv_h_means
        ups[self.cpnv_h] = conv_h
        #ups[self.global_h_means] = new_global_h_means

        #sparsity_cost = 0
        #self.sparsity_cost = sparsity_cost

        # SML updates PCD
        add_updates(
                self.rbm.cd_updates(
                    pos_v=self.visible_batch,
                    neg_v=self.sampler.particles,
                    stepsizes=[annealing_coef*lr for lr in self.learn_rates]))

        if conf['chain_reset_prob']:
            # advance the 'negative-phase' chain
            nois_batch = self.sampler.s_rng.normal(size=self.rbm.v_shape)
            resets = self.sampler.s_rng.uniform(size=(conf['batchsize'],))<conf['chain_reset_prob']
            old_particles = tensor.switch(resets.dimshuffle(0,'x','x','x'),
                    nois_batch,   # reset the chain
                    self.sampler.particles,  #continue chain
                    )
            #old_particles = tensor.switch(resets.dimshuffle(0,'x','x','x'),
            #        self.visible_batch,   # reset the chain
            #        self.sampler.particles,  #continue chain
            #        )
        else:
            old_particles = self.sampler.particles
        # run the configured number of Gibbs sweeps on the chain
        tmp_particles = old_particles
        for step in xrange(self.conf['steps_sampling']):
            tmp_particles = self.rbm.gibbs_step_for_v(tmp_particles, self.sampler.s_rng)
        new_particles = tmp_particles
        #broadcastable_value = new_particles.broadcastable
        #print broadcastable_value

        #reconstructions= self.rbm.gibbs_step_for_v(self.visible_batch, self.sampler.s_rng)
        #recons_error = tensor.sum((self.visible_batch-reconstructions)**2)
        #recons_error = 0.0
        #ups[self.recons_error] = recons_error
        #return {self.particles: new_particles}
        ups[self.sampler.particles] = new_particles
        return ups

    def save_weights_to_files(self, pattern='iter_%05i'):
        # RGB rendering is disabled; see save_weights_to_grey_files.
        #pattern = pattern%self.iteration.get_value()

        # save particles
        #Image.fromarray(tile_conv_weights(self.sampler.particles.get_value(borrow=True),
        #    flip=False),
        #        'RGB').save('particles_%s.png'%pattern)
        #self.rbm.save_weights_to_files(pattern)
        pass

    def save_weights_to_grey_files(self, pattern='iter_%05i'):
        """Save the current negative particles and filters as greyscale PNGs."""
        pattern = pattern%self.iteration.get_value()

        # save particles
        """
        particles_for_show = self.sampler.particles.dimshuffle(3,0,1,2)
        fn = theano.function([],particles_for_show)
        particles_for_show_value = fn()
        Image.fromarray(tile_conv_weights(particles_for_show_value,
            flip=False),'L').save('particles_%s.png'%pattern)
        self.rbm.save_weights_to_grey_files(pattern)
        """
        Image.fromarray(tile_conv_weights(self.sampler.particles.get_value(borrow=True),
            flip=False),'L').save('particles_%s.png'%pattern)
        self.rbm.save_weights_to_grey_files(pattern)

    def print_status(self):
        """Print min/max of every monitored quantity (asserts all finite)."""
        def print_minmax(msg, x):
            assert numpy.all(numpy.isfinite(x))
            print msg, x.min(), x.max()
        print 'iter:', self.iteration.get_value()
        print_minmax('filters_hs ', self.rbm.filters_hs.get_value(borrow=True))
        print_minmax('particles', self.sampler.particles.get_value())
        print_minmax('conv_h_means', self.conv_h_means.get_value())
        print_minmax('conv_h', self.cpnv_h.get_value())
        print_minmax('visible_bais', self.rbm.bias_v.get_value())
        print 'lr annealing coef:', self.annealing_coef.get_value()
        #print 'reconstruction error:', self.recons_error.get_value()
def main_inpaint(filename, algo='Gibbs', rng=777888, scale_separately=False):
    """Load a pickled RBM and inpaint the center of Brodatz patches.

    The center region [11:88, 11:88] of each patch is blanked out, the fixed
    border is clamped, and 500 Gibbs steps resample the center; every 20 steps
    a tiled PNG is written next to `filename`.
    """
    rbm = cPickle.load(open(filename))
    sampler = Gibbs.alloc(rbm, rbm.conf['batchsize'], rng)
    batch_idx = tensor.iscalar()
    batch_range = batch_idx * rbm.conf['batchsize'] + numpy.arange(rbm.conf['batchsize'])

    n_examples = rbm.conf['batchsize']   #64
    n_img_rows = 98
    n_img_cols = 98
    n_img_channels=1
    batch_x = Brodatz_op(batch_range,
                         '../../../Brodatz/D6.gif',   # download from http://www.ux.uis.no/~tranden/brodatz.html
                         patch_shape=(n_img_channels,
                                      n_img_rows,
                                      n_img_cols),
                         noise_concelling=0.,
                         seed=3322,
                         batchdata_size=n_examples
                         )
    fn_getdata = theano.function([batch_idx],batch_x)
    batchdata = fn_getdata(0)
    # keep a 0..1-scaled copy of the border for the scale_separately rendering
    scaled_batchdata = (batchdata - batchdata.min())/(batchdata.max() - batchdata.min() + 1e-6)
    # blank the center region in both copies; only the border stays clamped
    scaled_batchdata[:,:,11:88,11:88] = 0
    batchdata[:,:,11:88,11:88] = 0
    print 'the min of border: %f, the max of border: %f'%(batchdata.min(),batchdata.max())
    shared_batchdata = sharedX(batchdata,'batchdata')
    # mask selecting the center pixels that are free to be resampled
    border_mask = numpy.zeros((n_examples,n_img_channels,n_img_rows,n_img_cols),dtype=floatX)
    border_mask[:,:,11:88,11:88]=1

    sampler.particles = shared_batchdata
    # one Gibbs sweep, then re-clamp the border: keep sampled center, add border back
    new_particles = rbm.gibbs_step_for_v(sampler.particles, sampler.s_rng)
    new_particles = tensor.mul(new_particles,border_mask)
    new_particles = tensor.add(new_particles,batchdata)
    fn = theano.function([], [],
            updates={sampler.particles: new_particles})
    particles = sampler.particles

    for i in xrange(500):
        print i
        if i % 20 == 0:
            savename = '%s_inpaint_%04i.png'%(filename,i)
            print 'saving'
            temp = particles.get_value(borrow=True)
            print 'the min of center: %f, the max of center: %f' \
                    %(temp[:,:,11:88,11:88].min(),temp[:,:,11:88,11:88].max())
            if scale_separately:
                # rescale the sampled center independently of the border
                scale_separately_savename = '%s_inpaint_scale_separately_%04i.png'%(filename,i)
                blank_img = numpy.zeros((n_examples,n_img_channels,n_img_rows,n_img_cols),dtype=floatX)
                tmp = temp[:,:,11:88,11:88]
                tmp = (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6)
                blank_img[:,:,11:88,11:88] = tmp
                blank_img = blank_img + scaled_batchdata
                Image.fromarray(
                        tile_conv_weights(
                            blank_img,
                            flip=False),
                        'L').save(scale_separately_savename)
            else:
                Image.fromarray(
                        tile_conv_weights(
                            particles.get_value(borrow=True),
                            flip=False),
                        'L').save(savename)
        fn()
def main_sample(filename, algo='Gibbs', rng=777888, burn_in=5000, save_interval=5000, n_files=10):
    """Load a pickled RBM, burn in a sampler, then save `n_files` sample PNGs
    taken every `save_interval` steps.  algo is 'Gibbs' or (untested) 'HMC'."""
    rbm = cPickle.load(open(filename))
    if algo == 'Gibbs':
        sampler = Gibbs.alloc(rbm, rbm.conf['batchsize'], rng)
        new_particles  = rbm.gibbs_step_for_v(sampler.particles,
                sampler.s_rng)
        # clip to the configured visible range between steps
        new_particles = tensor.clip(new_particles,
                rbm.conf['particles_min'],
                rbm.conf['particles_max'])
        fn = theano.function([], [],
                updates={sampler.particles: new_particles})
        particles = sampler.particles
    elif algo == 'HMC':
        print "WARNING THIS PROBABLY DOESNT WORK"
        # still need to figure out how to get the clipping into
        # the iterations of mcmc
        sampler = HMC(rbm, rbm.conf['batchsize'], rng)
        ups = sampler.updates()
        ups[sampler.positions] = tensor.clip(ups[sampler.positions],
                rbm.conf['particles_min'],
                rbm.conf['particles_max'])
        fn = theano.function([], [], updates=ups)
        particles = sampler.positions

    for i in xrange(burn_in):
        print i
        if i % 20 == 0:
            # periodic snapshots during burn-in
            savename = '%s_sample_burn_%04i.png'%(filename,i)
            print 'saving'
            Image.fromarray(
                    tile_conv_weights(
                        particles.get_value(borrow=True),
                        flip=False),
                    'L').save(savename)
        fn()

    for n in xrange(n_files):
        for i in xrange(save_interval):
            fn()
        savename = '%s_sample_%04i.png'%(filename,n)
        print 'saving', savename
        Image.fromarray(
                tile_conv_weights(
                    particles.get_value(borrow=True),
                    flip=False),
                'L').save(savename)
def main_print_status(filename, algo='Gibbs', rng=777888, burn_in=500, save_interval=500, n_files=1):
def print_minmax(msg, x):
assert numpy.all(numpy.isfinite(x))
print msg, x.min(), x.max()
rbm = cPickle.load(open(filename))
if algo == 'Gibbs':
sampler = Gibbs.alloc(rbm, rbm.conf['batchsize'], rng)
new_particles = rbm.gibbs_step_for_v(sampler.particles, sampler.s_rng)
#new_particles = tensor.clip(new_particles,
# rbm.conf['particles_min'],
# rbm.conf['particles_max'])
fn = theano.function([], [],
updates={sampler.particles: new_particles})
particles = sampler.particles
elif algo == 'HMC':
print "WARNING THIS PROBABLY DOESNT WORK"
for i in xrange(burn_in):
fn()
print_minmax('particles', particles.get_value(borrow=True))
def main0(rval_doc):
    """Train an RBM from scratch on the configuration in rval_doc['conf'].

    Builds the Brodatz data op, allocates the RBM and Trainer, compiles the
    training function, then loops until the annealed learning rate reaches 0,
    pickling the model every 1000 iterations and saving status/filter images.
    """
    if 'conf' not in rval_doc:
        raise NotImplementedError()
    conf = rval_doc['conf']
    batchsize = conf['batchsize']

    batch_idx = tensor.iscalar()
    batch_range = batch_idx * conf['batchsize'] + numpy.arange(conf['batchsize'])

    if conf['dataset']=='Brodatz':
        n_examples = conf['batchsize']   #64
        n_img_rows = 98
        n_img_cols = 98
        n_img_channels=1
        batch_x = Brodatz_op(batch_range,
                             '../../Brodatz/D6.gif',   # download from http://www.ux.uis.no/~tranden/brodatz.html
                             patch_shape=(n_img_channels,
                                          n_img_rows,
                                          n_img_cols),
                             noise_concelling=0.,
                             seed=3322,
                             batchdata_size=n_examples
                             )
    else:
        raise ValueError('dataset', conf['dataset'])

    rbm = RBM.alloc(
            conf,
            image_shape=(
                n_examples,
                n_img_channels,
                n_img_rows,
                n_img_cols
                ),
            filters_hs_shape=(
                conf['filters_hs_size'],
                conf['n_filters_hs'],
                n_img_channels,
                conf['filters_hs_size'],
                conf['filters_hs_size']
                ),   #fmodules(stride) x filters_per_modules x fcolors(channels) x frows x fcols
            filters_irange=conf['filters_irange'],
            sigma=conf['sigma'],
            )

    rbm.save_weights_to_grey_files('iter_0000')

    # per-example learning rate scaled down by the batch size
    base_lr = conf['base_lr_per_example']/batchsize
    conv_lr_coef = conf['conv_lr_coef']

    trainer = Trainer.alloc(
            rbm,
            visible_batch=batch_x,
            lrdict={
                # higher learning rate ok with CD1
                rbm.filters_hs: sharedX(conv_lr_coef*base_lr, 'filters_hs_lr'),
                rbm.conv_bias_hs: sharedX(base_lr, 'conv_bias_hs_lr'),
                rbm.bias_v: sharedX(base_lr, 'conv_bias_hs_lr')
                },
            conf = conf,
            )

    print 'start building function'
    training_updates = trainer.updates() #
    train_fn = theano.function(inputs=[batch_idx],
            outputs=[],
            #mode='FAST_COMPILE',
            #mode='DEBUG_MODE',
            updates=training_updates
            )  #

    print 'training...'
    iter = 0
    # annealing_coef goes negative after conf['train_iters'] iterations
    while trainer.annealing_coef.get_value()>=0: #
        dummy = train_fn(iter) #
        #trainer.print_status()
        if iter % 1000 == 0:
            rbm.dump_to_file(os.path.join(_temp_data_path_,'rbm_%06i.pkl'%iter))
        if iter <= 1000 and not (iter % 100): #
            trainer.print_status()
            trainer.save_weights_to_grey_files()
        elif not (iter % 1000):
            trainer.print_status()
            trainer.save_weights_to_grey_files()
        iter += 1
def main_train():
    """Entry point for 'train': run main0 with the default configuration."""
    print 'start main_train'
    main0(dict(
            conf=dict(
                dataset='Brodatz',
                chain_reset_prob=.0,#approx CD-50
                unnatural_grad=False,
                train_iters=100000,
                base_lr_per_example=0.00001,
                conv_lr_coef=1.0,
                batchsize=64,
                n_filters_hs=32,
                filters_hs_size=11,
                filters_irange=.005,
                sigma = 0.25,
                n_tiled_conv_offset_diagonally = 1,
                steps_sampling = 1,
                )))
if __name__ == '__main__':
    # CLI dispatch on argv[1]; the remaining commands take a pickled model
    # path as argv[2].  Unknown commands simply fall through and exit 0.
    if sys.argv[1] == 'train':
        sys.exit(main_train())
    if sys.argv[1] == 'sampling':
        sys.exit(main_sample(sys.argv[2]))
    if sys.argv[1] == 'inpaint':
        sys.exit(main_inpaint(sys.argv[2]))
    if sys.argv[1] == 'print_status':
        sys.exit(main_print_status(sys.argv[2]))
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,818 | luoheng/TCssrbm | refs/heads/master | /Brodatz_p.py | import theano
import numpy
from PIL import Image
from protocol_ import TensorFnDataset
floatX=theano.config.floatX
def Brodatz_op(s_idx, filename, patch_shape=(1,98,98), noise_concelling=100, seed=3322, batchdata_size=64, rescale=1.0, validation=False):
    """Return symbolic Brodatz_images[s_idx]

    If s_idx is a scalar, the return value is a tensor3 of shape 1,98,98.
    If s_idx is a vector of len N, the return value
    is a tensor4 of shape N,1,98,98.
    """
    dataset = Brodatz(filename, patch_shape, noise_concelling,
                      seed, batchdata_size, rescale, validation)
    patch_op = TensorFnDataset(floatX,
                               bcast=(False, False, False),
                               fn=dataset.extract_random_patches,
                               single_shape=(1, 98, 98))
    return patch_op(s_idx % batchdata_size)
class Brodatz(object):
    """Loads a Brodatz texture image and serves random training patches.

    The image is downsampled 2x (bicubic), split into a training and a test
    region, and the training region is normalized to zero mean and divided by
    (rescale * std + noise_concelling).
    """
    def __init__(self, filename, patch_shape, noise_concelling, seed, batchdata_size, rescale, validation=False):
        self.patch_shape = patch_shape
        self.filename = filename
        self.ncc = noise_concelling
        self.rng = numpy.random.RandomState(seed)
        self.batchdata_size = batchdata_size
        image = Image.open(filename)
        image_rows, image_cols = image.size
        image = image.resize((int(image_rows/2),int(image_cols/2)), Image.BICUBIC)
        new_image_rows, new_image_cols = image.size
        img_array = numpy.asarray(image, dtype=floatX)
        # BUGFIX (perf): the branches below used to pre-fill training_img /
        # test_img with numpy.zeros buffers that were immediately
        # overwritten; those dead allocations have been removed.
        if validation:
            # Validation mode: train on the left 2/3 of the top half of the
            # image, test on the remaining right third.
            train_validation_limit = int(new_image_cols*2/3)
            self.training_img = img_array[0:int(new_image_rows/2),:train_validation_limit]
            self.test_img = img_array[0:int(new_image_rows/2),train_validation_limit:]
        else:
            # Test mode: train on the whole top half, test on the bottom half.
            self.training_img = img_array[0:int(new_image_rows/2),:]
            self.test_img = img_array[int(new_image_rows/2):,:]
        # single-argument print() is identical on Python 2 and 3
        print("BrodatzOp : using a validation set : " + str(validation))
        print("BrodatzOp : the training image size is : " + str(self.training_img.shape))
        print("BrodatzOp : the test image size is : " + str(self.test_img.shape))
        patch_channels, patch_rows, patch_cols = patch_shape
        assert patch_rows < int(new_image_rows/2)
        assert patch_cols < int(new_image_cols)
        assert patch_channels == 1
        # normalize the training region only
        self.training_img = self.training_img - self.training_img.mean()
        self.training_img = self.training_img/(rescale*self.training_img.std()+self.ncc)
        #self.test_img = self.test_img - self.test_img.mean()
        #self.test_img = self.test_img/(rescale*self.test_img.std()+self.ncc)
        print('the std of the training data is:%f' % self.training_img.std())

    def extract_random_patches(self):
        """Return (batchdata_size, 1, patch_rows, patch_cols) patches drawn
        uniformly at random from the training image."""
        N = self.batchdata_size
        _, patch_rows, patch_cols = self.patch_shape
        img_rows, img_cols = self.training_img.shape
        rval = numpy.zeros((N,1,patch_rows,patch_cols), dtype=self.training_img.dtype)
        offsets_row = self.rng.randint(img_rows-patch_rows+1, size=N)
        offsets_col = self.rng.randint(img_cols-patch_cols+1, size=N)
        for n, (r,c) in enumerate(zip(offsets_row, offsets_col)):
            rval[n,0,:,:] = self.training_img[r:r+patch_rows,c:c+patch_cols]
        return rval
65,819 | luoheng/TCssrbm | refs/heads/master | /unshared_conv_diagonally.py | """
XXX
"""
import pdb
import numpy
import theano
import StringIO
from theano.tensor import blas
from theano import gof, tensor, scalar
def any_symbolic(*args):
    """
    Return True iff any a in `args` is a theano Variable
    """
    return any(isinstance(a, theano.Variable) for a in args)
def not_symbolic(*args):
    # True iff none of `args` is a theano Variable.
    return not any_symbolic(*args)
class Base(theano.Op):
    """Shared boilerplate for the tiled-convolution ops: stores the module
    stride and OpenMP flag, and defines identity (eq/hash/str) on them."""

    def __init__(self,
                 module_stride=1,
                 openmp=None
                 ):
        self.module_stride = module_stride
        # fall back to the global theano configuration when unspecified
        self.openmp = theano.config.openmp if openmp is None else openmp

    def _attributes(self):
        # the tuple of fields that defines this op's identity
        return (
            self.module_stride,
            self.openmp
        )

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self._attributes() == other._attributes()

    def __hash__(self):
        return hash((type(self), self._attributes()))

    def __str__(self):
        return '%s{module_stride=%i,openmp=%d}' % (
            self.__class__.__name__,
            self.module_stride,
            self.openmp
        )

    def c_compile_args(self):
        # enable OpenMP in the generated C code when requested
        return ['-fopenmp'] if self.openmp else []
class FilterActs(Base):
"""
Images of shape: colors x
Filters are of shape:
channels
"""
def __init__(self,
             module_stride=1,
             openmp=None,
             fcols=None,
             frows=None
             ):
    """Store stride/openmp (via Base) plus the optional filter dims.

    CONSISTENCY: previously duplicated Base.__init__'s openmp-defaulting
    logic inline; delegating to super keeps it in one place with
    identical resulting attributes.
    """
    super(FilterActs, self).__init__(module_stride=module_stride,
                                     openmp=openmp)
    self.fcols = fcols
    self.frows = frows
def _attributes(self):
return (
self.module_stride,
self.openmp,
self.fcols,
self.frows
)
def c_support_code(self):
return blas.blas_header_text()
def c_libraries(self):
return blas.ldflags()
def c_headers(self):
return ["omp.h"]
def c_compile_args(self):
ret = blas.ldflags(libs=False, flags=True)
if self.openmp:
ret += ['-fopenmp']
return ret
def c_lib_dirs(self):
return blas.ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return blas.ldflags(libs=False, include_dir=True)
@classmethod
def infer_shape_without_instance(cls, ishape, fshape):
icount, icolors, irows, icols = ishape
fmodules, filters_per_module, fcolors, frows, fcols = fshape
if not any_symbolic(irows, icols) and irows != icols:
raise ValueError("non-square image argument",
(irows, icols))
if not any_symbolic(frows, fcols) and frows != fcols:
raise ValueError("non-square filter shape",
(frows, fcols))
if (not any_symbolic(icolors, fcolors)
and icolors != fcolors):
raise ValueError("color counts don't match",
(icolors, fcolors))
if (irows < frows or icols < fcols):
raise ValueError("filters' size is too small",
(irows, icols))
hrows = irows / frows
hcols = icols / fcols
hshape = (icount, fmodules, filters_per_module, hrows, hcols)
return hshape
def make_node(self, images, filters):
images = theano.tensor.as_tensor_variable(images)
filters = theano.tensor.as_tensor_variable(filters)
ibcast = images.broadcastable
fbcast = filters.broadcastable
icount, icolors, irows, icols = ibcast
fmodules, filters_per_module, fcolors, frows, fcols = fbcast #fmodules will alone the diagonal of the images
#print fmodules, fcolors, frows, fcols, filters_per_module
hbcast = (icount, fmodules, filters_per_module, frows, fcols) #should be (False, False, False, False, False)
htype = theano.tensor.TensorType(
dtype=images.dtype,
broadcastable=hbcast)
if images.dtype != filters.dtype:
raise TypeError('dtype mismatch', (images, filters))
return theano.gof.Apply(self,
[images, filters],
[htype()])
def perform(self, node, iargs, ostor):
#print 'into FilterActs.perform'
images, filters = iargs
# icount : number of images in minibatch
# icolors : number of color channel in the image ( 1=grayscale, 3=RGB, ...)
# irows and icols : size of each image
icount, icolors, irows, icols = images.shape
fmodules, filters_per_module, fcolors, frows, fcols = filters.shape
hshape = self.infer_shape(node, (images.shape, filters.shape))[0]
_, _, _, hrows, hcols = hshape
hidacts = numpy.zeros(hshape, dtype=images.dtype)
for m in xrange(fmodules):
for hR in xrange(hrows):
img_r_offset = m * self.module_stride + hR * frows
for hC in xrange(hcols):
img_c_offset = m * self.module_stride + hC * fcols
rc_images = images[:, :,
img_r_offset:img_r_offset + frows,
img_c_offset:img_c_offset + fcols]
rc_filters = filters[m]
# rc_images are count x fcolors x frows x fcols
# rc_filters are fpm x fcolors x frows x fcols
rc_hidacts = numpy.dot(
rc_images.reshape(icount, -1),
rc_filters.reshape(filters_per_module, -1).T
)
hidacts[:, m, :, hR, hC] = rc_hidacts
if False:
# I didn't run all the tests as this is too long, but it seam good.
hidacts2 = numpy.zeros(hshape, dtype=images.dtype)
for m in xrange(fmodules):
rc_filters = filters[m]
for hR in xrange(hrows):
img_r_offset = m * self.module_stride + hR * frows
for hC in xrange(hcols):
img_c_offset = m * self.module_stride + hC * fcols
rc_images = images[:, :,
img_r_offset:img_r_offset + frows,
img_c_offset:img_c_offset + fcols]
# rc_images are count x fcolors x frows x fcols
# rc_filters are fpm x fcolors x frows x fcols
A = rc_images.reshape(icount, -1)
B = rc_filters.reshape(filters_per_module, -1).T
for i in range(A.shape[0]):
for j in range(B.shape[1]):
s = 0
for k in range(A.shape[1]):
s += A.item(i, k) * B.item(k, j)
hidacts2[i, m, j, hR, hC] = s
assert numpy.allclose(hidacts, hidacts2)
ostor[0][0] = hidacts
#print 'exiting FilterActs.perform'
if 0:
print 'FilterActs shapes: images', images.shape
print 'FilterActs shapes: filters', filters.shape
print 'FilterActs shapes: hidacts', hidacts.shape
def c_code(self, node, node_name, input_names, output_names, sub):
# Extract input values
images, filters = input_names
#filters, hidacts, irows, icols = input_names
# Extract output values
output = output_names[0]
# Assign self.module_stride to a local variable else the
# %(module_stride)s fails
module_stride = self.module_stride
#Generate C code
fail = sub['fail']
sio = StringIO.StringIO()
fcols = self.fcols
openmp = int(self.openmp)
if fcols is None:
fcols = "%(filters)s->dimensions[4]" % locals()
frows = self.frows
if frows is None:
frows = "%(filters)s->dimensions[3]" % locals()
if node.outputs[0].dtype == 'float32':
gemm = "sgemm_"
elif node.outputs[0].dtype == 'float64':
gemm = "dgemm_"
else:
raise Exception()
print >> sio, """
// Validate the number of dimensions and the
// data type of the input tensors
if (%(images)s->nd != 4){
PyErr_SetString(PyExc_ValueError,
"FilterActs: images not a 4d tensor");
%(fail)s;
}
if (%(filters)s->nd != 5){
PyErr_SetString(PyExc_ValueError,
"FilterActs: filters not a 5d tensor");
%(fail)s;
}
if ((%(images)s->descr->type_num != PyArray_DOUBLE) &&
(%(images)s->descr->type_num != PyArray_FLOAT)){
PyErr_SetString(PyExc_TypeError,
"FilterActs: images type should be float32 or float64");
%(fail)s;
}
if ((%(filters)s->descr->type_num != PyArray_DOUBLE) &&
(%(filters)s->descr->type_num != PyArray_FLOAT)){
PyErr_SetString(PyExc_TypeError,
"FilterActs: filters type should be float32 or float64");
%(fail)s;
}
if ( %(fcols)s != %(filters)s->dimensions[4]){
PyErr_Format(PyExc_ValueError,
"FilterActs: fcols was set to %%d, but the input shape is %%d",
%(fcols)s, %(filters)s->dimensions[4]);
%(fail)s;
}
if ( %(frows)s != %(filters)s->dimensions[3]){
PyErr_Format(PyExc_ValueError,
"FilterActs: frows was set to %%d, but the input shape is %%d",
%(frows)s, %(filters)s->dimensions[3]);
%(fail)s;
}
{ // New scope level to avoid cross-initialization
// Extract input variables
const int icount = %(images)s->dimensions[0];
const int icolors = %(images)s->dimensions[1];
const int irows = %(images)s->dimensions[2];
const int icols = %(images)s->dimensions[3];
const int fmodules = %(filters)s->dimensions[0];
const int filters_per_module = %(filters)s->dimensions[1];
const int fcolors = %(filters)s->dimensions[2];
const int frows = %(frows)s;
const int fcols = %(fcols)s;
const int module_stride = %(module_stride)s;
PyArrayObject* c_images = PyArray_GETCONTIGUOUS(%(images)s);
PyArrayObject* c_filters = PyArray_GETCONTIGUOUS(%(filters)s);
// Validate the shape of the input tensors
if ( irows != icols ){
PyErr_SetString(PyExc_ValueError,
"FilterActs: non-square images argument");
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
if ( frows != fcols ){
PyErr_SetString(PyExc_ValueError,
"FilterActs: non-square filter shape");
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
if ( icolors != fcolors ){
PyErr_SetString(PyExc_ValueError,
"FilterActs: inconsistent number of colors arguments");
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
if ( ! c_images ){
PyErr_SetString(PyExc_ValueError, "Not able to get c contiguous images");
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
// Ensure output array is of the proper format
npy_intp outputDims[5];
outputDims[0] = icount;
outputDims[1] = fmodules;
outputDims[2] = filters_per_module;
outputDims[3] = irows / frows;
outputDims[4] = icols / fcols;
if (NULL == %(output)s ||
(%(output)s->dimensions[0] != outputDims[0]) ||
(%(output)s->dimensions[1] != outputDims[1]) ||
(%(output)s->dimensions[2] != outputDims[2]) ||
(%(output)s->dimensions[3] != outputDims[3]) ||
(%(output)s->dimensions[4] != outputDims[4]) ||
(!PyArray_ISBEHAVED(%(output)s)) ||
((%(output)s->descr->type_num != PyArray_DOUBLE) &&
(%(output)s->descr->type_num != PyArray_FLOAT)) )
{
// The output array has not been declared or
// is of an invalid format.
if (NULL != %(output)s) Py_XDECREF(%(output)s);
%(output)s = (PyArrayObject*)PyArray_EMPTY(5, outputDims,
%(filters)s->descr->type_num, 0);
if(!%(output)s) {
PyErr_SetString(PyExc_MemoryError,
"FilterActs: failed to alloc memory for output");
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
}else{
//No need to initialize the ouput to 0.
}
// Compute the output
// We reshape the images: rc_images.reshape(icount, -1)
// In number of elements
const int img_strd_0 = icolors * irows * icols;
const int img_strd_1 = c_images->strides[1] / PyArray_ITEMSIZE(c_images);
const int img_strd_2 = c_images->strides[2] / PyArray_ITEMSIZE(c_images);
const int img_strd_3 = 1;//c_images->strides[3] / PyArray_ITEMSIZE(c_images);
// We reshape and transpose the filter:
// src_filters.reshape(filters_per_module, -1).T
// In number of elements
const int fil_strd_0 = 1;//c_filters->strides[4] / PyArray_ITEMSIZE(c_filters);
const int fil_strd_1 = c_filters->strides[1] / PyArray_ITEMSIZE(c_filters);
const int out_strd_i = %(output)s->strides[0] / PyArray_ITEMSIZE(%(output)s);
const int out_strd_j = %(output)s->strides[2] / PyArray_ITEMSIZE(%(output)s);
// Check if BLAS' gemm can be used to speed up the computations
// TODO: for filters, we only need the 3 last dimensions to be contiguous
// so we can remove the getcontiguous for many strided cases
// TODO: for images, we probably already support all input contiguous cases as we copy it.
bool useBlas = PyArray_ISCONTIGUOUS(c_images) &&
PyArray_ISCONTIGUOUS(c_filters) &&
icolors == 1 &&
(PyArray_TYPE(%(images)s) ==
PyArray_TYPE(%(filters)s)) &&
(PyArray_TYPE(%(images)s) ==
PyArray_TYPE(%(output)s));
if(useBlas){
int nb_threads = 1;
if(%(openmp)s)
nb_threads = omp_get_max_threads();
//Allocate temporary storare for output of gemm
npy_intp gemm_out_dim[2];
gemm_out_dim[0] = icount;
gemm_out_dim[1] = filters_per_module;
PyArrayObject* gemm_outs[nb_threads];
for(int i = 0; i< nb_threads; i++){
gemm_outs[i] = (PyArrayObject*)PyArray_EMPTY(2,
gemm_out_dim,
%(output)s->descr->type_num,
0);
if(!gemm_outs[i]) {
PyErr_SetString(PyExc_MemoryError,
"FilterActs: failed to alloc memory for gemm_out");
for(int j = 0; j < i; j++)
Py_DECREF(gemm_outs[j]);
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
}
//Allocate temporary storare for the images passed to gemm
//This is needed as the memory order is not right.
npy_intp gemm_img_dim[2];
gemm_img_dim[0] = icount;
gemm_img_dim[1] = fcolors * frows * fcols;
PyArrayObject* gemm_imgs[nb_threads];
for(int i = 0; i< nb_threads; i++){
gemm_imgs[i] = (PyArrayObject*)PyArray_EMPTY(2,
gemm_img_dim,
%(images)s->descr->type_num,
1);
if(!gemm_imgs[i]) {
PyErr_SetString(PyExc_MemoryError,
"FilterActs: failed to alloc memory for gemm_img");
for(int j = 0; j < nb_threads; j++)
Py_DECREF(gemm_outs[j]);
for(int j = 0; j < i; j++)
Py_DECREF(gemm_imgs[j]);
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
%(fail)s;
}
}
#pragma omp parallel default(none) shared(c_filters, c_images, outputDims,\
%(output)s, %(images)s,\
gemm_outs, gemm_imgs) if(%(openmp)s)
{
PyArrayObject* gemm_out = gemm_outs[omp_get_thread_num()];
PyArrayObject* gemm_img = gemm_imgs[omp_get_thread_num()];
char noTrans = 'N';
char Trans = 'T';
const dtype_%(output)s alpha = 1.0f;
const dtype_%(output)s beta = 0.0f;
const int LDA = fcolors * frows * fcols;
const int LDB = fcolors * frows * fcols;
const int LDC = filters_per_module;
const int K = fcolors * frows * fcols;
#pragma omp for schedule(static)
for(int m=0; m<fmodules; m++){
dtype_%(filters)s* rc_filters = (dtype_%(filters)s*)(
c_filters->data +
m * c_filters->strides[0]);
for(int hR=0; hR<outputDims[3]; hR++){ // loop hrows time
int img_r_offset = m * module_stride + hR * frows;
for(int hC=0; hC<outputDims[4]; hC++){ //loop hcols time
int img_c_offset = m * module_stride + hC * fcols;
dtype_%(images)s* rc_images = (dtype_%(images)s*)(
c_images->data +
img_r_offset * c_images->strides[2] +
img_c_offset * c_images->strides[3]);
//copy the images into gemm_img
dtype_%(images)s* gemm_img_ptr = (dtype_%(images)s*) gemm_img->data;
// raise(SIGINT);
for(int ic=0; ic<icount; ic++){
rc_images += ic*img_strd_0;
for(int i=0; i<fcolors; i++){
rc_images += i*img_strd_1;
for(int j=0; j<frows; j++){
rc_images += j*img_strd_2;
memcpy(gemm_img_ptr, rc_images, fcols * PyArray_ITEMSIZE(%(images)s));
gemm_img_ptr += fcols;
rc_images -= j*img_strd_2;
}
rc_images -= i*img_strd_1;
}
rc_images -= ic*img_strd_0;
}
//call gemm, it expect input as f order, so we need to swap inputs.
%(gemm)s(&Trans, &noTrans,
&filters_per_module, &icount, &K,
&alpha, rc_filters, &LDB, (dtype_%(images)s*)gemm_img->data, &LDA,
&beta, (dtype_%(output)s*) PyArray_DATA(gemm_out), &LDC);
//copy the output into out_ptr
dtype_%(output)s* out_ptr = (dtype_%(output)s*)(
%(output)s->data +
//i * %(output)s->strides[0] +
m * %(output)s->strides[1] +
//j * %(output)s->strides[2] +
hR * %(output)s->strides[3] +
hC * %(output)s->strides[4]);
dtype_%(output)s* gemm_out_ptr = (dtype_%(output)s*)PyArray_DATA(gemm_out);
int gemm_out_s0 = gemm_out->strides[0] / PyArray_ITEMSIZE(gemm_out);
int gemm_out_s1 = gemm_out->strides[1] / PyArray_ITEMSIZE(gemm_out);
for(int i=0; i<icount;
i++, out_ptr += out_strd_i,
gemm_out_ptr += gemm_out_s0){
for(int j=0; j<filters_per_module; j++){
out_ptr[j * out_strd_j] = *(gemm_out_ptr + j * gemm_out_s1);
}
}
}
}
}
}//parallel
for(int i = 0; i< nb_threads; i++){
Py_DECREF(gemm_outs[i]);
Py_DECREF(gemm_imgs[i]);
}
}else{
#pragma omp parallel for schedule(static) default(none) shared(c_filters, c_images, outputDims, %(output)s)
for(int m=0; m<fmodules; m++){
dtype_%(filters)s* rc_filters = (dtype_%(filters)s*)(
c_filters->data +
m * c_filters->strides[0]);
for(int hR=0; hR<outputDims[3]; hR++){ // loop hrows time
int img_r_offset = m * module_stride + hR * frows;
for(int hC=0; hC<outputDims[4]; hC++){ // loop hcols time
int img_c_offset = m * module_stride + hC * fcols;
dtype_%(images)s* rc_images = (dtype_%(images)s*)(
c_images->data +
img_r_offset * c_images->strides[2] +
img_c_offset * c_images->strides[3]);
dtype_%(output)s* __restrict__ out_ptr = (dtype_%(output)s*)(
%(output)s->data +
//i * %(output)s->strides[0] +
m * %(output)s->strides[1] +
//j * %(output)s->strides[2] +
hR * %(output)s->strides[3] +
hC * %(output)s->strides[4]);
//TODO raise(SIGINT);
for(int i=0; i<icount; i++, out_ptr += out_strd_i){
dtype_%(images)s* v1 = &rc_images[i * img_strd_0];
for(int j=0; j<filters_per_module; j++){
dtype_%(output)s sum = 0.0f;
dtype_%(filters)s* v2 = &rc_filters[j * fil_strd_1];
for(int k_colors=0, k_fil=0; k_colors<fcolors; k_colors++){
for(int k_i=0; k_i<frows; k_i++){
for(int k_j=0; k_j<fcols;
k_j++, k_fil++){
sum += v1[k_colors * img_strd_1 +
k_i * img_strd_2 +
k_j * img_strd_3] *
v2[k_fil * fil_strd_0];
}
}
}
out_ptr[j * out_strd_j] = sum;
}
}
}
}
}
}//if useBlas
Py_XDECREF(c_images);
Py_XDECREF(c_filters);
}
"""
return sio.getvalue() % locals()
def grad(self, inputs, goutputs):
images, filters = inputs
_, _, _, frows, fcols = filters.shape
_, _, irows, icols = images.shape
gimages = ImgActs(module_stride=self.module_stride)(
filters, goutputs[0], irows, icols)
gfilters = WeightActs(module_stride=self.module_stride)(
images, goutputs[0], frows, fcols)
return [gimages, gfilters]
def infer_shape(self, node, shapes):
ishape, fshape = shapes
icount, icolors, irows, icols = ishape
fmodules, filters_per_module, fcolors, frows, fcols = fshape
if not any_symbolic(irows, icols) and irows != icols:
raise ValueError("non-square image argument",
(irows, icols))
if not any_symbolic(frows, fcols) and frows != fcols:
raise ValueError("non-square filter shape",
(frows, fcols))
if (not any_symbolic(icolors, fcolors)
and icolors != fcolors):
raise ValueError("color counts don't match",
(icolors, fcolors))
"""
if (irows < frows or icols < fcols):
raise ValueError("filters' size is too small",
(irows, icols))
"""
hrows = irows / frows
hcols = icols / fcols
hshape = (icount, fmodules, filters_per_module, hrows, hcols)
return [hshape]
class WeightActs(Base):
    """
    Gradient of FilterActs with respect to the filters.

    images:  4d tensor (icount, icolors, irows, icols)
    hidacts: 5d tensor (hcount, fmodules, filters_per_module, hrows, hcols)
    output:  5d tensor (fmodules, filters_per_module, fcolors, frows, fcols)
    """
    def make_node(self, images, hidacts, frows, fcols):
        """
        Build the Apply node; frows/fcols are integer scalars giving the
        filter shape of the 5d output.
        """
        images, hidacts, frows, fcols = map(theano.tensor.as_tensor_variable,
                [images, hidacts, frows, fcols])
        if frows.dtype[:3] not in ('int', 'uin'): #dtype is a string. should be 'int8' 'int16' 'uint8' ...
            raise TypeError(frows)
        if fcols.dtype[:3] not in ('int', 'uin'):
            # NOTE(review): reports frows here; probably meant fcols.
            raise TypeError(frows)
        if frows.ndim:
            raise TypeError('frows should be scalar', frows)
        if fcols.ndim:
            raise TypeError('fcols should be scalar', fcols)
        if images.dtype != hidacts.dtype: #should be floatX
            raise TypeError('images and hidacts dtype mismatch',
                    (images.dtype, hidacts.dtype))
        icount, icolors, irows, icols = images.type.broadcastable #should be (False, False, False, False)
        #print icolors, irows, icols, icount
        hcount, fmodules, filters_per_module, hrows, hcols = hidacts.type.broadcastable
        otype = theano.tensor.TensorType(
                dtype=images.dtype,
                broadcastable=(fmodules, filters_per_module, icolors,
                    False, False)) #frows and fcols should not be broadcastable
        return theano.Apply(self,
                [images, hidacts, frows, fcols],
                [otype()])

    def perform(self, node, iargs, ostor):
        """
        Numpy reference implementation: accumulate, for every module and
        output position, the outer product of hidden activations and the
        matching image window.
        """
        #print 'into WeightActs.perform'
        images, hidacts, frows, fcols = iargs
        if frows != fcols:
            # this could be implemented, but GPU case doesn't do it
            raise NotImplementedError("non-square filter shape",
                    (frows, fcols))
        icount, fmodules, filters_per_module, hrows, hcols = hidacts.shape
        fshape = list(self.infer_shape(node,
            (images.shape, hidacts.shape, (), ()))[0]) #why put (frows,) and (fcols,) here
        fcolors = fshape[2]
        fshape[3] = frows
        fshape[4] = fcols
        filters = numpy.zeros(fshape, dtype=images.dtype)
        for m in xrange(fmodules):
            for hR in xrange(hrows):
                for hC in xrange(hcols):
                    # Window origin shifts with the module index.
                    img_r_offset = m*self.module_stride + hR*frows
                    img_c_offset = m*self.module_stride + hC*fcols
                    rc_images = images[:,:,
                            img_r_offset:img_r_offset + frows,
                            img_c_offset:img_c_offset + fcols]
                    # rc_images is icount x icolors x irows x icols
                    rc_hidacts = hidacts[:, m, :, hR, hC]
                    # rc_hidacts is count x fpm
                    rc_filters = numpy.dot(
                            rc_hidacts.T,
                            rc_images.reshape(icount, -1))
                    filters[m, :, :, :, :] += rc_filters.reshape(
                            (filters_per_module, fcolors, frows, fcols))
        ostor[0][0] = filters

    def c_support_code(self):
        # BLAS prototypes (gemm/gemv) used by the generated C code.
        return blas.blas_header_text()

    def c_libraries(self):
        return blas.ldflags()

    def c_compile_args(self):
        return blas.ldflags(libs=False, flags=True)

    def c_lib_dirs(self):
        return blas.ldflags(libs=False, libs_dir=True)

    def c_header_dirs(self):
        return blas.ldflags(libs=False, include_dir=True)

    def c_code(self, node, node_name, input_names, output_names, sub):
        """
        Generate C code: validate inputs, zero the 5d output, then
        accumulate each module/position contribution with BLAS gemm on
        repacked (contiguous) copies of the image windows and hidacts.
        """
        # Extract input values
        images, hidacts, frows, fcols = input_names
        #filters, hidacts, irows, icols = input_names
        # Determine which BLAS function to use
        conv_type = scalar.upcast(node.inputs[0].type.dtype,
                                  node.inputs[1].type.dtype)
        if conv_type == 'float32':
            conv_type = "float"
            gemv = "sgemv_"
            gemm = "sgemm_"
        elif conv_type == 'float64':
            conv_type = "double"
            gemv = "dgemv_"
            gemm = "dgemm_"
        else:
            raise Exception()
        # Extract output values
        output = output_names[0]
        # Assign self.module_stride to a local variable else
        # the %(module_stride)s fails
        module_stride = self.module_stride
        #Generate C code
        fail = sub['fail']
        sio = StringIO.StringIO()
        # NOTE(review): in the generated code below, img_c_offset is
        # computed as "hC * frows" rather than "hC * fcols"; benign only
        # because frows == fcols is enforced above — confirm.
        print >> sio, """
        // Validate the shape and the data type of the input tensors
        if (%(hidacts)s->nd != 5){
            PyErr_SetString(PyExc_ValueError, "hidacts not a 5d tensor");
            %(fail)s;
        }
        if (%(images)s->nd != 4){
            PyErr_SetString(PyExc_ValueError, "images not a 4d tensor");
            %(fail)s;
        }
        if ((%(hidacts)s->descr->type_num != PyArray_DOUBLE) &&
            (%(hidacts)s->descr->type_num != PyArray_FLOAT)){
            PyErr_SetString(PyExc_TypeError,
                            "hidacts type should be float32 or float64");
            %(fail)s;
        }
        if ((%(images)s->descr->type_num != PyArray_DOUBLE) &&
            (%(images)s->descr->type_num != PyArray_FLOAT)){
            PyErr_SetString(PyExc_TypeError,
                            "images type should be float32 or float64");
            %(fail)s;
        }
        if (%(images)s->descr->type_num != %(hidacts)s->descr->type_num){
            PyErr_SetString(PyExc_TypeError,
                            "images and hidacts should have the same type");
            %(fail)s;
        }
        { // New scope level to avoid cross-initialization
            // Extract input variables
            int hcount = %(hidacts)s->dimensions[0];
            int fmodules = %(hidacts)s->dimensions[1];
            int filters_per_module = %(hidacts)s->dimensions[2];
            int hrows = %(hidacts)s->dimensions[3];
            int hcols = %(hidacts)s->dimensions[4];
            int icount = %(images)s->dimensions[0];
            int icolors = %(images)s->dimensions[1];
            int irows = %(images)s->dimensions[2];
            int icols = %(images)s->dimensions[3];
            int frows = ((dtype_%(frows)s *) (%(frows)s->data))[0];
            int fcols = ((dtype_%(fcols)s *) (%(fcols)s->data))[0];
            int module_stride = %(module_stride)s;
            // Validate the shape of the input tensors
            if ( hrows != hcols ){
                PyErr_SetString(PyExc_ValueError,
                                "non-square hidacts argument");
                %(fail)s;
            }
            if ( frows != fcols ){
                PyErr_SetString(PyExc_ValueError,
                                "non-square filter shape");
                %(fail)s;
            }
            if ( irows != icols ){
                PyErr_SetString(PyExc_ValueError,
                                "non-square image argument");
                %(fail)s;
            }
            if ( hcount != icount ){
                PyErr_SetString(PyExc_ValueError,
                                "inconsistent batch size");
                %(fail)s;
            }
            if (hrows * frows + fmodules - 1 != irows){
                PyErr_SetString(
                    PyExc_ValueError,
                    "hrows * frows + fmodules - 1 should be equal to irows");
                %(fail)s;
            }
            if (hcols * fcols + fmodules - 1 != icols){
                PyErr_SetString(
                    PyExc_ValueError,
                    "hcols * fcols + fmodules - 1 should be equal to icols");
                %(fail)s;
            }
            // Ensure output array is of the proper format
            if (NULL == %(output)s ||
                (%(output)s->dimensions[0] != fmodules) ||
                (%(output)s->dimensions[1] != filters_per_module) ||
                (%(output)s->dimensions[2] != icolors) ||
                (%(output)s->dimensions[3] != frows) ||
                (%(output)s->dimensions[4] != fcols) ||
                (!PyArray_ISCARRAY(%(output)s)) ||
                ((%(output)s->descr->type_num != PyArray_DOUBLE) &&
                 (%(output)s->descr->type_num != PyArray_FLOAT)))
            {
                // The output array is of an invalid format.
                if (NULL != %(output)s) Py_XDECREF(%(output)s);
                npy_intp outputDims[5];
                outputDims[0] = fmodules;
                outputDims[1] = filters_per_module;
                outputDims[2] = icolors;
                outputDims[3] = frows;
                outputDims[4] = fcols;
                %(output)s = (PyArrayObject*)PyArray_ZEROS(5, outputDims,
                                              %(images)s->descr->type_num, 0);
                if(!%(output)s) {
                    PyErr_SetString(PyExc_MemoryError,
                                    "failed to alloc memory for output");
                    %(fail)s;
                }
            }else{
                // The output array is of the proper format.
                // Its content must be initialized to zeros.
                dtype_%(output)s* data_ptr =
                        (dtype_%(output)s*)PyArray_DATA(%(output)s);
                memset(data_ptr, 0, PyArray_ITEMSIZE(%(output)s) *
                                    PyArray_SIZE(%(output)s));
            }
            // Extract the arrays' strides
            npy_intp hidacts_count_stride = PyArray_STRIDE(%(hidacts)s, 0) /
                                            PyArray_ITEMSIZE(%(hidacts)s);
            npy_intp hidacts_module_stride = PyArray_STRIDE(%(hidacts)s, 1) /
                                             PyArray_ITEMSIZE(%(hidacts)s);
            npy_intp hidacts_filter_stride = PyArray_STRIDE(%(hidacts)s, 2) /
                                             PyArray_ITEMSIZE(%(hidacts)s);
            npy_intp hidacts_hrows_stride = PyArray_STRIDE(%(hidacts)s, 3) /
                                            PyArray_ITEMSIZE(%(hidacts)s);
            npy_intp hidacts_hcols_stride = PyArray_STRIDE(%(hidacts)s, 4) /
                                            PyArray_ITEMSIZE(%(hidacts)s);
            npy_intp images_count_stride = PyArray_STRIDE(%(images)s, 0) /
                                           PyArray_ITEMSIZE(%(images)s);
            npy_intp images_color_stride = PyArray_STRIDE(%(images)s, 1) /
                                           PyArray_ITEMSIZE(%(images)s);
            npy_intp images_irows_stride = PyArray_STRIDE(%(images)s, 2) /
                                           PyArray_ITEMSIZE(%(images)s);
            npy_intp images_icols_stride = PyArray_STRIDE(%(images)s, 3) /
                                           PyArray_ITEMSIZE(%(images)s);
            npy_intp output_module_stride = PyArray_STRIDE(%(output)s, 0) /
                                            PyArray_ITEMSIZE(%(output)s);
            npy_intp output_filter_stride = PyArray_STRIDE(%(output)s, 1) /
                                            PyArray_ITEMSIZE(%(output)s);
            npy_intp output_color_stride = PyArray_STRIDE(%(output)s, 2) /
                                           PyArray_ITEMSIZE(%(output)s);
            npy_intp output_frows_stride = PyArray_STRIDE(%(output)s, 3) /
                                           PyArray_ITEMSIZE(%(output)s);
            npy_intp output_fcols_stride = PyArray_STRIDE(%(output)s, 4) /
                                           PyArray_ITEMSIZE(%(output)s);
            // Allocate memory for the array in which the content of
            // %(images)s will be copied so that it will be C Contiguous for
            // BLAS' gemm function
            npy_intp dotPDims[2];
            dotPDims[0] = icount;
            dotPDims[1] = icolors * frows * fcols;
            PyArrayObject* img_C =
                    (PyArrayObject*)PyArray_EMPTY(2, dotPDims,
                                                  %(output)s->descr->type_num,
                                                  0);
            if(!img_C) {
                PyErr_SetString(PyExc_MemoryError,
                                "failed to alloc memory for img_C");
                %(fail)s;
            }
            dtype_%(output)s* img_C_ptr = (dtype_%(output)s*)(img_C->data);
            // Allocate memory for the array in which the content of hidacts
            // will be copied so that it will be C Contiguous for BLAS'
            // gemm function
            PyArrayObject* hid_C_view =
                    (PyArrayObject*)PyArray_SwapAxes(%(hidacts)s, 0, 4);
            hid_C_view = (PyArrayObject*)PyArray_SwapAxes(hid_C_view, 2, 3);
            PyArrayObject* hid_C =
                    (PyArrayObject*)PyArray_EMPTY(5, hid_C_view->dimensions,
                                                  hid_C_view->descr->type_num,
                                                  0);
            if(!hid_C) {
                PyErr_SetString(PyExc_MemoryError,
                                "failed to alloc memory for hid_C");
                Py_XDECREF(img_C);
                %(fail)s;
            }
            if(PyArray_CopyInto(hid_C, hid_C_view) != 0){
                PyErr_SetString(PyExc_MemoryError,
                                "failed to copy data to hid_C");
                Py_XDECREF(img_C);
                Py_XDECREF(hid_C);
                %(fail)s;
            }
            dtype_%(output)s* hid_C_ptr = (dtype_%(output)s*)(hid_C->data);
            npy_intp hidC_count_stride = PyArray_STRIDE(hid_C, 4) /
                                         PyArray_ITEMSIZE(hid_C);
            npy_intp hidC_module_stride = PyArray_STRIDE(hid_C, 1) /
                                          PyArray_ITEMSIZE(hid_C);
            npy_intp hidC_filter_stride = PyArray_STRIDE(hid_C, 3) /
                                          PyArray_ITEMSIZE(hid_C);
            npy_intp hidC_hrows_stride = PyArray_STRIDE(hid_C, 2) /
                                         PyArray_ITEMSIZE(hid_C);
            npy_intp hidC_hcols_stride = PyArray_STRIDE(hid_C, 0) /
                                         PyArray_ITEMSIZE(hid_C);
            // Allocate variable used to call the BLAS function
            char noTrans = 'N';
            %(conv_type)s alpha = 1.0f;
            %(conv_type)s beta = 1.0f;
            int gemm_m = icolors * frows * fcols;
            int gemm_n = filters_per_module;
            int gemm_k = icount;
            // Compute the output
            dtype_%(images)s* images_ptr =
                    (dtype_%(images)s*)PyArray_DATA(%(images)s);
            dtype_%(output)s* output_ptr =
                    (dtype_%(output)s*)PyArray_DATA(%(output)s);
            for(int m=0; m < fmodules; m++){
                hid_C_ptr += m * hidC_module_stride;
                output_ptr += m * output_module_stride;
                for(int hR=0; hR < hrows; hR++){
                    hid_C_ptr += hR * hidC_hrows_stride;
                    int img_r_offset = m * module_stride + hR * frows;
                    for(int hC=0; hC < hcols; hC++){
                        hid_C_ptr += hC * hidC_hcols_stride;
                        int img_c_offset = m * module_stride + hC * frows;
                        // Copy the relevant data from images into
                        // the img_C array
                        for(int icountIndex=0; icountIndex < icount;
                            icountIndex++){
                            images_ptr += icountIndex * images_count_stride;
                            img_C_ptr += icountIndex * icolors * frows *
                                         fcols;
                            for(int icolorsIndex=0; icolorsIndex < icolors;
                                icolorsIndex++){
                                images_ptr += icolorsIndex *
                                              images_color_stride;
                                img_C_ptr += icolorsIndex * frows * fcols;
                                for(int frowsIndex=0; frowsIndex < frows;
                                    frowsIndex++){
                                    images_ptr += (img_r_offset +
                                                   frowsIndex) *
                                                  images_irows_stride;
                                    img_C_ptr += frowsIndex * fcols;
                                    // Copy fcols elements from images_ptr
                                    // to img_C_ptr
                                    if( PyArray_ISCARRAY(%(images)s) ){
                                        images_ptr += img_c_offset;
                                        memcpy(img_C_ptr, images_ptr,
                                               fcols *
                                               PyArray_ITEMSIZE(%(images)s));
                                        images_ptr -= img_c_offset;
                                    }else{
                                        for(int fcolsIndex=0; fcolsIndex < fcols;
                                            fcolsIndex++){
                                            images_ptr += (img_c_offset +
                                                           fcolsIndex) *
                                                          images_icols_stride;
                                            img_C_ptr[fcolsIndex] = images_ptr[0];
                                            images_ptr -= (img_c_offset +
                                                           fcolsIndex) *
                                                          images_icols_stride;
                                        }
                                    }
                                    images_ptr -= (img_r_offset +
                                                   frowsIndex) *
                                                  images_irows_stride;
                                    img_C_ptr -= frowsIndex * fcols;
                                }
                                images_ptr -= icolorsIndex *
                                              images_color_stride;
                                img_C_ptr -= icolorsIndex * frows * fcols;
                            }
                            images_ptr -= icountIndex * images_count_stride;
                            img_C_ptr -= icountIndex * icolors * frows *
                                         fcols;
                        }
                        %(gemm)s(&noTrans, &noTrans,
                                 &gemm_m, &gemm_n, &gemm_k,
                                 &alpha,
                                 img_C_ptr, &gemm_m,
                                 hid_C_ptr, &gemm_k,
                                 &beta,
                                 output_ptr, &gemm_m);
                        hid_C_ptr -= hC * hidC_hcols_stride;
                    }
                    hid_C_ptr -= hR * hidC_hrows_stride;
                }
                hid_C_ptr -= m * hidC_module_stride;
                output_ptr -= m * output_module_stride;
            }
            // Free the img_C and the hid_C arrays
            Py_XDECREF(img_C);
            Py_XDECREF(hid_C);
        }
        """
        return sio.getvalue() % locals()

    def grad(self, inputs, goutputs):
        """
        Gradients: ImgActs w.r.t. images, FilterActs w.r.t. hidacts; the
        scalar shape inputs get no gradient.
        """
        images, hidacts, frows, fcols = inputs
        gfilters, = goutputs
        _, _, irows, icols = images.shape
        gimages = ImgActs(module_stride=self.module_stride)(
                gfilters, hidacts, irows, icols)
        ghidacts = FilterActs(module_stride=self.module_stride)(
                images, gfilters)
        return [gimages, ghidacts, None, None]

    def infer_shape(self, node, shapes):
        """
        Output shape is (fmodules, filters_per_module, icolors, frows,
        fcols), taking frows/fcols from the node's scalar inputs.
        """
        images, hidacts, frows, fcols = node.inputs
        ishape, hshape, frowshp, fcolshp = shapes
        icount, icolors, irows, icols = ishape
        hcount, fmodules, filters_per_module, hrows, hcols = hshape
        fcolors = icolors
        # frows already assigned
        # fcols already assigned
        fshape = (fmodules, filters_per_module, fcolors, frows, fcols )
        if not_symbolic(irows, icols) and irows != icols:
            raise NotImplementedError("non-square image argument",
                    (irows, icols))
        if not_symbolic(hrows, hcols) and hrows != hcols:
            raise NotImplementedError("non-square filter shape",
                    (hrows, hcols))
        if not_symbolic(icount, hcount) and icount != hcount:
            raise NotImplementedError("different number of images",
                    (icount, hcount))
        return [fshape]
class ImgActs(Base):
"""
XXX
"""
def make_node(self, filters, hidacts, irows, icols):
filters, hidacts, irows, icols = map(theano.tensor.as_tensor_variable,
[filters, hidacts, irows, icols])
if irows.dtype[:3] not in ('int', 'uin'):
raise TypeError(irows)
if icols.dtype[:3] not in ('int', 'uin'):
raise TypeError(irows)
if irows.ndim:
raise TypeError('irows should be scalar', irows)
if icols.ndim:
raise TypeError('icols should be scalar', icols)
if filters.ndim != 5: #(fmodules, filters_per_module, fcolors, frows, fcols)
raise TypeError('filters must be 7d tensor', filters)
if hidacts.ndim != 5: #(icount, fmodules, filters_per_module, hrows, hcols)
raise TypeError('hidacts must be 5d tensor', filters)
if filters.dtype != hidacts.dtype: #should be floatX
raise TypeError('filters and hidacts must have matching dtype',
(filters, hidacts))
hcount, fmodules, filters_per_module, hrows, hcols = hidacts.type.broadcastable
#print fmodules, filters_per_module, hrows, hcols, hcount
#print hidacts
_, _, fcolors, _, _ = filters.type.broadcastable
otype = theano.tensor.TensorType(
dtype=filters.dtype,
broadcastable=(hcount, fcolors,
False, False)) # irows and icols should not be broadcastable
return theano.gof.Apply(self,
[filters, hidacts, irows, icols],
[otype()])
def perform(self, node, iargs, ostor):
#print 'into ImgActs.perform'
filters, hidacts, irows, icols = iargs
# hcount : minibatch size (nb image passed)
# fmodules : For one position, how many filters
hcount, fmodules, filters_per_module, hrows, hcols = hidacts.shape
# fmodules : nb of modules ( module = group of non-overlaping filters )
# filters per module : nomber of filters on each position ('looking' at the same image area)
# fcolors : nb of color channels ( 1 for grayscale, 3 for RGB, ... )
# frows x fcols : size of filter
fmodules_, filters_per_module_, fcolors, frows, fcols = filters.shape
assert fmodules_==fmodules
assert filters_per_module_==filters_per_module
icolors = fcolors
icount = hcount
#print 'IMGACTS: NODE OUTPUTS[0]'
#print theano.printing.debugprint(node.outputs[0])
#print 'FILTERS SHAPE:', filters.shape
#print 'HIDACTS SHAPE:', hidacts.shape
if hrows != hcols:
raise NotImplementedError("non-square hidacts argument",
(hrows, hcols))
if frows != fcols:
raise NotImplementedError("non-square filter shape",
(frows, fcols))
if irows != icols:
raise NotImplementedError("non-square image argument",
(irows, icols))
if hrows * frows + fmodules - 1 != irows:
raise NotImplementedError("hrows * frows + fmodules - 1 should" +
"be equal to irows",
(hrows * frows + fmodules - 1, irows))
if hcols * fcols + fmodules - 1 != icols:
raise NotImplementedError("hcols * fcols + fmodules - 1 should" +
"be equal to icols",
(hcols * fcols + fmodules - 1, icols))
images = numpy.zeros(
(icount, icolors, irows, icols),
dtype=hidacts.dtype)
for m in xrange(fmodules):
for hR in xrange(hrows):
img_r_offset = m*self.module_stride + hR*frows
for hC in xrange(hcols):
rc_filters = filters[m, :, :, :, :]
# rc_filters is fpm x fcolors x frows x fcols
rc_hidacts = hidacts[:, m, :, hR, hC]
# rc_hidacts is icount x fpm
img_c_offset = m*self.module_stride + hC*fcols
images[:,:,
img_r_offset:img_r_offset + frows,
img_c_offset:img_c_offset + fcols
] += numpy.dot(
rc_hidacts,
rc_filters.reshape(filters_per_module, -1)
).reshape(
(icount, fcolors, frows, fcols))
ostor[0][0] = images
#print 'exiting ImgActs perform'
def c_support_code(self):
    """Return C support code (the BLAS header prototypes) required by c_code."""
    return blas.blas_header_text()
def c_libraries(self):
    """Libraries to link against: whatever BLAS Theano was configured with."""
    return blas.ldflags()
def c_headers(self):
    # omp.h is always included; -fopenmp is only added in c_compile_args
    # when self.openmp is set.
    return ["omp.h"]
def c_compile_args(self):
    """Compiler flags: the BLAS compile flags, plus -fopenmp when enabled."""
    args = blas.ldflags(libs=False, flags=True)
    if self.openmp:
        args = args + ['-fopenmp']
    return args
def c_lib_dirs(self):
    """Directories searched for the BLAS libraries at link time."""
    return blas.ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
    """Directories searched for the BLAS headers at compile time."""
    return blas.ldflags(libs=False, include_dir=True)
def c_code(self, node, node_name, input_names, output_names, sub):
    """Return the C implementation of this Op (a BLAS-based ImgActs).

    The returned string is a template; its %(name)s slots are filled from
    locals() at the end (input/output C variable names, the fail snippet,
    dtype-specific gemm/axpy symbols, and module_stride).
    """
    # Extract input values
    filters, hidacts, irows, icols = input_names
    # Determine which BLAS function to use (upcast of the two input dtypes)
    conv_type = scalar.upcast(node.inputs[0].type.dtype,
            node.inputs[1].type.dtype)
    if conv_type == 'float32':
        conv_type = "float"
        gemm = "sgemm_"
        axpy = "saxpy_"
    elif conv_type == 'float64':
        conv_type = "double"
        gemm = "dgemm_"
        axpy = "daxpy_"
    else:
        # Only float32/float64 inputs are supported by this kernel.
        raise Exception()
    # Extract output values
    output = output_names[0]
    # Assign self.module_stride to a local variable else
    # the %(module_stride)s fails
    module_stride = self.module_stride
    # NOTE(review): `openmp` is computed but %(openmp)s never appears in
    # the template below -- confirm whether OpenMP was meant to be used.
    openmp = int(self.openmp)
    # Generate C code.
    # NOTE(review): in the template, img_c_offset uses `frows` where the
    # Python perform() uses fcols; both paths reject non-square filters,
    # so the two agree in practice -- confirm before generalizing.
    fail = sub['fail']
    sio = StringIO.StringIO()
    print >> sio, """
    // Validate the shape and the data type of the input tensors
    if (%(hidacts)s->nd != 5){
        PyErr_SetString(PyExc_ValueError, "hidacts not a 5d tensor");
        %(fail)s;
    }
    if (%(filters)s->nd != 5){
        PyErr_SetString(PyExc_ValueError, "filters not a 5d tensor");
        %(fail)s;
    }
    if ((%(hidacts)s->descr->type_num != PyArray_DOUBLE) &&
        (%(hidacts)s->descr->type_num != PyArray_FLOAT)){
        PyErr_SetString(PyExc_TypeError,
                        "hidacts type should be float32 or float64");
        %(fail)s;
    }
    if ((%(filters)s->descr->type_num != PyArray_DOUBLE) &&
        (%(filters)s->descr->type_num != PyArray_FLOAT)){
        PyErr_SetString(PyExc_TypeError,
                        "filters type should be float32 or float64");
        %(fail)s;
    }
    if (%(filters)s->descr->type_num != %(hidacts)s->descr->type_num){
        PyErr_SetString(PyExc_TypeError,
                        "filters and hidacts should have the same type");
        %(fail)s;
    }
    { // New scope level to avoid cross-initialization
        // Extract input variables
        const int hcount = %(hidacts)s->dimensions[0];
        const int fmodules = %(hidacts)s->dimensions[1];
        const int filters_per_module = %(hidacts)s->dimensions[2];
        const int hrows = %(hidacts)s->dimensions[3];
        const int hcols = %(hidacts)s->dimensions[4];
        const int fmodules_ = %(filters)s->dimensions[0];
        const int filters_per_module_ = %(filters)s->dimensions[1];
        const int fcolors = %(filters)s->dimensions[2];
        const int frows = %(filters)s->dimensions[3];
        const int fcols = %(filters)s->dimensions[4];
        const int irows = ((dtype_%(irows)s *) (%(irows)s->data))[0];
        const int icols = ((dtype_%(icols)s *) (%(icols)s->data))[0];
        const int module_stride = %(module_stride)s;
        // Validate the shape of the input tensors
        if (hrows != hcols){
            PyErr_SetString(PyExc_ValueError,
                            "non-square hidacts argument");
            %(fail)s;
        }
        if (frows != fcols){
            PyErr_SetString(PyExc_ValueError,
                            "non-square filter shape");
            %(fail)s;
        }
        if (irows != icols){
            PyErr_SetString(PyExc_ValueError,
                            "non-square image argument");
            %(fail)s;
        }
        if (fmodules_ != fmodules){
            PyErr_SetString(PyExc_ValueError,
                            "inconsistent number of filter modules");
            %(fail)s;
        }
        if (filters_per_module_ != filters_per_module){
            PyErr_SetString(PyExc_ValueError,
                            "inconsistent number of filters by modules");
            %(fail)s;
        }
        if (hrows * frows + fmodules - 1 != irows){
            PyErr_SetString(
                PyExc_ValueError,
                "hrows * frows + fmodules - 1 should be equal to irows");
            %(fail)s;
        }
        if (hcols * fcols + fmodules - 1 != icols){
            PyErr_SetString(
                PyExc_ValueError,
                "hcols * fcols + fmodules - 1 should be equal to icols");
            %(fail)s;
        }
        // Ensure output array is of the proper format
        if (NULL == %(output)s ||
            (%(output)s->dimensions[0] != hcount) ||
            (%(output)s->dimensions[1] != fcolors) ||
            (%(output)s->dimensions[2] != irows) ||
            (%(output)s->dimensions[3] != icols) ||
            (!PyArray_ISCARRAY(%(output)s)) ||
            ((%(output)s->descr->type_num != PyArray_DOUBLE) &&
             (%(output)s->descr->type_num != PyArray_FLOAT)))
        {
            // The output array is of an invalid format.
            if (NULL != %(output)s) Py_XDECREF(%(output)s);
            npy_intp outputDims[4] = {hcount, fcolors, irows, icols};
            %(output)s = (PyArrayObject*)PyArray_ZEROS(4, outputDims,
                                        %(filters)s->descr->type_num, 0);
            if(!%(output)s) {
                PyErr_SetString(PyExc_MemoryError,
                                "failed to alloc memory for output");
                %(fail)s
            }
        }else{
            // The output array is of the proper format.
            // Its content must be initialized to zeros.
            dtype_%(hidacts)s* out_ptr =
                (dtype_%(output)s*)PyArray_DATA(%(output)s);
            memset(out_ptr, 0, PyArray_ITEMSIZE(%(output)s) *
                               PyArray_SIZE(%(output)s));
        }
        // Obtain C-Contiguous versions of %(hidacts)s and %(filters)s.
        PyArrayObject* filter_C = PyArray_GETCONTIGUOUS(%(filters)s);
        PyArrayObject* hid_C_view =
            (PyArrayObject*)PyArray_SwapAxes(%(hidacts)s, 0, 3);
        hid_C_view = (PyArrayObject*)PyArray_SwapAxes(hid_C_view, 2, 4);
        PyArrayObject* hid_C = PyArray_GETCONTIGUOUS(hid_C_view);
        // Extract the arrays' strides
        const npy_intp output_count_stride = PyArray_STRIDE(%(output)s, 0) /
                                             PyArray_ITEMSIZE(%(output)s);
        const npy_intp output_color_stride = PyArray_STRIDE(%(output)s, 1) /
                                             PyArray_ITEMSIZE(%(output)s);
        const npy_intp output_frows_stride = PyArray_STRIDE(%(output)s, 2) /
                                             PyArray_ITEMSIZE(%(output)s);
        const npy_intp output_fcols_stride = PyArray_STRIDE(%(output)s, 3) /
                                             PyArray_ITEMSIZE(%(output)s);
        const npy_intp filC_fmodule_stride = PyArray_STRIDE(filter_C, 0) /
                                             PyArray_ITEMSIZE(filter_C);
        const npy_intp filC_filter_stride = PyArray_STRIDE(filter_C, 1) /
                                            PyArray_ITEMSIZE(filter_C);
        const npy_intp filC_fcolor_stride = PyArray_STRIDE(filter_C, 2) /
                                            PyArray_ITEMSIZE(filter_C);
        const npy_intp filC_frows_stride = PyArray_STRIDE(filter_C, 3) /
                                           PyArray_ITEMSIZE(filter_C);
        const npy_intp filC_fcols_stride = PyArray_STRIDE(filter_C, 4) /
                                           PyArray_ITEMSIZE(filter_C);
        const npy_intp hidC_count_stride = PyArray_STRIDE(hid_C, 3) /
                                           PyArray_ITEMSIZE(hid_C);
        const npy_intp hidC_module_stride = PyArray_STRIDE(hid_C, 1) /
                                            PyArray_ITEMSIZE(hid_C);
        const npy_intp hidC_filter_stride = PyArray_STRIDE(hid_C, 4) /
                                            PyArray_ITEMSIZE(hid_C);
        const npy_intp hidC_hrows_stride = PyArray_STRIDE(hid_C, 0) /
                                           PyArray_ITEMSIZE(hid_C);
        const npy_intp hidC_hcols_stride = PyArray_STRIDE(hid_C, 2) /
                                           PyArray_ITEMSIZE(hid_C);
        // Allocate variable used to call BLAS' functions
        char noTrans = 'N';
        const %(conv_type)s alpha = 1.0f;
        const %(conv_type)s beta = 0.0f;
        int gemm_m = fcolors * frows * fcols;
        int gemm_n = hcount;
        int gemm_k = filters_per_module;
        int axpi_incx = fcolors * frows * fcols;
        int axpi_incy = output_count_stride;
        npy_intp dotPDims[2] = {hcount, fcolors * frows * fcols};
        PyArrayObject* dotPResults =
            (PyArrayObject*) PyArray_EMPTY(2, dotPDims,
                                           %(output)s->descr->type_num,
                                           0);
        if(!dotPResults) {
            PyErr_SetString(PyExc_MemoryError,
                            "failed to alloc memory for dotPResult");
            Py_XDECREF(filter_C);
            Py_XDECREF(hid_C);
            %(fail)s;
        }
        dtype_%(hidacts)s* output_ptr =
            (dtype_%(output)s*)PyArray_DATA(%(output)s);
        dtype_%(filters)s* filter_C_ptr =
            (dtype_%(filters)s*)PyArray_DATA(filter_C);
        dtype_%(hidacts)s* hid_C_ptr = (dtype_%(hidacts)s*)PyArray_DATA(hid_C);
        dtype_%(output)s* dotp = (dtype_%(output)s*)(dotPResults->data);
        for(int hR=0; hR < hrows; hR++){
            hid_C_ptr += hR * hidC_hrows_stride;
            for(int m=0; m < fmodules; m++){
                hid_C_ptr += m * hidC_module_stride;
                filter_C_ptr += m * filC_fmodule_stride;
                int img_r_offset = m * module_stride + hR * frows;
                for(int hC=0; hC < hcols; hC++){
                    hid_C_ptr += hC * hidC_hcols_stride;
                    int img_c_offset = m * module_stride + hC * frows;
                    // Use BLAS' gemv function to speed up
                    // the calculation of the dot products.
                    %(gemm)s(&noTrans, &noTrans,
                             &gemm_m, &gemm_n, &gemm_k,
                             &alpha,
                             filter_C_ptr, &gemm_m,
                             hid_C_ptr, &gemm_k,
                             &beta,
                             dotp, &gemm_m);
                    // Add dotp content to output array
                    output_ptr += img_c_offset * output_fcols_stride;
                    output_ptr += img_r_offset * output_frows_stride;
                    for(int fcolorsIndex=0; fcolorsIndex <
                            fcolors; fcolorsIndex++){
                        output_ptr += fcolorsIndex *
                                      output_color_stride;
                        dotp += fcolorsIndex * frows * fcols;
                        for(int frowsIndex=0; frowsIndex < frows;
                                frowsIndex++){
                            output_ptr += frowsIndex *
                                          output_frows_stride;
                            dotp += frowsIndex * fcols;
                            for(int fcolsIndex=0; fcolsIndex < fcols;
                                    fcolsIndex++){
                                %(axpy)s(&hcount, &alpha, dotp, &axpi_incx,
                                         output_ptr, &axpi_incy);
                                output_ptr += output_fcols_stride;
                                dotp++;
                            }
                            output_ptr -= fcols * output_fcols_stride;
                            dotp -= fcols;
                            dotp -= frowsIndex * fcols;
                            output_ptr -= frowsIndex *
                                          output_frows_stride;
                        }
                        output_ptr -= fcolorsIndex *
                                      output_color_stride;
                        dotp -= fcolorsIndex * frows * fcols;
                    }
                    output_ptr -= img_c_offset * output_fcols_stride;
                    output_ptr -= img_r_offset * output_frows_stride;
                    hid_C_ptr -= hC * hidC_hcols_stride;
                }
                hid_C_ptr -= m * hidC_module_stride;
                filter_C_ptr -= m * filC_fmodule_stride;
            }
            hid_C_ptr -= hR * hidC_hrows_stride;
        }
        // Free the arrays
        Py_XDECREF(dotPResults);
        Py_XDECREF(filter_C);
        Py_XDECREF(hid_C);
    }
    """
    return sio.getvalue() % locals()
def grad(self, inputs, goutputs):
    """Symbolic gradients of ImgActs w.r.t. (filters, hidacts).

    irows/icols are integer shape arguments and are not differentiable,
    hence the trailing None, None.
    """
    filters, hidacts, irows, icols = inputs
    gimages, = goutputs
    # frows/fcols are symbolic scalars unpacked from the filters' shape.
    _, _, _, frows, fcols = filters.shape
    gfilters = WeightActs(module_stride=self.module_stride)(
        gimages, hidacts, frows, fcols)
    ghidacts = FilterActs(module_stride=self.module_stride)(
        gimages, filters)
    return [gfilters, ghidacts, None, None]
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,820 | luoheng/TCssrbm | refs/heads/master | /test_Brodatz.py | import theano
from Brodatz import Brodatz_op, Brodatz
import numpy
from PIL import Image
from TCssrbm import tile_conv_weights

# Symbolic minibatch index: batch s_idx covers examples
# [s_idx*128, s_idx*128 + 128).
s_idx = theano.tensor.lscalar()
batch_range = s_idx*128 + numpy.arange(128)

n_examples = 16
n_img_channels = 1
n_img_rows = 98
n_img_cols = 98

batch_x = Brodatz_op(batch_range,
        ['../Brodatz/D21.gif',
         '../Brodatz/D6.gif',
         '../Brodatz/D53.gif',
         '../Brodatz/D77.gif',
         '../Brodatz/D4.gif',
         '../Brodatz/D16.gif',
         '../Brodatz/D68.gif',
         '../Brodatz/D103.gif'],  # download from http://www.ux.uis.no/~tranden/brodatz.html
        patch_shape=(n_img_channels,
                     n_img_rows,
                     n_img_cols),
        noise_concelling=0.,
        seed=3322,
        batchdata_size=n_examples,
        rescale=1.0,
        new_shapes=[[320, 320],
                    [480, 480],
                    [320, 320],
                    [480, 480],
                    [480, 480],
                    [640, 640],
                    [320, 320],
                    [320, 320]],
        validation=False,
        test_data=False
        )
fn = theano.function([s_idx], batch_x)

# NOTE(review): the first two texture files are listed in the opposite
# order here (D6 before D21) compared to the Brodatz_op call above --
# confirm whether that is intentional.
# BUG FIX: the original call was a SyntaxError -- the closing `],` of the
# new_shapes list was missing, and the call ended with a stray `])`.
B_texture = Brodatz(['../Brodatz/D6.gif',
        '../Brodatz/D21.gif',
        '../Brodatz/D53.gif',
        '../Brodatz/D77.gif',
        '../Brodatz/D4.gif',
        '../Brodatz/D16.gif',
        '../Brodatz/D68.gif',
        '../Brodatz/D103.gif'],  # download from http://www.ux.uis.no/~tranden/brodatz.html
        patch_shape=(n_img_channels,
                     n_img_rows,
                     n_img_cols),
        noise_concelling=0.,
        seed=3322,
        batchdata_size=n_examples,
        rescale=1.0,
        new_shapes=[[320, 320],
                    [480, 480],
                    [320, 320],
                    [480, 480],
                    [480, 480],
                    [640, 640],
                    [320, 320],
                    [320, 320]],
        validation=False,
        test_data=False
        )
"""
for ii in xrange(8):
    shp = B_texture.test_img[ii].shape
    #img = numpy.zeros((1,)+shp)
    temp_img = numpy.asarray(B_texture.test_img[ii], dtype='uint8')
    #img[0,] = temp_img
    Image.fromarray(temp_img,'L').save('test_img_%s.png'%ii)
    shp = B_texture.training_img[ii].shape
    #img = numpy.zeros((1,)+shp)
    temp_img = numpy.asarray(255*(B_texture.training_img[ii] - B_texture.training_img[ii].min()) / (B_texture.training_img[ii].max() - B_texture.training_img[ii].min() + 1e-6),
            dtype='uint8')
    #img[0,] = temp_img
    Image.fromarray(temp_img[0],'L').save('training_img_%s.png'%ii)
"""
# Draw 10 minibatches of patches and save each as a tiled greyscale PNG.
for n in xrange(10):
    img_1 = fn(n)
    #img_2=fn(n+1)
    #assert img_1.any() == img_2.any()
    Image.fromarray(tile_conv_weights(img_1,
            flip=False), 'L').save('patches_%s.png'%n)
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,821 | luoheng/TCssrbm | refs/heads/master | /TCssDBN_filter_bias.py | """
This file implements the binary convolutional ssRBM as a second layer in DBN
"""
import cPickle, pickle
import numpy
numpy.seterr('warn') #SHOULD NOT BE IN LIBIMPORT
from PIL import Image
import theano
from theano import tensor
from theano.tensor import nnet,grad
from theano.tensor.nnet.conv import conv2d
from pylearn.io import image_tiling
from pylearn.algorithms.mcRBM import (
contrastive_cost, contrastive_grad)
import pylearn.gd.sgd
from TCssrbm_FPCD_filter_bias import RBM,Gibbs
from unshared_conv_diagonally import FilterActs
import sys
#from unshared_conv_diagonally import FilterActs
#from unshared_conv_diagonally import WeightActs
#from unshared_conv_diagonally import ImgActs
from Brodatz import Brodatz_op
from Brodatz import Brodatz
from CrossCorrelation import CrossCorrelation,NCC
from MSSIM import MSSIM
#import scipy.io
import os
_temp_data_path_ = '.'#'/Tmp/luoheng'
# Select the random-stream implementation.  The slow shared_randomstreams
# generator is hard-enabled; the MRG alternative below is kept disabled.
if 1:
    print 'WARNING: using SLOW rng'
    RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
    import theano.sandbox.rng_mrg
    RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams
floatX = theano.config.floatX
# sharedX(X, name): build a Theano shared variable holding X cast to floatX.
sharedX = lambda X, name: theano.shared(numpy.asarray(X, dtype=floatX),
        name=name)
def conv2d_transpose(x, filters, in_img_shape, filters_shape, subsample):
    """
    Supposing a linear transformation M implementing convolution by dot(img, M),
    Return the equivalent of dot(x, M.T).

    This is also implemented by a convolution, but with lots of dimshuffles and flipping and
    stuff.
    """
    # Build a dummy forward convolution, then ask its Op for the gradient
    # w.r.t. the image input; that gradient graph applied to x is exactly
    # the transposed convolution.
    dummy_v = tensor.tensor4()
    z_hs = conv2d(dummy_v, filters,
            image_shape=in_img_shape,
            filter_shape=filters_shape,
            subsample=subsample)
    rval, _ = z_hs.owner.op.grad((dummy_v, filters), (x,))
    return rval
def unnatural_sgd_updates(params, grads, stepsizes, tracking_coef=0.1, epsilon=1):
    """SGD updates scaled by a running estimate of each gradient's spread.

    Keeps exponential moving averages of the gradient and of its square
    for every parameter, and divides each step by sqrt(variance + epsilon).
    """
    running_means = [theano.shared(numpy.zeros_like(p.get_value(borrow=True)))
            for p in params]
    running_sqr_means = [theano.shared(numpy.ones_like(p.get_value(borrow=True)))
            for p in params]
    updates = {}
    for grad_i, mean_i, sqr_mean_i, param_i, step_i in zip(
            grads, running_means, running_sqr_means, params, stepsizes):
        updates[mean_i] = tracking_coef * grad_i + (1 - tracking_coef) * mean_i
        updates[sqr_mean_i] = tracking_coef * grad_i * grad_i + (1 - tracking_coef) * sqr_mean_i
        grad_var = sqr_mean_i - mean_i ** 2
        # natural grad doesn't want sqrt, but i found it worked worse
        updates[param_i] = param_i - step_i * mean_i / tensor.sqrt(grad_var + epsilon)
    return updates
def safe_update(a, b):
    """Merge mapping `b` into dict `a` in place, refusing to overwrite.

    Raises KeyError on the first key already present in `a`; otherwise
    copies every (key, value) pair and returns `a`.

    Uses dict.items() instead of the Python-2-only dict.iteritems():
    identical behaviour under Python 2, and also works under Python 3.
    """
    for k, v in dict(b).items():
        if k in a:
            raise KeyError(k)
        a[k] = v
    return a
def most_square_shape(N):
    """rectangle (height, width) with area N that is closest to sqaure

    Returns an (int, int) pair with height <= width.  Uses range and the
    floor-division operator `//` so the result stays an integer pair under
    both Python 2 and Python 3 (the original `xrange` and true-division
    `/` were Python-2-only).
    """
    for i in range(int(numpy.sqrt(N)), 0, -1):
        if 0 == N % i:
            return (i, N // i)
def tile_conv_weights(w, flip=False, scale_each=True):
    """
    Return something that can be rendered as an image to visualize the filters.

    w is a 4-tensor (n_filters, channels, rows, cols) with square filters;
    channels == 1 yields a 2-D uint8 grid, otherwise a (rows, cols, 3)
    uint8 grid is produced.  With scale_each each tile is normalized to
    [0, 255] independently; otherwise the whole tensor is normalized once.
    """
    #if w.shape[1] != 3:
    #    raise NotImplementedError('not rgb', w.shape)
    if w.shape[2] != w.shape[3]:
        raise NotImplementedError('not square', w.shape)
    if w.shape[1] == 1:
        # Greyscale case: tile into a 2-D grid with 1-pixel separators.
        wmin, wmax = w.min(), w.max()
        if not scale_each:
            w = numpy.asarray(255 * (w - wmin) / (wmax - wmin + 1e-6), dtype='uint8')
        trows, tcols = most_square_shape(w.shape[0])
        outrows = trows * w.shape[2] + trows-1
        outcols = tcols * w.shape[3] + tcols-1
        out = numpy.zeros((outrows, outcols), dtype='uint8')
        #tr_stride= 1+w.shape[1]
        for tr in range(trows):
            for tc in range(tcols):
                # this is supposed to flip the filters back into the image
                # coordinates as well as put the channels in the right place, but I
                # don't know if it really does that
                tmp = w[tr*tcols+tc,
                        0,
                        ::-1 if flip else 1,
                        ::-1 if flip else 1]
                if scale_each:
                    tmp = numpy.asarray(255*(tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6),
                            dtype='uint8')
                out[tr*(1+w.shape[2]):tr*(1+w.shape[2])+w.shape[2],
                    tc*(1+w.shape[3]):tc*(1+w.shape[3])+w.shape[3]] = tmp
        return out
    # Multi-channel case: same tiling, with channels moved to the last axis.
    wmin, wmax = w.min(), w.max()
    if not scale_each:
        w = numpy.asarray(255 * (w - wmin) / (wmax - wmin + 1e-6), dtype='uint8')
    trows, tcols = most_square_shape(w.shape[0])
    outrows = trows * w.shape[2] + trows-1
    outcols = tcols * w.shape[3] + tcols-1
    out = numpy.zeros((outrows, outcols, 3), dtype='uint8')
    tr_stride = 1+w.shape[1]  # NOTE(review): unused local, kept as-is
    for tr in range(trows):
        for tc in range(tcols):
            # this is supposed to flip the filters back into the image
            # coordinates as well as put the channels in the right place, but I
            # don't know if it really does that
            tmp = w[tr*tcols+tc].transpose(1,2,0)[
                    ::-1 if flip else 1,
                    ::-1 if flip else 1]
            if scale_each:
                tmp = numpy.asarray(255*(tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6),
                        dtype='uint8')
            out[tr*(1+w.shape[2]):tr*(1+w.shape[2])+w.shape[2],
                tc*(1+w.shape[3]):tc*(1+w.shape[3])+w.shape[3]] = tmp
    return out
class bRBM(object):
"""
Light-weight class that provides math related to inference in binary Spike & Slab RBM
Attributes:
- _params a list of the attributes that are shared vars
"""
def __init__(self, **kwargs):
print 'init binary rbm'
self.__dict__.update(kwargs)
@classmethod
def alloc(cls,
        l2_conf,
        hs_shape,  # input dimensionality
        filters_shape,
        filters_irange,
        rbm,
        seed=8923402,
        ):
    """Build a second-layer binary ssRBM on top of a trained first-layer `rbm`.

    h_bias, mu and alpha are initialized from the first layer's
    conv_bias_hs / conv_mu / conv_alpha (flattened per-filter); filters
    are random normal scaled by filters_irange; fast (FPCD) copies of
    every parameter start at zero.
    """
    print 'alloc rbm'
    rng = numpy.random.RandomState(seed)
    self = cls()
    #print hs_shape
    #print filters_shape
    n_batchsize, n_maps_, n_hs_rows, n_hs_cols = hs_shape
    n_filters, n_maps, n_filters_rows, n_filters_cols = filters_shape
    assert n_maps_ == n_maps
    self.hs_shape = hs_shape
    print 'hs_shape'
    print self.hs_shape
    self.filters_shape = filters_shape
    print 'self.filters_shape'
    print self.filters_shape
    # Valid-mode convolution output shape.
    self.out_conv_v_shape = (n_batchsize, n_filters, n_hs_rows-n_filters_rows+1, n_hs_cols-n_filters_cols+1)
    print 'self.out_conv_v_shape'
    print self.out_conv_v_shape
    #start to define the parameters
    #biases for v and h
    #conv_v_bias_shape = self.out_conv_v_shape[1:]
    conv_v_bias_shape = (n_filters,)
    self.conv_v_bias_shape = conv_v_bias_shape
    self.conv_v_bias = sharedX(numpy.zeros(self.conv_v_bias_shape), name='conv_v_bias')
    self.conv_v_bias_fast = sharedX(numpy.zeros(self.conv_v_bias_shape), name='conv_v_bias_fast')
    print 'self.conv_v_bias_shape'
    print self.conv_v_bias_shape
    #h_bias_shape = self.hs_shape[1:]
    h_bias_shape = (n_maps_,)
    self.h_bias_shape = h_bias_shape
    def conver_hs_bias(a, old_shp=rbm.conv_bias_hs_shape, new_shp=self.h_bias_shape):
        # Flatten a first-layer (f_modules, n_filters) parameter into the
        # (n_maps_,) vector this layer expects, e.g. (352,).
        f_modules, n_filters = old_shp
        n_maps, = new_shp
        assert f_modules*n_filters == n_maps
        b = a.reshape(f_modules*n_filters)
        rval = numpy.zeros(new_shp)
        for filters_index in xrange(f_modules*n_filters):
            rval[filters_index] = b[filters_index]
        return rval
    h_bias_ival = conver_hs_bias(rbm.conv_bias_hs.get_value())
    self.h_bias = sharedX(h_bias_ival, 'h_bias')
    #self.h_bias = sharedX(numpy.zeros(self.h_bias_shape), 'h_bias')
    self.h_bias_fast = sharedX(numpy.zeros(self.h_bias_shape), 'h_bias_fast')
    print 'self.h_bias_shape'
    print self.h_bias_shape
    #filters
    self.filters = sharedX(rng.randn(*self.filters_shape) * filters_irange, 'filters_hs')
    self.filters_fast = sharedX(numpy.zeros(filters_shape), 'filters_fast')
    #mu
    #mu_shape = self.hs_shape[1:]
    mu_shape = self.h_bias_shape
    self.mu_shape = mu_shape
    #mu_ival = numpy.zeros(mu_shape,dtype=floatX) + l2_conf['mu0']
    mu_ival = conver_hs_bias(rbm.conv_mu.get_value())
    self.mu = sharedX(mu_ival, name='mu')
    self.mu_fast = sharedX(numpy.zeros(mu_shape, dtype=floatX), name='mu_fast')
    print 'mu_shape'
    print self.mu_shape
    # alpha may be stored in the log domain (see get_alpha); either way it
    # is seeded from the first layer's conv_alpha values.
    if l2_conf['alpha_logdomain']:
        #alpha_ival = numpy.zeros(self.mu_shape,dtype=floatX) + numpy.log(l2_conf['alpha0'])
        alpha_ival = conver_hs_bias(rbm.conv_alpha.get_value())
        self.alpha = sharedX(alpha_ival, 'alpha')
        alpha_ival_fast = numpy.zeros(self.mu_shape, dtype=floatX)
        self.alpha_fast = sharedX(alpha_ival_fast, name='alpha_fast')
    else:
        alpha_ival = conver_hs_bias(rbm.conv_alpha.get_value())
        self.alpha = sharedX(
                alpha_ival,
                'alpha')
        self.alpha_fast = sharedX(
                numpy.zeros(self.mu_shape), name='alpha_fast')
    self.l2_conf = l2_conf
    # Slow parameters and their FPCD fast counterparts, in matching order.
    self._params = [self.filters,
            self.conv_v_bias,
            self.h_bias,
            self.mu,
            self.alpha
            ]
    self._params_fast = [self.filters_fast,
            self.conv_v_bias_fast,
            self.h_bias_fast,
            self.mu_fast,
            self.alpha_fast
            ]
    return self
def get_filters(self, With_fast):
    """Filter weights, optionally including the FPCD fast-weight term."""
    if not With_fast:
        return self.filters
    return self.filters + self.filters_fast
def get_alpha(self, With_fast):
    """Slab precision alpha, exponentiated when stored in the log domain."""
    raw = self.alpha + self.alpha_fast if With_fast else self.alpha
    if self.l2_conf['alpha_logdomain']:
        return tensor.exp(raw)
    return raw
def get_conv_v_bias(self, With_fast):
    """Visible-unit bias, plus the fast-weight bias when requested."""
    if not With_fast:
        return self.conv_v_bias
    return self.conv_v_bias + self.conv_v_bias_fast
def get_h_bias(self, With_fast):
    """Spike-unit bias, plus the fast-weight bias when requested."""
    if not With_fast:
        return self.h_bias
    return self.h_bias + self.h_bias_fast
def get_mu(self, With_fast):
    """Slab mean mu, plus the fast-weight term when requested."""
    if not With_fast:
        return self.mu
    return self.mu + self.mu_fast
def convdot(self, hs, filters):
    # Valid-mode convolution of an hs-shaped input with the filters;
    # shapes are fixed to the hs_shape/filters_shape chosen at alloc time.
    return conv2d(hs, filters,
            image_shape=self.hs_shape,
            filter_shape=self.filters_shape,
            subsample=(1,1))
def convdot_T(self, v, filters):
    # Transpose of convdot: maps a visible-shaped input back to an
    # hs-shaped output via conv2d_transpose.
    return conv2d_transpose(v, filters,
            self.hs_shape,
            self.filters_shape,
            (1,1))
#####################
# binary spike-and-slab convolutional visible units
def mean_conv_v_given_s_h(self, s, h, With_fast):
    """Return the mean of binary-valued visible units v, given h and s:
    sigmoid of conv(s*h, W) plus the per-filter visible bias.
    """
    W = self.get_filters(With_fast)  #(n_filters, n_maps, n_filters_rows, n_filters_cols) (64,352,2,2)
    conv_v_bias = self.get_conv_v_bias(With_fast)
    shW = self.convdot(s*h, W)
    shW_broadcastable = shW.dimshuffle(0,2,3,1)
    #change 64 x 352 x 7 x 7 to 64 x 7 x 7 x 352 for broadcasting
    pre_convhs_h_parts = shW_broadcastable + conv_v_bias
    #64 x 7 x 7 x 352 + 352 = 64 x 7 x 7 x 352
    rval = nnet.sigmoid(pre_convhs_h_parts.dimshuffle(0,3,1,2))
    #change 64 x 7 x 7 x 352 back to 64 x 352 x 7 x 7
    return rval
#####################
# binary spike-and-slab convolutional spike units (h given v)
def mean_h_given_v(self, v, With_fast):
    """P(h=1 | v): sigmoid of the quadratic term obtained after
    analytically integrating out the Gaussian slab variables s.
    """
    alpha = self.get_alpha(With_fast)
    mu = self.get_mu(With_fast)
    W = self.get_filters(With_fast)
    #(n_filters, n_maps, n_filters_rows, n_filters_cols) (64,352,2,2)
    h_bias = self.get_h_bias(With_fast)
    vW = self.convdot_T(v, W)
    #(64,352,8,8)
    vW_broadcastable = vW.dimshuffle(0,2,3,1)
    #change (64,352,8,8) to (64,8,8,352)
    alpha_vW_mu = vW_broadcastable/alpha + mu
    pre_convhs_h_parts = 0.5*alpha*(alpha_vW_mu**2)+h_bias-0.5*alpha*(mu**2)
    rval = nnet.sigmoid(pre_convhs_h_parts.dimshuffle(0,3,1,2))
    #change 64 x 8 x 8 x 352 back to 64 x 352 x 8 x 8
    return rval
#####################
# binary spike-and-slab convolutional slab units (s given v and h)
def mean_var_s_given_v_h(self, v, h, With_fast):
    """Mean and variance of the Gaussian slab units s given v and h.

    Returns (mean, var): the mean (W^T v)/alpha + mu is masked by the
    spike sample h, and var = 1/alpha is broadcast over the batch and
    spatial axes.
    """
    alpha = self.get_alpha(With_fast)
    mu = self.get_mu(With_fast)
    W = self.get_filters(With_fast)
    vW = self.convdot_T(v, W)
    #(64,352,8,8)
    vW_broadcastable = vW.dimshuffle(0,2,3,1)
    #change (64,352,8,8) to (64,8,8,352)
    rval = ((vW_broadcastable/alpha)+mu)
    return rval.dimshuffle(0,3,1,2)*h, 1.0 / alpha  #change 64 x 8 x 8 x 352 back to 64 x 352 x 8 x 8
#####################
def gibbs_step_for_s_h(self, s, h, s_rng, return_locals=False, sampling_for_s=True, With_fast=True):
    """One Gibbs sweep starting from slab/spike samples (s, h):
    sample v | s,h, then h | v, then s | v,h.

    Returns (sample_s, sample_h); with return_locals=True the locals()
    dict is appended for debugging.  When sampling_for_s is False the
    slab mean is returned instead of a Gaussian sample.
    """
    # Downward pass: visible-unit means and binary samples given (s, h).
    mean_conv_v = self.mean_conv_v_given_s_h(s, h, With_fast)
    sample_conv_v = tensor.cast(s_rng.uniform(size=self.out_conv_v_shape) < mean_conv_v, floatX)
    # Upward pass: spike means and binary samples given the sampled visibles.
    mean_h = self.mean_h_given_v(sample_conv_v, With_fast)
    sample_h = tensor.cast(s_rng.uniform(size=self.hs_shape) < mean_h, floatX)
    # Slab conditional given the sampled visibles and spikes.
    meanvar_s = self.mean_var_s_given_v_h(sample_conv_v, sample_h, With_fast)
    mean_s, var_s = meanvar_s
    if sampling_for_s:
        # Reparameterized Gaussian sample: mean + sqrt(var) * N(0, 1),
        # with var broadcast via the dimshuffle round-trip.
        random_normal = s_rng.normal(size=self.hs_shape)
        #(64,352,8,8)
        random_normal_bc = random_normal.dimshuffle(0,2,3,1)*tensor.sqrt(var_s)
        sample_s = random_normal_bc.dimshuffle(0,3,1,2) + mean_s
    else:
        sample_s = mean_s
    if return_locals:
        return sample_s, sample_h, locals()
    else:
        return sample_s, sample_h
def free_energy_given_s_h(self, s, h, With_fast=False):
    """Free energy F(s, h), with the binary visible units analytically
    summed out (the softplus term).  Returns one scalar per example.
    """
    alpha = self.get_alpha(With_fast)
    mu = self.get_mu(With_fast)
    W = self.get_filters(With_fast)
    h_bias = self.get_h_bias(With_fast)
    conv_v_bias = self.get_conv_v_bias(With_fast)
    s_broadcastable = s.dimshuffle(0,2,3,1)
    h_broadcastable = h.dimshuffle(0,2,3,1)
    #change (64,352,8,8) to (64,8,8,352)
    # Quadratic/linear energy terms of the slab-and-spike units.
    out_softplus = 0.5*alpha*(s_broadcastable**2) \
        - alpha*mu*s_broadcastable*h_broadcastable \
        + 0.5*alpha*(mu**2)*h_broadcastable - h_bias*h_broadcastable
    pre_softplus = self.convdot(s*h, W)
    #(64,352,7,7)
    pre_softplus_broadcastable = pre_softplus.dimshuffle(0,2,3,1)
    pre_softplus_broadcastable = pre_softplus_broadcastable + conv_v_bias
    #(64,7,7,352) + (352,) = (64,7,7,352)
    rval = tensor.sum(out_softplus, axis=[1,2,3]) - \
        tensor.sum(nnet.softplus(pre_softplus_broadcastable.dimshuffle(0,3,1,2)), axis=[1,2,3])
    assert rval.ndim == 1
    return rval
def cd_updates(self, pos_s, pos_h, neg_s, neg_h, stepsizes, other_cost=None):
    """SGD updates for the contrastive-divergence cost
    F(positive data) - F(negative particles), optionally plus other_cost.

    `stepsizes` must contain one rate for every slow parameter followed
    by one for every fast parameter (same gradients drive both copies).
    """
    cost = (self.free_energy_given_s_h(pos_s, pos_h, With_fast=False) \
        - self.free_energy_given_s_h(neg_s, neg_h, With_fast=False)).sum()
    if other_cost:
        cost = cost + other_cost
    # Particles are treated as constants: no gradient flows through them.
    grads = theano.tensor.grad(cost,
            wrt=self.params(),
            consider_constant=[pos_s]+[pos_h]+[neg_s]+[neg_h])
    #print len(stepsizes),len(grads+grads)
    assert len(stepsizes) == len(grads+grads)
    if self.l2_conf['unnatural_grad']:
        sgd_updates = unnatural_sgd_updates
    else:
        sgd_updates = pylearn.gd.sgd.sgd_updates
    rval = dict(
            sgd_updates(
                self.params()+self.params_fast(),
                grads+grads,
                stepsizes=stepsizes))
    return rval
def params(self):
    """Return a fresh list of the slow learnable shared parameters."""
    return [p for p in self._params]
def params_fast(self):
    """Return a fresh list of the fast (FPCD) learnable shared parameters."""
    return [p for p in self._params_fast]
def save_weights_to_files(self, identifier):
    # save 4 sets of weights:
    # Intentionally a no-op for the second-layer model; kept so the
    # Trainer API matches the first-layer RBM class.
    pass
def save_weights_to_grey_files(self, identifier):
    # save 4 sets of weights:
    #filters_hs
    # Intentionally a no-op here; the first-layer version's code is kept
    # as a dead string at class level for reference.
    pass
"""
def arrange_for_show(filters_hs,filters_hs_shape):
n_filters_hs_modules, n_filters_hs_per_modules, fcolors, n_filters_hs_rows, n_filters_hs_cols = filters_hs_shape
filters_fs_for_show = filters_hs.reshape(
(n_filters_hs_modules*n_filters_hs_per_modules,
fcolors,
n_filters_hs_rows,
n_filters_hs_cols))
fn = theano.function([],filters_fs_for_show)
rval = fn()
return rval
filters_fs_for_show = arrange_for_show(self.filters_hs, self.filters_hs_shape)
Image.fromarray(
tile_conv_weights(
filters_fs_for_show,flip=False), 'L').save(
'filters_hs_%s.png'%identifier)
if self.conf['lambda_logdomain']:
raise NotImplementedError()
else:
conv_lambda_for_show = arrange_for_show(self.conv_lambda, self.filters_hs_shape)
Image.fromarray(
tile_conv_weights(
conv_lambda_for_show,flip=False), 'L').save(
'conv_lambda_%s.png'%identifier)
"""
def dump_to_file(self, filename):
    """Pickle this model to `filename`, falling back to the pure-Python
    pickler if cPickle cannot serialize some attribute."""
    try:
        cPickle.dump(self, open(filename, 'wb'))
    except cPickle.PicklingError:
        pickle.dump(self, open(filename, 'wb'))
class l2_Gibbs(object): # if there's a Sampler interface - this should support it
    """Persistent Gibbs-chain state (slab and spike particles) for a bRBM."""
    @classmethod
    def alloc(cls, brbm, rng):
        """Allocate particle buffers and a random stream for `brbm`.

        `rng` may be a numpy RandomState or an int seed.
        """
        if not hasattr(rng, 'randn'):
            rng = numpy.random.RandomState(rng)
        self = cls()
        seed = int(rng.randint(2**30))
        self.brbm = brbm
        # Slab particles start from unit Gaussian noise.
        self.s_particles = sharedX(
                rng.randn(*brbm.hs_shape),
                name='s_particles')
        # Spike particles start from fair coin flips.
        self.h_particles = sharedX(
                rng.randint(2, size=brbm.hs_shape),
                name='h_particles')
        #self.particles = sharedX(
        #    numpy.zeros(rbm.v_shape),
        #    name='particles')
        self.s_rng = RandomStreams(seed)
        return self
class l2_Gibbs_for_genrating(object): # if there's a Sampler interface - this should support it
    """Gibbs-chain state over the visible units, used when generating samples."""
    @classmethod
    def alloc(cls, brbm, rng):
        """Allocate a binary visible-particle buffer and a random stream.

        `rng` may be a numpy RandomState or an int seed.
        """
        if not hasattr(rng, 'randn'):
            rng = numpy.random.RandomState(rng)
        self = cls()
        seed = int(rng.randint(2**30))
        self.brbm = brbm
        # BUG FIX: the shape must be passed via the `size` keyword.  The
        # original rng.randint(2, brbm.out_conv_v_shape) passed the shape
        # tuple as the `high` bound, which fails at allocation time
        # (compare l2_Gibbs.alloc, which uses size= correctly).
        self.v_particles = sharedX(
                rng.randint(2, size=brbm.out_conv_v_shape),
                name='v_particles')
        #self.particles = sharedX(
        #    numpy.zeros(rbm.v_shape),
        #    name='particles')
        self.s_rng = RandomStreams(seed)
        return self
class Trainer(object): # updates of this object implement training
@classmethod
def alloc(cls,
        brbm,
        s_batch,
        h_batch,
        lrdict,
        conf,
        rng=234,
        iteration_value=0,
        ):
    """Build a Trainer for `brbm` with a fresh l2_Gibbs sampler.

    `lrdict` maps each (slow and fast) shared parameter to its learning
    rate; monitoring buffers (conv_v, conv_v_means, recons_error) are
    allocated as shared variables.
    """
    batchsize = brbm.hs_shape[0]
    sampler = l2_Gibbs.alloc(brbm, rng=rng)
    print 'alloc trainer'
    error = 0.0
    return cls(
            brbm=brbm,
            batchsize=batchsize,
            s_batch=s_batch,
            h_batch=h_batch,
            sampler=sampler,
            iteration=sharedX(iteration_value, 'iter'), #float32.....
            learn_rates=[lrdict[p] for p in brbm.params()],
            learn_rates_fast=[lrdict[p_fast] for p_fast in brbm.params_fast()],
            conf=conf,
            annealing_coef=sharedX(1.0, 'annealing_coef'),
            conv_v_means=sharedX(numpy.zeros(brbm.out_conv_v_shape[1:])+0.5, 'conv_v_means'),
            conv_v=sharedX(numpy.zeros(brbm.out_conv_v_shape), 'conv_v'),
            recons_error=sharedX(error, 'reconstruction_error'),
            )
def __init__(self, **kwargs):
    """Store every keyword argument from alloc() as an instance attribute."""
    print 'init trainer'
    self.__dict__.update(kwargs)
def updates(self):
    """Build the full dictionary of shared-variable updates for one
    training step: iteration/annealing bookkeeping, monitoring buffers,
    CD parameter updates, Gibbs-chain advancement, fast-weight decay and
    alpha clipping.
    """
    print 'start trainer.updates'
    conf = self.conf
    ups = {}
    add_updates = lambda b: safe_update(ups, b)
    # Learning rate anneals linearly to zero over train_iters.
    annealing_coef = 1.0 - self.iteration / float(conf['train_iters'])
    ups[self.iteration] = self.iteration + 1 #
    ups[self.annealing_coef] = annealing_coef
    # Monitoring: visible means under the current positive-phase batch.
    conv_v = self.brbm.mean_conv_v_given_s_h(
            self.s_batch, self.h_batch, With_fast=False)
    new_conv_v_means = 0.1 * conv_v.mean(axis=0) + .9*self.conv_v_means
    ups[self.conv_v_means] = new_conv_v_means
    ups[self.conv_v] = conv_v
    #sparsity_cost = 0
    #self.sparsity_cost = sparsity_cost
    # SML updates PCD
    add_updates(
            self.brbm.cd_updates(
                pos_s=self.s_batch,
                pos_h=self.h_batch,
                neg_s=self.sampler.s_particles,
                neg_h=self.sampler.h_particles,
                stepsizes=[annealing_coef*lr for lr in self.learn_rates]+[lr_fast for lr_fast in self.learn_rates_fast]))
    if conf['increase_steps_sampling']:
        steps_sampling = self.iteration.get_value() / 1000 + conf['constant_steps_sampling']
    else:
        steps_sampling = conf['constant_steps_sampling']
    """
    if conf['chain_reset_prob']:
        # advance the 'negative-phase' chain
        nois_batch = self.sampler.s_rng.normal(size=self.rbm.v_shape)
        #steps_sampling = steps_sampling + conf['chain_reset_burn_in']
        resets = self.sampler.s_rng.uniform()<conf['chain_reset_prob']
        old_particles = tensor.switch(resets.dimshuffle('x','x','x','x'),
                nois_batch, # reset the chain
                self.sampler.particles, #continue chain
                )
        #old_particles = tensor.switch(resets.dimshuffle(0,'x','x','x'),
        #    self.visible_batch, # reset the chain
        #    self.sampler.particles, #continue chain
        #    )
    else:
        old_particles = self.sampler.particles
    """
    #print steps_sampling
    # Advance the negative-phase chain by steps_sampling Gibbs sweeps.
    s_tmp_particles = self.sampler.s_particles
    h_tmp_particles = self.sampler.h_particles
    for step in xrange(int(steps_sampling)):
        tmp_particles = self.brbm.gibbs_step_for_s_h(s_tmp_particles,
                h_tmp_particles, self.sampler.s_rng,
                sampling_for_s=conf['sampling_for_s'])
        #print tmp_particles
        s_tmp_particles, h_tmp_particles = tmp_particles
    new_s_particles = s_tmp_particles
    new_h_particles = h_tmp_particles
    # Reconstruction error monitoring is disabled (kept at 0.0).
    recons_error = 0.0
    ups[self.recons_error] = recons_error
    ups[self.sampler.s_particles] = new_s_particles
    ups[self.sampler.h_particles] = new_h_particles
    # Clip the (possibly log-domain) alpha update into its allowed range.
    if conf['alpha_min'] < conf['alpha_max']:
        if conf['alpha_logdomain']:
            ups[self.brbm.alpha] = tensor.clip(
                    ups[self.brbm.alpha],
                    numpy.log(conf['alpha_min']).astype(floatX),
                    numpy.log(conf['alpha_max']).astype(floatX))
        else:
            ups[self.brbm.alpha] = tensor.clip(
                    ups[self.brbm.alpha],
                    conf['alpha_min'],
                    conf['alpha_max'])
    # FPCD: decay each fast parameter toward zero on top of its update.
    weight_decay = numpy.asarray(conf['penalty_for_fast_parameters'], dtype=floatX)
    for p_fast in self.brbm.params_fast():
        new_p_fast = ups[p_fast]
        new_p_fast = new_p_fast - weight_decay*p_fast
        ups[p_fast] = new_p_fast
    if conf['alpha_min'] < conf['alpha_max']:
        if conf['alpha_logdomain']:
            ups[self.brbm.alpha_fast] = tensor.clip(
                    ups[self.brbm.alpha_fast],
                    numpy.log(conf['alpha_min']).astype(floatX),
                    numpy.log(conf['alpha_max']).astype(floatX))
        else:
            ups[self.brbm.alpha_fast] = tensor.clip(
                    ups[self.brbm.alpha_fast],
                    conf['alpha_min'],
                    conf['alpha_max'])
    return ups
def save_weights_to_files(self, pattern='iter_%05i'):
    # Disabled: previously saved the RGB particle image and delegated to
    # the model; kept as a no-op to preserve the Trainer API.
    #pattern = pattern%self.iteration.get_value()
    # save particles
    #Image.fromarray(tile_conv_weights(self.sampler.particles.get_value(borrow=True),
    #    flip=False),
    #    'RGB').save('particles_%s.png'%pattern)
    #self.rbm.save_weights_to_files(pattern)
    pass
def save_weights_to_grey_files(self, pattern='iter_%05i'):
    """Format `pattern` with the current iteration; the actual image
    dumping is disabled (dead code kept below for reference)."""
    pattern = pattern%self.iteration.get_value()
    # save particles
    """
    particles_for_show = self.sampler.particles.dimshuffle(3,0,1,2)
    fn = theano.function([],particles_for_show)
    particles_for_show_value = fn()
    Image.fromarray(tile_conv_weights(particles_for_show_value,
        flip=False),'L').save('particles_%s.png'%pattern)
    self.rbm.save_weights_to_grey_files(pattern)
    """
    pass
    """
    Image.fromarray(tile_conv_weights(self.sampler.particles.get_value(borrow=True),
        flip=False),'L').save('particles_%s.png'%pattern)
    self.rbm.save_weights_to_grey_files(pattern)
    """
    def print_status(self):
        """Print a training-progress report for the current iteration.

        For each learned parameter (regular and "fast" copies) prints its
        min/max after asserting every value is finite, then the sampler
        particle statistics and the learning-rate annealing coefficient.
        Output format is consumed by eyeball/log-grepping only.
        """
        def print_minmax(msg, x):
            # Fail fast on NaN/Inf before reporting the range.
            assert numpy.all(numpy.isfinite(x))
            print msg, x.min(), x.max()
        print 'iter:', self.iteration.get_value()
        print_minmax('filters', self.brbm.filters.get_value(borrow=True))
        print_minmax('filters_fast', self.brbm.filters_fast.get_value(borrow=True))
        print_minmax('h_bias', self.brbm.h_bias.get_value(borrow=True))
        print_minmax('h_bias_fast', self.brbm.h_bias_fast.get_value(borrow=True))
        print_minmax('conv_v_bias', self.brbm.conv_v_bias.get_value(borrow=True))
        print_minmax('conv_v_bias_fast', self.brbm.conv_v_bias_fast.get_value(borrow=True))
        print_minmax('mu', self.brbm.mu.get_value(borrow=True))
        print_minmax('mu_fast', self.brbm.mu_fast.get_value(borrow=True))
        # alpha is stored in log-domain when configured; report exp() so the
        # printed range is always in natural units.
        if self.conf['alpha_logdomain']:
            print_minmax('alpha',
                    numpy.exp(self.brbm.alpha.get_value(borrow=True)))
            print_minmax('alpha_fast',
                    numpy.exp(self.brbm.alpha_fast.get_value(borrow=True)))
        else:
            print_minmax('alpha', self.brbm.alpha.get_value(borrow=True))
            print_minmax('alpha_fast', self.brbm.alpha_fast.get_value(borrow=True))
        print_minmax('s_particles', self.sampler.s_particles.get_value())
        print_minmax('h_particles', self.sampler.h_particles.get_value())
        print_minmax('conv_v_means', self.conv_v_means.get_value())
        print_minmax('conv_v', self.conv_v.get_value())
        print (self.conv_v.get_value()).std()
        #print self.conv_h_means.get_value()[0,0:11,0:11]
        #print self.rbm.conv_bias_hs.get_value(borrow=True)[0,0,0:3,0:3]
        #print self.rbm.h_tiled_conv_mask.get_value(borrow=True)[0,32,0:3,0:3]
        #print_minmax('global_h_means', self.global_h_means.get_value())
        print 'lr annealing coef:', self.annealing_coef.get_value()
        #print 'reconstruction error:', self.recons_error.get_value()
def main_inpaint(layer1_filename,
layer2_filename,
samples_shape=(20,1,76,76),
rng=[777888,43,435,678,888],
scale_separately=False,
sampling_for_v=True,
gibbs_steps=1001,
save_interval=500):
filename = layer2_filename #so we put the path information into the layer 2 filename
n_trial = len(rng)
rbm = cPickle.load(open(layer1_filename))
conf = rbm.conf
n_examples, n_img_channels, n_img_rows, n_img_cols=samples_shape
assert n_img_channels==rbm.v_shape[1]
assert rbm.filters_hs_shape[-1]==rbm.filters_hs_shape[-2]
border = rbm.filters_hs_shape[-1]
assert n_img_rows%border == (border-1)
assert n_img_cols%border == (border-1)
rbm.v_shape = (n_examples,n_img_channels,n_img_rows,n_img_cols)
rbm.out_conv_hs_shape = FilterActs.infer_shape_without_instance(rbm.v_shape,rbm.filters_hs_shape)
brbm = cPickle.load(open(layer2_filename))
l2_conf = brbm.l2_conf
brbm.hs_shape = (rbm.out_conv_hs_shape[0],
rbm.out_conv_hs_shape[1]*rbm.out_conv_hs_shape[2],
rbm.out_conv_hs_shape[3],
rbm.out_conv_hs_shape[4])
brbm.out_conv_v_shape = (brbm.hs_shape[0],
brbm.out_conv_v_shape[1], #the number of maps
brbm.hs_shape[2]-brbm.filters_shape[2]+1,
brbm.hs_shape[3]-brbm.filters_shape[2]+1)
B_test = Brodatz([conf['dataset_path']+conf['data_name'],],
patch_shape=(n_img_channels,
n_img_rows,
n_img_cols),
noise_concelling=0.0, seed=3322,
batchdata_size=n_examples,
rescale=1.0,
new_shapes=[[conf['new_shape_x'],conf['new_shape_x']],],
#new_shapes=[[int(640/conf['data_rescale']),int(640/conf['data_rescale'])],],
validation=conf['validation'],
test_data=True)
test_shp = B_test.test_img[0].shape
img = numpy.zeros((1,)+test_shp)
img[0,] = B_test.test_img[0]
temp_img = B_test.test_img[0]
temp_img_for_save = numpy.asarray(255*(temp_img - temp_img.min()) / (temp_img.max() - temp_img.min() + 1e-6),
dtype='uint8')
Image.fromarray(temp_img_for_save,'L').save('%s_test_img.png'%filename)
inpainted_rows = n_img_rows - 2*border
inpainted_cols = n_img_cols - 2*border
value_NCC_inpainted_test = numpy.zeros((n_examples*n_trial,))
value_NCC_inpainted_center = numpy.zeros((n_examples*n_trial,))
results_f_name = '%s_inapinted.txt'%filename
results_f = open(results_f_name,'w')
for n_trial_index in xrange(n_trial):
l1_sampler = Gibbs.alloc(rbm, rng[n_trial_index])
l2_sampler = l2_Gibbs.alloc(brbm, rng[n_trial_index])
batch_idx = tensor.iscalar()
batch_range = batch_idx * n_examples + numpy.arange(n_examples)
batch_x = Brodatz_op(batch_range,
[conf['dataset_path']+conf['data_name'],], # download from http://www.ux.uis.no/~tranden/brodatz.html
patch_shape=(n_img_channels,
n_img_rows,
n_img_cols),
noise_concelling=0.,
seed=rng[n_trial_index],
batchdata_size=n_examples,
rescale=1.0,
new_shapes=[[conf['new_shape_x'],conf['new_shape_x']],],
#new_shapes=[[int(640/conf['data_rescale']),int(640/conf['data_rescale'])],],
validation=conf['validation'],
test_data=True #we use get the batch data from the test image
)
fn_getdata = theano.function([batch_idx],batch_x)
batchdata = fn_getdata(0)
scaled_batchdata_center = numpy.zeros((n_examples,n_img_channels,inpainted_rows,inpainted_cols))
scaled_batchdata_center[:,:,:,:] = batchdata[:,:,border:n_img_rows-border,border:n_img_cols-border]
batchdata[:,:,border:n_img_rows-border,border:n_img_cols-border] = 0
print 'the min of border: %f, the max of border: %f'%(batchdata.min(),batchdata.max())
shared_batchdata = sharedX(batchdata,'batchdata')
border_mask = numpy.zeros((n_examples,n_img_channels,n_img_rows,n_img_cols),dtype=floatX)
border_mask[:,:,border:n_img_rows-border,border:n_img_cols-border]=1
l1_sampler.particles = shared_batchdata
h_mean = rbm.mean_convhs_h_given_v(l1_sampler.particles, With_fast=False)
s_mean, s_var = rbm.mean_var_convhs_s_given_v(l1_sampler.particles, With_fast=False)
l2_sampler.s_batch = s_mean.reshape(brbm.hs_shape)
l2_sampler.h_batch = h_mean.reshape(brbm.hs_shape)
sh_tmp_particles = brbm.gibbs_step_for_s_h(l2_sampler.s_batch,
l2_sampler.h_batch,
l2_sampler.s_rng,
sampling_for_s=l2_conf['sampling_for_s'],
With_fast=False)
s_tmp_particles, h_tmp_particles = sh_tmp_particles
n_batchsize, n_maps, n_hs_rows, n_hs_cols = brbm.hs_shape
icount, fmodules, filters_per_module, hrows, hcols = rbm.out_conv_hs_shape
assert n_maps==fmodules*filters_per_module
s_particles_5d = s_tmp_particles.reshape((icount, fmodules, filters_per_module, hrows, hcols))
h_particles_5d = h_tmp_particles.reshape((icount, fmodules, filters_per_module, hrows, hcols))
mean_var_samples = rbm.mean_var_v_given_h_s(s_particles_5d, h_particles_5d, With_fast=False)
particles_mean, particles_var = mean_var_samples
new_particles = tensor.mul(particles_mean,border_mask)
new_particles = tensor.add(new_particles,batchdata)
fn = theano.function([], [],
updates={l1_sampler.particles: new_particles})
particles = l1_sampler.particles
savename = '%s_inpaint_%i_trail_%i_%04i.png'%(filename,n_img_rows,n_trial_index,0)
temp = particles.get_value(borrow=True)
Image.fromarray(
tile_conv_weights(
temp,
flip=False,scale_each=True
),
'L').save(savename)
for i in xrange(gibbs_steps):
#print i
if i % save_interval == 0 and i != 0:
savename = '%s_inpaint_%i_trail_%i_%04i.png'%(filename,n_img_rows,n_trial_index,i)
print 'saving'
temp = particles.get_value(borrow=True)
print 'the min of center: %f, the max of center: %f' \
%(temp[:,:,border:n_img_rows-border,border:n_img_cols-border].min(),
temp[:,:,border:n_img_rows-border,border:n_img_cols-border].max())
if scale_separately:
pass
"""
scale_separately_savename = '%s_inpaint_scale_separately_%04i.png'%(filename,i)
blank_img = numpy.zeros((n_examples,n_img_channels,n_img_rows,n_img_cols),dtype=floatX)
tmp = temp[:,:,11:66,11:66]
tmp = (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6)
blank_img[:,:,11:66,11:66] = tmp
blank_img = blank_img + scaled_batchdata
Image.fromarray(
tile_conv_weights(
blank_img,
flip=False,scale_each=True),
'L').save(scale_separately_savename)
"""
else:
print savename
Image.fromarray(
tile_conv_weights(
particles.get_value(borrow=True),
flip=False,scale_each=True),
'L').save(savename)
tmp = particles.get_value(borrow=True)
inpainted_img = tmp[:,:,border:n_img_rows-border,border:n_img_cols-border]
#print inpainted_img.shape
#print scaled_batchdata_center.shape
value_NCC = NCC(scaled_batchdata_center, inpainted_img)
print i
print 'NCC'
print '%f, %f'%(value_NCC.mean(),value_NCC.std())
results_f.write('trail %i, %04i\n'%(n_trial_index,i))
results_f.write('NCC\n')
results_f.write('%f, %f\n'%(value_NCC.mean(),value_NCC.std()))
_,_,rows,cols = inpainted_img.shape
assert rows==cols
CC = CrossCorrelation(img,inpainted_img,
window_size=rows, n_patches_of_samples=0)
print img.shape
print img.min(), img.max()
print inpainted_img.shape
#print rows
value_TSS = CC.TSS()
print 'TSS'
print '%f, %f'%(value_TSS.mean(),value_TSS.std())
results_f.write('TSS\n')
results_f.write('%f, %f\n'%(value_TSS.mean(),value_TSS.std()))
center_MSSIM = 255*(scaled_batchdata_center - scaled_batchdata_center.min())/(scaled_batchdata_center.max()-scaled_batchdata_center.min()+1e-6)
inpainted_MSSIM = 255*(inpainted_img - scaled_batchdata_center.min())/(scaled_batchdata_center.max()-scaled_batchdata_center.min()+1e-6)
mssim = MSSIM(center_MSSIM, inpainted_MSSIM,11)
mssim_mean, mssim_std = mssim.MSSIM()
print 'MSSIM score : %f, %f\n'%(mssim_mean, mssim_std)
results_f.write('MSSIM score\n')
results_f.write('%f, %f\n'%(mssim_mean, mssim_std))
fn()
start = n_trial_index*n_examples
end = (n_trial_index+1)*n_examples
value_NCC_inpainted_center[start:end,] = value_NCC
value_NCC_inpainted_test[start:end,] = value_TSS
results_f.write('Final NCC\n')
results_f.write('%f, %f\n'%(value_NCC_inpainted_center.mean(),value_NCC_inpainted_center.std()))
results_f.write('Final TSS\n')
results_f.write('%f, %f\n'%(value_NCC_inpainted_test.mean(),value_NCC_inpainted_test.std()))
results_f.close()
def main_sample(layer1_filename,
                layer2_filename,
                samples_shape=(128,1,120,120),
                burn_in=5001,
                save_interval=1000,
                sampling_for_v=True,
                rng=777888):
    """Draw texture samples from a trained 2-layer model and score them.

    Loads the pickled layer-1 ssRBM and layer-2 binary RBM, resizes both to
    the requested sample canvas, then runs `burn_in` Gibbs steps on the
    layer-2 (s, h) particles, decoding to pixel space through the layer-1
    conditional mean.  Every `save_interval` steps the current mean image
    (border-cropped) is saved as PNG and a TSS score against the held-out
    test image is appended to '<layer2_filename>_sample_TCC.txt'.

    NOTE(review): `sampling_for_v` is currently unused; the saved images
    are conditional means, not sampled visibles -- confirm intended.
    """
    filename = layer2_filename #so we put the path information into the layer 2 filename
    rbm = cPickle.load(open(layer1_filename))
    conf = rbm.conf
    n_samples, n_channels, n_img_rows, n_img_cols = samples_shape
    assert n_channels==rbm.v_shape[1]
    rbm.v_shape = (n_samples, n_channels, n_img_rows, n_img_cols)
    assert rbm.filters_hs_shape[-1]==rbm.filters_hs_shape[-2]
    border = rbm.filters_hs_shape[-1]
    # Canvas size must be congruent to border-1 modulo the filter size
    # (presumably so the tiled convolution covers the canvas exactly -- TODO confirm).
    assert n_img_rows%border == (border-1)
    assert n_img_cols%border == (border-1)
    rbm.out_conv_hs_shape = FilterActs.infer_shape_without_instance(rbm.v_shape,rbm.filters_hs_shape)
    brbm = cPickle.load(open(layer2_filename))
    l2_conf = brbm.l2_conf
    # Layer-2 sees layer-1's (fmodules x filters_per_module) maps flattened
    # into a single channel axis.
    brbm.hs_shape = (rbm.out_conv_hs_shape[0],
                     rbm.out_conv_hs_shape[1]*rbm.out_conv_hs_shape[2],
                     rbm.out_conv_hs_shape[3],
                     rbm.out_conv_hs_shape[4])
    brbm.out_conv_v_shape = (brbm.hs_shape[0],
                             brbm.out_conv_v_shape[1], # the number of maps does not change
                             brbm.hs_shape[2]-brbm.filters_shape[2]+1,
                             brbm.hs_shape[3]-brbm.filters_shape[2]+1)
    sampler = l2_Gibbs.alloc(brbm, rng)
    #import pdb;pdb.set_trace()
    # One compiled step: layer-2 Gibbs update of (s, h) plus the layer-1
    # decoding of those particles to a pixel-space mean and variance.
    tmp_particles = brbm.gibbs_step_for_s_h(sampler.s_particles,
                                            sampler.h_particles,
                                            sampler.s_rng,
                                            sampling_for_s=l2_conf['sampling_for_s'],
                                            With_fast=False)
    s_tmp_particles, h_tmp_particles = tmp_particles
    n_batchsize, n_maps, n_hs_rows, n_hs_cols = brbm.hs_shape
    icount, fmodules, filters_per_module, hrows, hcols = rbm.out_conv_hs_shape
    assert n_maps==fmodules*filters_per_module
    s_particles_5d = s_tmp_particles.reshape((icount, fmodules, filters_per_module, hrows, hcols))
    h_particles_5d = h_tmp_particles.reshape((icount, fmodules, filters_per_module, hrows, hcols))
    mean_var_samples = rbm.mean_var_v_given_h_s(s_particles_5d, h_particles_5d, With_fast=False)
    particles_mean, particles_var = mean_var_samples
    fn = theano.function([], [particles_mean, particles_var],
            #mode='FAST_COMPILE',
            updates={sampler.s_particles: s_tmp_particles,
                     sampler.h_particles: h_tmp_particles})
    # Only used for the reference test image that TSS is scored against.
    B_texture = Brodatz([conf['dataset_path']+conf['data_name'],],
                        patch_shape=(1,98,98),
                        #patch_shape=samples_shape[1:],
                        noise_concelling=0.0,
                        seed=rng,
                        batchdata_size=1,
                        rescale=1.0,
                        new_shapes=[[conf['new_shape_x'],conf['new_shape_x']],],
                        #new_shapes=[[int(640/conf['data_rescale']),int(640/conf['data_rescale'])],],
                        validation=conf['validation'],
                        test_data=False)
    test_shp = B_texture.test_img[0].shape
    img = numpy.zeros((1,)+test_shp)
    img[0,] = B_texture.test_img[0]
    temp_img = B_texture.test_img[0]
    temp_img_for_save = numpy.asarray(255*(temp_img - temp_img.min()) / (temp_img.max() - temp_img.min() + 1e-6),
                                      dtype='uint8')
    Image.fromarray(temp_img_for_save,'L').save('%s_test_img.png'%filename)
    results_f_name = '%s_sample_TCC.txt'%filename
    results_f = open(results_f_name,'w')
    #savename = '%s_sample_%i_burn_0.png'%(filename,n_img_rows)
    #Image.fromarray(
    #        tile_conv_weights(
    #            sampler.particles.get_value(borrow=True)[:,:,border:n_img_rows-border,border:n_img_cols-border],
    #            flip=False,scale_each=True
    #            ),
    #        'L').save(savename)
    for i in xrange(burn_in):
        mean, var = fn()
        if i% 40 ==0:
            print i
            results_f.write('%04i\n'%i)
        savename = '%s_sample_%i_burn_%04i.png'%(filename,n_img_rows,i)
        if i % save_interval == 0 and i!=0:
            print 'saving'
            # Crop the clamped border frame before saving/scoring.
            samples = mean[:,:,border:n_img_rows-border,border:n_img_cols-border]
            Image.fromarray(
                tile_conv_weights(
                    samples,
                    flip=False,
                    scale_each=True),
                'L').save(savename)
            CC = CrossCorrelation(img,samples,
                                  window_size=19, n_patches_of_samples=1)
            aaa = CC.TSS()
            print aaa.mean(),aaa.std()
            results_f.write('%f, %f\n'%(aaa.mean(),aaa.std()))
    results_f.close()
def main_sampling_inpaint(layer1_filename, layer2_filename):
    """Run the full evaluation pipeline for a trained two-layer model.

    Order matters: default-size sampling first, then inpainting of the
    masked test patches, and finally a single large-canvas (362x362)
    sampling pass.
    """
    model_files = (layer1_filename, layer2_filename)
    # 1) texture synthesis at the default sample size
    main_sample(*model_files)
    # 2) inpainting evaluation
    main_inpaint(*model_files)
    # 3) one big single-image synthesis pass
    main_sample(*model_files,
                samples_shape=(1, 1, 362, 362),
                burn_in=5001,
                save_interval=1000,
                sampling_for_v=True,
                rng=777888)
def main0(rval_doc):
    """Train the second-layer binary RBM on top of a pickled first-layer RBM.

    `rval_doc['l2_conf']` supplies all hyperparameters (see main_train for
    the full set).  Builds the Theano training function from
    Trainer.updates(), iterates until the annealed learning-rate
    coefficient drops below zero, checkpointing every 5000 iterations,
    then launches the sampling+inpainting evaluation on the final
    checkpoint.
    """
    l2_conf = rval_doc['l2_conf']
    rbm = cPickle.load(open(l2_conf['rbm_pkl']))
    conf = rbm.conf
    print rbm.conf['data_name']
    sampler = Gibbs.alloc(rbm, rng=33345)
    batchsize, n_img_channels, \
            n_img_rows, n_img_cols = rbm.v_shape
    batch_idx = tensor.iscalar()
    batch_range = batch_idx*batchsize + numpy.arange(batchsize)
    batch_x = Brodatz_op(batch_range,
                         [conf['dataset_path']+conf['data_name'],], # download from http://www.ux.uis.no/~tranden/brodatz.html
                         patch_shape=rbm.v_shape[1:],
                         noise_concelling=0.,
                         seed=3322,
                         batchdata_size=rbm.v_shape[0],
                         rescale=1.0,
                         new_shapes=[[conf['new_shape_x'],conf['new_shape_x']],],
                         validation=conf['validation'],
                         test_data=False
                         )
    # Layer-2 input shape: layer-1's (fmodules x filters_per_module) maps
    # flattened into a single channel axis.
    brbm = bRBM.alloc(
            l2_conf,
            hs_shape=(
                rbm.out_conv_hs_shape[0],
                rbm.out_conv_hs_shape[1]*rbm.out_conv_hs_shape[2],
                rbm.out_conv_hs_shape[3],
                rbm.out_conv_hs_shape[4]
                ),
            filters_shape=(
                l2_conf['n_filters'],
                rbm.out_conv_hs_shape[1]*rbm.out_conv_hs_shape[2],
                l2_conf['filters_size'],
                l2_conf['filters_size']
                ), #fmodules(stride) x filters_per_modules x fcolors(channels) x frows x fcols
            filters_irange=l2_conf['filters_irange'],
            rbm=rbm,
            )
    brbm.save_weights_to_grey_files('layer2_iter_0000')
    base_lr = l2_conf['base_lr_per_example']/batchsize
    conv_lr_coef = l2_conf['conv_lr_coef']
    # The second layer is trained on the layer-1 mean activations of the
    # data batch (no sampling at this stage).
    h_mean = rbm.mean_convhs_h_given_v(batch_x, With_fast=False)
    s_mean_var = rbm.mean_var_convhs_s_given_v(batch_x, With_fast=False)
    s_mean, s_var = s_mean_var
    batchsize, fmodules, filters_per_module, hrows, hcols = rbm.out_conv_hs_shape
    # The two branches differ only in the learning rates of the *_fast
    # parameters: non-zero for FPCD-style fast weights, zero otherwise.
    if l2_conf['fast_weights']:
        trainer = Trainer.alloc(
            brbm,
            s_batch=s_mean.reshape((batchsize, fmodules*filters_per_module, hrows, hcols)),
            h_batch=h_mean.reshape((batchsize, fmodules*filters_per_module, hrows, hcols)),
            lrdict={
                brbm.filters: sharedX(conv_lr_coef*base_lr, 'filters_lr'),
                brbm.conv_v_bias: sharedX(base_lr, 'conv_v_bias_lr'),
                brbm.h_bias: sharedX(base_lr, 'h_bias_lr'),
                brbm.mu: sharedX(base_lr, 'mu_lr'),
                brbm.alpha: sharedX(0.0, 'alpha_lr'), #we keep the alpha fixed in the second layer training
                brbm.filters_fast: sharedX(conv_lr_coef*base_lr, 'filters_fast_lr'),
                brbm.conv_v_bias_fast: sharedX(base_lr, 'conv_v_bias_fast_lr'),
                brbm.h_bias_fast: sharedX(base_lr, 'h_bias_fast_lr'),
                brbm.mu_fast: sharedX(base_lr, 'conv_mu_fast_lr'),
                brbm.alpha_fast: sharedX(0.0, 'conv_alpha_fast_lr')
                },
            conf = l2_conf,
            )
    else:
        trainer = Trainer.alloc(
            brbm,
            s_batch=s_mean.reshape((batchsize, fmodules*filters_per_module, hrows, hcols)),
            h_batch=h_mean.reshape((batchsize, fmodules*filters_per_module, hrows, hcols)),
            lrdict={
                brbm.filters: sharedX(conv_lr_coef*base_lr, 'filters_lr'),
                brbm.conv_v_bias: sharedX(base_lr, 'conv_v_bias_lr'),
                brbm.h_bias: sharedX(base_lr, 'h_bias_lr'),
                brbm.mu: sharedX(base_lr, 'mu_lr'),
                brbm.alpha: sharedX(0.0, 'alpha_lr'),
                brbm.filters_fast: sharedX(0.0, 'filters_fast_lr'),
                brbm.conv_v_bias_fast: sharedX(0.0, 'conv_v_bias_fast_lr'),
                brbm.h_bias_fast: sharedX(0.0, 'h_bias_fast_lr'),
                brbm.mu_fast: sharedX(0.0, 'conv_mu_fast_lr'),
                brbm.alpha_fast: sharedX(0.0, 'conv_alpha_fast_lr')
                },
            conf = l2_conf,
            )
    print 'start building function'
    training_updates = trainer.updates()
    train_fn = theano.function(inputs=[batch_idx],
                               outputs=[],
                               #mode='FAST_COMPILE',
                               #mode='DEBUG_MODE',
                               updates=training_updates
                               )
    print 'training the second layer...'
    iter = 0  # NOTE(review): shadows the builtin `iter`; harmless here but worth renaming
    while trainer.annealing_coef.get_value()>=0:
        dummy = train_fn(iter)
        if iter % 100 == 0:
            trainer.print_status()
        if iter % 5000 == 0:
            brbm.dump_to_file(os.path.join(_temp_data_path_,'brbm_%06i.pkl'%iter))
        # NOTE(review): for iter <= 1000 multiples of 100 this prints status
        # a second time (also triggered by the iter % 100 check above) --
        # presumably intentional extra weight snapshots early in training.
        if iter <= 1000 and not (iter % 100):
            trainer.print_status()
            trainer.save_weights_to_grey_files()
        elif not (iter % 1000):
            trainer.print_status()
            trainer.save_weights_to_grey_files()
        iter += 1
    # NOTE(review): checkpoints are written under _temp_data_path_ but the
    # evaluation below loads a relative path -- confirm both resolve to the
    # same directory at runtime.
    layer2_filename = 'brbm_%06i.pkl'%l2_conf['train_iters']
    main_sampling_inpaint(l2_conf['rbm_pkl'],layer2_filename)
def main_train():
    """Entry point: train the second-layer RBM with the hard-coded config below."""
    print 'start main_train'
    main0(dict(
            l2_conf=dict(
                dataset='/data/lisa/exp/luoheng/Brodatz/',
                rbm_pkl='./rbm_060000.pkl',  # pre-trained first-layer checkpoint
                #chain_reset_prob=0.0,#reset for approximately every 1000 iterations #we need scan for the burn in loop
                #chain_reset_iterations=100
                #chain_reset_burn_in=0,
                unnatural_grad=False,
                alpha_logdomain=False,
                alpha0=10.,
                alpha_min=1.,
                alpha_max=1000.,
                mu0 = 1.0,
                train_iters=40000,
                base_lr_per_example=0.00001,
                conv_lr_coef=1.0,
                n_filters=64,
                filters_size=2,
                filters_irange=.001,
                #sparsity_weight_conv=0,#numpy.float32(500),
                #sparsity_weight_global=0.,
                particles_min=-1000.,
                particles_max=1000.,
                constant_steps_sampling = 1,
                increase_steps_sampling = False,
                sampling_for_s=True,
                penalty_for_fast_parameters = 0.1,
                fast_weights = False  # see main0: zero fast-weight lrs when False
                )))
if __name__ == '__main__':
    # Command-line dispatch; the first argument selects the entry point:
    #   train                    -- train the second layer (main_train)
    #   sampling  L1.pkl L2.pkl  -- draw samples from a trained model
    #   inpaint   L1.pkl L2.pkl  -- inpaint masked test patches
    #   s_i       L1.pkl L2.pkl  -- sampling followed by inpainting
    if len(sys.argv) < 2:
        # Previously a missing command crashed with IndexError.
        sys.exit('usage: %s train|sampling|inpaint|s_i [layer1.pkl layer2.pkl]' % sys.argv[0])
    if sys.argv[1] == 'train':
        sys.exit(main_train())
    elif sys.argv[1] == 'sampling':
        sys.exit(main_sample(sys.argv[2],sys.argv[3]))
    elif sys.argv[1] == 'inpaint':
        sys.exit(main_inpaint(sys.argv[2],sys.argv[3]))
    elif sys.argv[1] == 's_i':
        sys.exit(main_sampling_inpaint(sys.argv[2],sys.argv[3]))
    else:
        # Previously an unknown command fell through and exited 0 silently.
        sys.exit('unknown command: %r' % sys.argv[1])
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
65,822 | luoheng/TCssrbm | refs/heads/master | /Draw_pics.py | import numpy
from PIL import Image
def most_square_shape(N):
    """Return a rectangle (height, width) with area N that is closest to square.

    Searches divisors of N downward from floor(sqrt(N)); always succeeds for
    N >= 1 because 1 divides N (primes yield (1, N)).  Uses range/// so the
    result is an int pair under both Python 2 and Python 3 (the original
    xrange + true-division form broke under Python 3).
    """
    for i in range(int(numpy.sqrt(N)), 0, -1):
        if N % i == 0:
            # i <= sqrt(N) <= N // i, so i is the smaller side (height).
            return (i, N // i)

def tile_conv_weights(w, flip=False, scale_each=False):
    """Tile a stack of single-channel conv filters into one uint8 image.

    :param w: array of shape (n_filters, 1, rows, cols); rows must equal cols.
    :param flip: reverse both spatial axes of each filter before placing it.
    :param scale_each: rescale each filter to [0, 255] independently instead
        of one global rescale over the whole stack.
    :returns: 2-D uint8 array with a 1-pixel black separator between tiles.
    :raises NotImplementedError: for non-square or multi-channel filters
        (previously multi-channel input silently produced no image).
    """
    if w.shape[2] != w.shape[3]:
        raise NotImplementedError('not square', w.shape)
    if w.shape[1] != 1:
        # Explicit failure instead of the old silent fall-through.
        raise NotImplementedError('not single-channel', w.shape)
    wmin, wmax = w.min(), w.max()
    if not scale_each:
        # Global rescale to [0, 255]; +1e-6 guards against a constant stack.
        w = numpy.asarray(255 * (w - wmin) / (wmax - wmin + 1e-6), dtype='uint8')
    trows, tcols = most_square_shape(w.shape[0])
    # +trows-1 / +tcols-1 leaves a 1-pixel separator between tiles.
    outrows = trows * w.shape[2] + trows - 1
    outcols = tcols * w.shape[3] + tcols - 1
    out = numpy.zeros((outrows, outcols), dtype='uint8')
    for tr in range(trows):
        for tc in range(tcols):
            # Optionally flip the filter back into image coordinates.
            tmp = w[tr*tcols + tc,
                    0,
                    ::-1 if flip else 1,
                    ::-1 if flip else 1]
            if scale_each:
                tmp = numpy.asarray(255*(tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-6),
                                    dtype='uint8')
            out[tr*(1+w.shape[2]):tr*(1+w.shape[2])+w.shape[2],
                tc*(1+w.shape[3]):tc*(1+w.shape[3])+w.shape[3]] = tmp
    return out
| {"/test_NCC.py": ["/CrossCorrelation.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.